Dataset schema (one record per source file; columns marked "nullable" may be null):

| column | type / range |
|---|---|
| hexsha | string, length 40 |
| size | int64, 3 – 1.03M |
| ext | string, 10 classes |
| lang | string, 1 class |
| max_stars_repo_path | string, length 3 – 972 |
| max_stars_repo_name | string, length 6 – 130 |
| max_stars_repo_head_hexsha | string, length 40 – 78 |
| max_stars_repo_licenses | list, length 1 – 10 |
| max_stars_count | int64, 1 – 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string, length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string, length 24, nullable |
| max_issues_repo_path | string, length 3 – 972 |
| max_issues_repo_name | string, length 6 – 130 |
| max_issues_repo_head_hexsha | string, length 40 – 78 |
| max_issues_repo_licenses | list, length 1 – 10 |
| max_issues_count | int64, 1 – 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string, length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string, length 24, nullable |
| max_forks_repo_path | string, length 3 – 972 |
| max_forks_repo_name | string, length 6 – 130 |
| max_forks_repo_head_hexsha | string, length 40 – 78 |
| max_forks_repo_licenses | list, length 1 – 10 |
| max_forks_count | int64, 1 – 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string, length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string, length 24, nullable |
| content | string, length 3 – 1.03M |
| avg_line_length | float64, 1.13 – 941k |
| max_line_length | int64, 2 – 941k |
| alphanum_fraction | float64, 0 – 1 |

Each record below shows: hexsha | size | ext | lang; then the stars/issues/forks repo fields (path | repo @ head hexsha | licenses | count | event min – max); then the file content; then avg_line_length | max_line_length | alphanum_fraction.
---
hexsha: a14a30c29b7b751e8b686fcef3585f1fc8608320 | size: 7,837 | ext: py | lang: Python
stars:  api/data/constants/npc_items.py | UP929312/CommunityBot @ c16294e8ff4f47d9a1e8c18c9cd4011e7ebbd67a | ["Apache-2.0"] | count: 1 | 2021-06-15T07:31:13.000Z – 2021-06-15T07:31:13.000Z
issues: api/data/constants/npc_items.py | UP929312/CommunityBot @ c16294e8ff4f47d9a1e8c18c9cd4011e7ebbd67a | ["Apache-2.0"] | count: 1 | 2021-06-01T10:14:32.000Z – 2021-06-02T10:54:12.000Z
forks:  api/data/constants/npc_items.py | UP929312/CommunityBot @ c16294e8ff4f47d9a1e8c18c9cd4011e7ebbd67a | ["Apache-2.0"] | count: 2 | 2021-06-01T10:59:15.000Z – 2021-06-03T18:29:36.000Z
content:
NPC_ITEMS = {
"Amelia": {
"PARKOUR_CONTROLLER": 10000,
"PARKOUR_POINT": 1000,
"PARKOUR_TIMES": 2000,
"SOCIAL_DISPLAY": 10000,
"EGG_HUNT": 1000,
"ISLAND_NPC": 50000,
"TIC_TAC_TOE": 2500,
"CONNECT_FOUR": 2500,
"ROCK_PAPER_SHEARS": 10000,
"SHOWCASE_BLOCK": 50000
},
"Bartender": {
"CHEAP_COFFEE": 1000,
"TEPID_GREEN_TEA": 1000,
"PULPOUS_ORANGE_JUICE": 1000,
"BITTER_ICE_TEA": 1000,
"KNOCKOFF_COLA": 1000,
"DECENT_COFFEE": 5000,
"WOLF_FUR_MIXIN": 150000,
"ZOMBIE_BRAIN_MIXIN": 150000,
"SPIDER_EGG_MIXIN": 150000,
"END_PORTAL_FUMES_MIXIN": 150000
},
"Adventurer": {
"ZOMBIE_TALISMAN": 500,
"SKELETON_TALISMAN": 500,
"VILLAGE_TALISMAN": 2500,
"MINE_TALISMAN": 2500,
"INTIMIDATION_TALISMAN": 10000,
"SCAVENGER_TALISMAN": 10000
},
"Lumber Merchant": {
"ROOKIE_AXE": 12,
"PROMISING_AXE": 35,
"SWEET_AXE": 100,
"EFFICIENT_AXE": 100
},
"Rosetta": {
"IRON_BOOTS": 20,
"IRON_LEGGINGS": 30,
"IRON_CHESTPLATE": 25,
"IRON_HELMET": 15,
"ROSETTA_BOOTS": 960,
"ROSETTA_LEGGINGS": 1200,
"ROSETTA_CHESTPLATE": 1320,
"ROSETTA_HELMET": 1050,
"SQUIRE_BOOTS": 4000,
"SQUIRE_LEGGINGS": 7000,
"SQUIRE_CHESTPLATE": 8000,
"SQUIRE_HELMET": 5000,
"MERCENARY_BOOTS": 30000,
"MERCENARY_LEGGINGS": 45000,
"MERCENARY_CHESTPLATE": 70000,
"MERCENARY_HELMET": 35000,
"CELESTE_BOOTS": 4000,
"CELESTE_LEGGINGS": 7000,
"CELESTE_CHESTPLATE": 8000,
"CELESTE_HELMET": 5000,
"STARLIGHT_BOOTS": 30000,
"STARLIGHT_LEGGINGS": 45000,
"STARLIGHT_CHESTPLATE": 70000,
"STARLIGHT_HELMET": 35000,
"SQUIRE_SWORD": 5000,
"MERCENARY_AXE": 30000,
"CELESTE_WAND": 5000,
"STARLIGHT_WAND": 30000
},
"Weaponsmith": {
"UNDEAD_SWORD": 100,
"END_SWORD": 150,
"SPIDER_SWORD": 100,
"DIAMOND_SWORD": 60,
"BOW": 25,
"WITHER_BOW": 250,
"ARTISANAL_SHORTBOW": 600,
"ARROW": 40
},
"Mine Merchant": {
"ROOKIE_PICKAXE": 12,
"PROMISING_PICKAXE": 35,
"TORCH": 1,
"ONYX": 100
},
"Librarian": {
"BOOK": 20,
},
"Plumber Joe": {
"PLUMBER_SPONGE": 50
},
"Fish Mercant": {
"FISHING_ROD": 100
},
"Alchemist": {
"BREWING_STAND_ITEM": 30,
"GLASS_BOTTLE": 6,
"POTION": 6,
"SUGAR": 4,
"SPECKLED_MELON": 10,
"BLAZE_POWDER": 12,
"GOLDEN_CARROT": 7
},
"Farm Merchant": {
"ENCHANTED_BONE_MEAL": 2,
"ROOKIE_HOE": 100
},
"Scoop": {
"SNOW_SHOVEL": 2500,
},
"Carpenter Shop": {
"SCARECROW": 120,
"WEAPON_RACK": 100,
"ARMOR_SHOWCASE": 100,
"FLOWER_POT_ITEM": 150,
"ENCHANTING_PLUS": 10000,
"CRAFTING_PLUS": 10000,
"FURNACE_PLUS": 15000,
"WOOD_CHEST": 15000,
"FIREPLACE": 10000,
"CHEST_SHELVES": 15000,
},
"Sherry": {
"WINTER_ROD": 40000,
"ICE_BAIT": 12,
"BOTTLE_OF_JYRRE": 100,
"SNOW_BLASTER": 75000,
"LARGE_WINTER_SACK": 250000
},
"Gregory the Opportunitist": {
"DECENT_BOW": 500
},
"Pearl Dealer": {
"STONK_PICKAXE": 499999,
"REMNANT_OF_THE_EYE": 200000
},
"Old Shaman Nyko": {
"TRUE_ESSENCE": 25000
},
"Master Tactician Funk": {
"TACTICIAN_SWORD": 35000,
},
"Melancholic Viking": {
"RAIDER_AXE": 130000,
"PARK_JUNGLE_TRAVEL_SCROLL": 70000,
"VIKING_TEAR": 15000
},
"Malmar": {
"MINING_PUMPKIN": 100000
},
"Bubu": {
"FRACTURED_MITHRIL_PICKAXE": 10000,
"BIOFUEL": 10000
},
"Iron Forger": {
"CHAINMAIL_HELMET": 50,
"CHAINMAIL_CHESTPLATE": 100,
"CHAINMAIL_LEGGINGS": 75,
"CHAINMAIL_BOOTS": 50
},
"Gold Forger": {
"GOLD_HELMET": 10,
"GOLD_CHESTPLATE": 16,
"GOLD_LEGGINGS": 14,
"GOLD_BOOTS": 9,
"FANCY_SWORD": 80
},
"Wool Weaver": {
"STAINED_CLAY:1": 8,
"STAINED_GLASS:1": 16,
"STAINED_GLASS_PANE:1": 16,
"CARPET:1": 32,
"WOOL:1": 32,
"STAINED_CLAY:2": 8,
"STAINED_GLASS:2": 16,
"STAINED_GLASS_PANE:2": 16,
"CARPET:2": 32,
"WOOL:2": 32,
"STAINED_CLAY:3": 8,
"STAINED_GLASS:3": 16,
"STAINED_GLASS_PANE:3": 16,
"CARPET:3": 32,
"WOOL:3": 32,
"STAINED_CLAY:4": 8,
"STAINED_GLASS:4": 16,
"STAINED_GLASS_PANE:4": 16,
"CARPET:4": 32,
"WOOL:4": 32,
"STAINED_CLAY:5": 8,
"STAINED_GLASS:5": 16,
"STAINED_GLASS_PANE:5": 16,
"CARPET:5": 32,
"WOOL:5": 32,
"STAINED_CLAY:6": 8,
"STAINED_GLASS:6": 16,
"STAINED_GLASS_PANE:6": 16,
"CARPET:6": 32,
"WOOL:6": 32,
"STAINED_CLAY:7": 8,
"STAINED_GLASS:7": 16,
"STAINED_GLASS_PANE:7": 16,
"CARPET:7": 32,
"WOOL:7": 32,
"STAINED_CLAY:8": 8,
"STAINED_GLASS:8": 16,
"STAINED_GLASS_PANE:8": 16,
"CARPET:8": 32,
"WOOL:8": 32,
"STAINED_CLAY:9": 8,
"STAINED_GLASS:9": 16,
"STAINED_GLASS_PANE:9": 16,
"CARPET:9": 32,
"WOOL:9": 32,
"STAINED_CLAY:10": 8,
"STAINED_GLASS:10": 16,
"STAINED_GLASS_PANE:10": 16,
"CARPET:10": 32,
"WOOL:10": 32,
"STAINED_CLAY:11": 8,
"STAINED_GLASS:11": 16,
"STAINED_GLASS_PANE:11": 16,
"CARPET:11": 32,
"WOOL:11": 32,
"STAINED_CLAY:12": 8,
"STAINED_GLASS:12": 16,
"STAINED_GLASS_PANE:12": 16,
"CARPET:12": 32,
"WOOL:12": 32,
"STAINED_CLAY:13": 8,
"STAINED_GLASS:13": 16,
"STAINED_GLASS_PANE:13": 16,
"CARPET:13": 32,
"WOOL:13": 32,
"STAINED_CLAY:14": 8,
"STAINED_GLASS:14": 16,
"STAINED_GLASS_PANE:14": 16,
"CARPET:14": 32,
"WOOL:14": 32,
"STAINED_CLAY:15": 8,
"STAINED_GLASS:15": 16,
"STAINED_GLASS_PANE:15": 16,
"CARPET:15": 32,
"WOOL:15": 32,
"STAINED_CLAY:16": 8,
"STAINED_GLASS:16": 16,
"STAINED_GLASS_PANE:16": 16,
"CARPET:16": 32,
"WOOL:16": 32,
},
"Ophelia": {
"UNDEAD_BOW": 80000,
"ARROW": 5,
"SUPER_CLEAVER": 80000,
"STONE_CHESTPLATE": 80000,
"MENDER_HELMET": 80000,
"DARK_GOGGLES": 80000,
"SUPERBOOM_TNT": 2500,
"SUPER_UNDEAD_BOW": 800000,
"HYPER_CLEAVER": 800000,
"METAL_CHESTPLATE": 800000,
"MENDER_FEDORA": 800000,
"SHADOW_GOGGLES": 800000,
"DEATH_BOW": 5000000,
"GIANT_CLEAVER": 5000000,
"STEEL_CHESTPLATE": 5000000,
"MENDER_CROWN": 5000000,
"WITHER_GOGGLES": 5000000
},
"Zog": {
"PET_ITEM_ALL_SKILL_SKILL_BOOST": 50000,
"PET_ITEM_FARMING_SKILL_BOOST_COMMON": 60000,
"PET_ITEM_FARMING_SKILL_BOOST_RARE": 500000,
"PET_ITEM_MINING_SKILL_BOOST_COMMON": 60000,
"PET_ITEM_MINING_SKILL_BOOST_RARE": 500000,
"PET_ITEM_COMBAT_SKILL_BOOST_COMMON": 60000,
"PET_ITEM_FORAGING_SKILL_BOOST_COMMON": 60000,
"PET_ITEM_FISHING_SKILL_BOOST_COMMON": 60000,
"PET_ITEM_BIG_TEETH": 750000,
"PET_ITEM_SHARPENED_CLAWS": 750000,
"PET_ITEM_IRON_CLAWS": 1000000,
"PET_ITEM_HARDENED_SCALES": 1000000,
"PET_ITEM_BUBBLEGUM": 5000000,
},
}
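A minimal lookup sketch for the table above (the `item_price` helper and the assertions are illustrative, not part of the file):

```python
def item_price(npc, item):
    """Return the coin price an NPC charges for an item, or None if unknown."""
    return NPC_ITEMS.get(npc, {}).get(item)

assert item_price("Weaponsmith", "UNDEAD_SWORD") == 100
assert item_price("Nobody", "ANYTHING") is None
```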
avg_line_length: 27.498246 | max_line_length: 47 | alphanum_fraction: 0.532985
---
hexsha: 347696b34629ad7e2fc1c6f15bf4c3022ab9a44f | size: 199 | ext: py | lang: Python
stars:  language/urls.py | vsanasc/sbrain @ c0d0c24ea347d6bd0f34b9fdc3d7f01563ba0461 | ["BSD-3-Clause"] | count: 1 | 2019-10-22T19:17:59.000Z – 2019-10-22T19:17:59.000Z
issues: language/urls.py | vsanasc/sbrain @ c0d0c24ea347d6bd0f34b9fdc3d7f01563ba0461 | ["BSD-3-Clause"] | count: null | events: null
forks:  language/urls.py | vsanasc/sbrain @ c0d0c24ea347d6bd0f34b9fdc3d7f01563ba0461 | ["BSD-3-Clause"] | count: null | events: null
content:
from django.urls import path, include
urlpatterns = [
path(
r'api-auth/',
include(
'rest_framework.urls',
namespace='rest_framework'
)
)
]
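A sketch of what this include exposes, assuming this module is mounted at the project root and `djangorestframework` is installed (`rest_framework.urls` supplies the session login/logout views under the namespace used above):

```python
# Assumes a configured Django project with this module as the root URLconf.
from django.urls import reverse

assert reverse('rest_framework:login') == '/api-auth/login/'
assert reverse('rest_framework:logout') == '/api-auth/logout/'
```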
avg_line_length: 13.266667 | max_line_length: 38 | alphanum_fraction: 0.512563
---
hexsha: 1d0ce50e6556bb645b12d1f5422e4bac3dbe7464 | size: 4,406 | ext: py | lang: Python
stars:  sdmetrics/column_pairs/statistical/kl_divergence.py | TanguyUrvoy/SDMetrics @ eacec8a4cd9b9399e659d2b0716e0f723c5aa876 | ["MIT"] | count: null | events: null
issues: sdmetrics/column_pairs/statistical/kl_divergence.py | TanguyUrvoy/SDMetrics @ eacec8a4cd9b9399e659d2b0716e0f723c5aa876 | ["MIT"] | count: null | events: null
forks:  sdmetrics/column_pairs/statistical/kl_divergence.py | TanguyUrvoy/SDMetrics @ eacec8a4cd9b9399e659d2b0716e0f723c5aa876 | ["MIT"] | count: null | events: null
content:
"""ColumnPair metrics based on Kullback–Leibler Divergence."""
import numpy as np
import pandas as pd
from scipy.special import kl_div
from sdmetrics.column_pairs.base import ColumnPairsMetric
from sdmetrics.goal import Goal
from sdmetrics.utils import get_frequencies
class ContinuousKLDivergence(ColumnPairsMetric):
"""Continuous Kullback–Leibler Divergence based metric.
    This approximates the KL divergence by binning the continuous values
    to turn them into categorical values and then computing the relative
    entropy. Afterwards, the result is normalized by applying ``1 / (1 + KLD)``.
Attributes:
name (str):
Name to use when reports about this metric are printed.
goal (sdmetrics.goal.Goal):
The goal of this metric.
min_value (Union[float, tuple[float]]):
Minimum value or values that this metric can take.
max_value (Union[float, tuple[float]]):
Maximum value or values that this metric can take.
"""
name = 'Continuous Kullback–Leibler Divergence'
goal = Goal.MAXIMIZE
min_value = 0.0
max_value = 1.0
@staticmethod
def compute(real_data, synthetic_data):
"""Compare two pairs of continuous columns using Kullback–Leibler Divergence.
Args:
real_data (pandas.DataFrame):
The values from the real dataset, passed as pandas.DataFrame
with 2 columns.
synthetic_data (pandas.DataFrame):
The values from the synthetic dataset, passed as a
pandas.DataFrame with 2 columns.
Returns:
Union[float, tuple[float]]:
Metric output.
"""
        # Replace missing values with 0.0; note this mutates the input
        # frames in place.
        real_data[pd.isna(real_data)] = 0.0
        synthetic_data[pd.isna(synthetic_data)] = 0.0
column1, column2 = real_data.columns[:2]
real, xedges, yedges = np.histogram2d(real_data[column1], real_data[column2])
synthetic, _, _ = np.histogram2d(
synthetic_data[column1], synthetic_data[column2], bins=[xedges, yedges])
f_obs, f_exp = synthetic.flatten() + 1e-5, real.flatten() + 1e-5
f_obs, f_exp = f_obs / np.sum(f_obs), f_exp / np.sum(f_exp)
return 1 / (1 + np.sum(kl_div(f_obs, f_exp)))
@classmethod
def normalize(cls, raw_score):
"""Return the `raw_score` as is, since it is already normalized.
Args:
raw_score (float):
The value of the metric from `compute`.
Returns:
float:
The normalized value of the metric
"""
return super().normalize(raw_score)
class DiscreteKLDivergence(ColumnPairsMetric):
"""Discrete Kullback–Leibler Divergence based metric.
    This computes the KL divergence and afterwards normalizes the
    value by applying ``1 / (1 + KLD)``.
Attributes:
name (str):
Name to use when reports about this metric are printed.
goal (sdmetrics.goal.Goal):
The goal of this metric.
min_value (Union[float, tuple[float]]):
Minimum value or values that this metric can take.
max_value (Union[float, tuple[float]]):
Maximum value or values that this metric can take.
"""
name = 'Discrete Kullback–Leibler Divergence'
goal = Goal.MAXIMIZE
min_value = 0.0
max_value = 1.0
@staticmethod
def compute(real_data, synthetic_data):
"""Compute the KL divergence.
Args:
real_data:
The values from the real dataset.
synthetic_data:
The values from the synthetic dataset.
Returns:
Union[float, tuple[float]]:
Metric output or outputs.
"""
columns = real_data.columns[:2]
real = real_data[columns].itertuples(index=False)
synthetic = synthetic_data[columns].itertuples(index=False)
f_obs, f_exp = get_frequencies(real, synthetic)
return 1 / (1 + np.sum(kl_div(f_obs, f_exp)))
@classmethod
def normalize(cls, raw_score):
"""Return the `raw_score` as is, since it is already normalized.
Args:
raw_score (float):
The value of the metric from `compute`.
Returns:
float:
The normalized value of the metric
"""
return super().normalize(raw_score)
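A standalone numeric sketch of the `1 / (1 + KLD)` normalization both classes share (not part of sdmetrics): identical frequency vectors give KLD = 0 and hence a score of exactly 1, and the score decays toward 0 as the distributions diverge.

```python
import numpy as np
from scipy.special import kl_div

f_exp = np.array([0.5, 0.3, 0.2])   # "real" frequencies
f_obs = np.array([0.5, 0.3, 0.2])   # identical "synthetic" frequencies
assert 1 / (1 + np.sum(kl_div(f_obs, f_exp))) == 1.0

f_obs = np.array([0.2, 0.3, 0.5])   # shifted frequencies
score = 1 / (1 + np.sum(kl_div(f_obs, f_exp)))
assert 0 < score < 1
```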
avg_line_length: 32.637037 | max_line_length: 85 | alphanum_fraction: 0.622106
---
hexsha: ae28d93c09bba3f1c814fb2a69ba3d26acdcd8d5 | size: 80 | ext: py | lang: Python
stars:  Chapter 01/ch1_35.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE @ f6a4194684515495d00aa38347a725dd08f39a0c | ["MIT"] | count: null | events: null
issues: Chapter 01/ch1_35.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE @ f6a4194684515495d00aa38347a725dd08f39a0c | ["MIT"] | count: null | events: null
forks:  Chapter 01/ch1_35.py | bpbpublications/TEST-YOUR-SKILLS-IN-PYTHON-LANGUAGE @ f6a4194684515495d00aa38347a725dd08f39a0c | ["MIT"] | count: null | events: null
content:
for i in range(5, 7):
    print("{:5d} {:5d} {:5d}".format(i, i ** 2, i ** 3))
# Output: "    5    25   125" and "    6    36   216"
avg_line_length: 26.666667 | max_line_length: 57 | alphanum_fraction: 0.4375
---
hexsha: 926de9954b506f1cf831ffb4db3074afeac06cee | size: 3,826 | ext: py | lang: Python
stars:  tests/unit/test_handler.py | avatarnewyork/sam-media-convert @ a929e98664732206192912615123215a2ebd09b0 | ["MIT"] | count: null | events: null
issues: tests/unit/test_handler.py | avatarnewyork/sam-media-convert @ a929e98664732206192912615123215a2ebd09b0 | ["MIT"] | count: null | events: null
forks:  tests/unit/test_handler.py | avatarnewyork/sam-media-convert @ a929e98664732206192912615123215a2ebd09b0 | ["MIT"] | count: null | events: null
content:
import pytest
import sys
import os
sys.path.append(os.path.dirname(sys.path[0]))
from media_convert import app
@pytest.fixture()
def file_upload_event():
""" Generates File Upload Event"""
return {
"Records": [
{
"eventVersion": "2.1",
"eventSource": "aws:s3",
"awsRegion": "us-east-1",
"eventTime": "2020-08-19T13:18:56.519Z",
"eventName": "ObjectCreated:Put",
"userIdentity": {
"principalId": "AWS:HDJHFD8DKJKDFGFY"
},
"requestParameters": {
"sourceIPAddress": "45.35.111.111"
},
"responseElements": {
"x-amz-request-id": "F81315ACA2A78E86",
"x-amz-id-2": "skwoZb0M+gZGWswijGYxUlG/O0MZ2cfl0GWdCGiS5foUH8FjMPlyeTlXeazYOGmmSmlcKJ09ECe9r2KShDnUNUMPaykzi9Nh"
},
"s3": {
"s3SchemaVersion": "1.0",
"configurationId": "test-uploads",
"bucket": {
"name": "test-bucket",
"ownerIdentity": {
"principalId": "A16H80YXC6P9WD"
},
"arn": "arn:aws:s3:::test-stage"
},
"object": {
"key": "uploads/unprocessed/test.mov",
"size": 4154,
"eTag": "281857964fc7b30d992b869e8ddf0f0f",
"versionId": "OCsve7FQxlb7sm1xXL2_HDJrSYUo02ZG",
"sequencer": "005F3D26C625FD0721"
}
}
}
]
}
@pytest.fixture()
def s3_input_file():
""" Input File String """
return "s3://media-convert/prefix/subdir/uploads/sample.MOV"
@pytest.fixture()
def invalid_s3_event():
return {"Records": [{"key": "value"}]}
def test_get_s3_file_url(file_upload_event):
ret = app.get_s3_file_url(file_upload_event)
assert ret == "s3://test-bucket/uploads/unprocessed/test.mov"
def test_get_s3_file_url_without_event():
with pytest.raises(Exception) as e:
ret = app.get_s3_file_url("")
assert str(e.value) == 'Missing S3 Event'
def test_get_s3_file_url_invalid_event(invalid_s3_event):
with pytest.raises(Exception) as e:
ret = app.get_s3_file_url(invalid_s3_event)
assert str(e.value) == 'Invalid S3 Event'
def test_get_s3_output_path_without_slash(s3_input_file):
destination = app.get_s3_output_path(s3_input_file, 'processed')
assert destination == 's3://media-convert/prefix/subdir/processed/'
def test_get_s3_output_path_with_slash(s3_input_file):
destination = app.get_s3_output_path(s3_input_file, 'processed/')
assert destination == 's3://media-convert/prefix/subdir/processed/'
def test_get_settings(s3_input_file):
settings = app.get_settings(s3_input_file, "")
test_json = {
'Inputs': [{
'FileInput': s3_input_file
}]
}
assert test_json == settings
def test_get_settings_relative_path(s3_input_file):
settings = app.get_settings(s3_input_file, "processed")
test_json = {
'Inputs': [{
'FileInput': s3_input_file
}],
'OutputGroups': [{
'OutputGroupSettings': {
'FileGroupSettings': {
'Destination': "s3://media-convert/prefix/subdir/processed/"
}
}
}]
}
assert test_json == settings
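The assertions above fully pin down the two path helpers; a hypothetical sketch consistent with them follows (the real `media_convert.app` may be implemented differently):

```python
import posixpath

def get_s3_file_url(event):
    """Build an s3:// URL from an S3 put event (sketch inferred from the tests)."""
    if not event:
        raise Exception('Missing S3 Event')
    try:
        record = event['Records'][0]['s3']
        return 's3://{}/{}'.format(record['bucket']['name'],
                                   record['object']['key'])
    except (KeyError, IndexError, TypeError):
        raise Exception('Invalid S3 Event')

def get_s3_output_path(s3_url, output_dir):
    """Swap the file's parent directory (e.g. 'uploads') for output_dir."""
    parent = posixpath.dirname(posixpath.dirname(s3_url))
    return '{}/{}/'.format(parent, output_dir.strip('/'))
```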
avg_line_length: 34.468468 | max_line_length: 136 | alphanum_fraction: 0.516989
---
hexsha: 3265ff422be38635733590b99eb9709403f0b47a | size: 859 | ext: py | lang: Python
stars:  code/10_h_question_game/intents/functions/location/intent_location.py | padmalcom/AISpeechAssistant @ b7501a23a8f513acb5043f3c7bb06df129bdc2cc | ["Apache-2.0"] | count: 1 | 2021-09-08T09:21:16.000Z – 2021-09-08T09:21:16.000Z
issues: code/10_g_smart_home/intents/functions/location/intent_location.py | padmalcom/AISpeechAssistant @ b7501a23a8f513acb5043f3c7bb06df129bdc2cc | ["Apache-2.0"] | count: null | events: null
forks:  code/10_g_smart_home/intents/functions/location/intent_location.py | padmalcom/AISpeechAssistant @ b7501a23a8f513acb5043f3c7bb06df129bdc2cc | ["Apache-2.0"] | count: 2 | 2022-02-06T09:54:40.000Z – 2022-03-01T07:52:51.000Z
content:
from loguru import logger
from chatbot import register_call
import global_variables
import random
import os
import yaml
import geocoder
@register_call("location")
def location(session_id = "general", dummy=0):
config_path = os.path.join('intents','functions','location','config_location.yml')
cfg = None
with open(config_path, "r", encoding='utf8') as ymlfile:
cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)
if not cfg:
logger.error("Konnte Konfigurationsdatei für die Lokalisierung nicht lesen.")
return ""
    # Fetch the language from the global configuration file
    LANGUAGE = global_variables.voice_assistant.cfg['assistant']['language']
    YOU_ARE_HERE = random.choice(cfg['intent']['location'][LANGUAGE]['youarehere'])
    # Determine the location from this machine's IP address
    loc = geocoder.ip('me')
    # YOU_ARE_HERE is already a single phrase string, so format it directly;
    # a second random.choice() here would pick a single character from it.
    return YOU_ARE_HERE.format(loc.city)
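The lookups above imply a config file of roughly the following shape, shown as the dict `yaml.load` would return (the `de` language code and phrase texts are assumptions):

```python
cfg = {
    'intent': {
        'location': {
            'de': {
                'youarehere': [
                    'Du bist hier: {0}.',
                    'Dein aktueller Standort ist {0}.',
                ],
            },
        },
    },
}
```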
avg_line_length: 29.62069 | max_line_length: 83 | alphanum_fraction: 0.760186
---
hexsha: e25f666091f71c7f325ac7fb58566a6d45edc0fe | size: 21,721 | ext: py | lang: Python
stars:  research/object_detection/protos/faster_rcnn_pb2.py | chmod600/tl_classify @ 5f6393cf6834b426a8f7e6b8dd91baa0bdc9f575 | ["Apache-2.0"] | count: 1 | 2018-08-17T00:42:25.000Z – 2018-08-17T00:42:25.000Z
issues: research/object_detection/protos/faster_rcnn_pb2.py | chmod600/tl_classify @ 5f6393cf6834b426a8f7e6b8dd91baa0bdc9f575 | ["Apache-2.0"] | count: null | events: null
forks:  research/object_detection/protos/faster_rcnn_pb2.py | chmod600/tl_classify @ 5f6393cf6834b426a8f7e6b8dd91baa0bdc9f575 | ["Apache-2.0"] | count: 1 | 2018-08-16T17:10:57.000Z – 2018-08-16T17:10:57.000Z
content:
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/faster_rcnn.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from object_detection.protos import anchor_generator_pb2 as object__detection_dot_protos_dot_anchor__generator__pb2
from object_detection.protos import box_predictor_pb2 as object__detection_dot_protos_dot_box__predictor__pb2
from object_detection.protos import hyperparams_pb2 as object__detection_dot_protos_dot_hyperparams__pb2
from object_detection.protos import image_resizer_pb2 as object__detection_dot_protos_dot_image__resizer__pb2
from object_detection.protos import losses_pb2 as object__detection_dot_protos_dot_losses__pb2
from object_detection.protos import post_processing_pb2 as object__detection_dot_protos_dot_post__processing__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/faster_rcnn.proto',
package='object_detection.protos',
syntax='proto2',
serialized_pb=_b('\n)object_detection/protos/faster_rcnn.proto\x12\x17object_detection.protos\x1a.object_detection/protos/anchor_generator.proto\x1a+object_detection/protos/box_predictor.proto\x1a)object_detection/protos/hyperparams.proto\x1a+object_detection/protos/image_resizer.proto\x1a$object_detection/protos/losses.proto\x1a-object_detection/protos/post_processing.proto\"\x85\r\n\nFasterRcnn\x12\x1b\n\x10number_of_stages\x18\x01 \x01(\x05:\x01\x32\x12\x13\n\x0bnum_classes\x18\x03 \x01(\x05\x12<\n\rimage_resizer\x18\x04 \x01(\x0b\x32%.object_detection.protos.ImageResizer\x12N\n\x11\x66\x65\x61ture_extractor\x18\x05 \x01(\x0b\x32\x33.object_detection.protos.FasterRcnnFeatureExtractor\x12N\n\x1c\x66irst_stage_anchor_generator\x18\x06 \x01(\x0b\x32(.object_detection.protos.AnchorGenerator\x12\"\n\x17\x66irst_stage_atrous_rate\x18\x07 \x01(\x05:\x01\x31\x12X\n*first_stage_box_predictor_conv_hyperparams\x18\x08 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x30\n%first_stage_box_predictor_kernel_size\x18\t \x01(\x05:\x01\x33\x12,\n\x1f\x66irst_stage_box_predictor_depth\x18\n \x01(\x05:\x03\x35\x31\x32\x12\'\n\x1a\x66irst_stage_minibatch_size\x18\x0b \x01(\x05:\x03\x32\x35\x36\x12\x32\n%first_stage_positive_balance_fraction\x18\x0c \x01(\x02:\x03\x30.5\x12*\n\x1f\x66irst_stage_nms_score_threshold\x18\r \x01(\x02:\x01\x30\x12*\n\x1d\x66irst_stage_nms_iou_threshold\x18\x0e \x01(\x02:\x03\x30.7\x12&\n\x19\x66irst_stage_max_proposals\x18\x0f \x01(\x05:\x03\x33\x30\x30\x12/\n$first_stage_localization_loss_weight\x18\x10 \x01(\x02:\x01\x31\x12-\n\"first_stage_objectness_loss_weight\x18\x11 \x01(\x02:\x01\x31\x12\x19\n\x11initial_crop_size\x18\x12 \x01(\x05\x12\x1b\n\x13maxpool_kernel_size\x18\x13 \x01(\x05\x12\x16\n\x0emaxpool_stride\x18\x14 \x01(\x05\x12I\n\x1asecond_stage_box_predictor\x18\x15 \x01(\x0b\x32%.object_detection.protos.BoxPredictor\x12#\n\x17second_stage_batch_size\x18\x16 \x01(\x05:\x02\x36\x34\x12+\n\x1dsecond_stage_balance_fraction\x18\x17 \x01(\x02:\x04\x30.25\x12M\n\x1csecond_stage_post_processing\x18\x18 \x01(\x0b\x32\'.object_detection.protos.PostProcessing\x12\x30\n%second_stage_localization_loss_weight\x18\x19 \x01(\x02:\x01\x31\x12\x32\n\'second_stage_classification_loss_weight\x18\x1a \x01(\x02:\x01\x31\x12\x33\n(second_stage_mask_prediction_loss_weight\x18\x1b \x01(\x02:\x01\x31\x12\x45\n\x12hard_example_miner\x18\x1c \x01(\x0b\x32).object_detection.protos.HardExampleMiner\x12U\n second_stage_classification_loss\x18\x1d \x01(\x0b\x32+.object_detection.protos.ClassificationLoss\x12\'\n\x18inplace_batchnorm_update\x18\x1e \x01(\x08:\x05\x66\x61lse\x12)\n\x1ause_matmul_crop_and_resize\x18\x1f \x01(\x08:\x05\x66\x61lse\x12$\n\x15\x63lip_anchors_to_image\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1cuse_matmul_gather_in_matcher\x18! \x01(\x08:\x05\x66\x61lse\x12\x30\n!use_static_balanced_label_sampler\x18\" \x01(\x08:\x05\x66\x61lse\"x\n\x1a\x46\x61sterRcnnFeatureExtractor\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\'\n\x1b\x66irst_stage_features_stride\x18\x02 \x01(\x05:\x02\x31\x36\x12#\n\x14\x62\x61tch_norm_trainable\x18\x03 \x01(\x08:\x05\x66\x61lse')
,
dependencies=[object__detection_dot_protos_dot_anchor__generator__pb2.DESCRIPTOR,object__detection_dot_protos_dot_box__predictor__pb2.DESCRIPTOR,object__detection_dot_protos_dot_hyperparams__pb2.DESCRIPTOR,object__detection_dot_protos_dot_image__resizer__pb2.DESCRIPTOR,object__detection_dot_protos_dot_losses__pb2.DESCRIPTOR,object__detection_dot_protos_dot_post__processing__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_FASTERRCNN = _descriptor.Descriptor(
name='FasterRcnn',
full_name='object_detection.protos.FasterRcnn',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='number_of_stages', full_name='object_detection.protos.FasterRcnn.number_of_stages', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=2,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_classes', full_name='object_detection.protos.FasterRcnn.num_classes', index=1,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_resizer', full_name='object_detection.protos.FasterRcnn.image_resizer', index=2,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='feature_extractor', full_name='object_detection.protos.FasterRcnn.feature_extractor', index=3,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_anchor_generator', full_name='object_detection.protos.FasterRcnn.first_stage_anchor_generator', index=4,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_atrous_rate', full_name='object_detection.protos.FasterRcnn.first_stage_atrous_rate', index=5,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_box_predictor_conv_hyperparams', full_name='object_detection.protos.FasterRcnn.first_stage_box_predictor_conv_hyperparams', index=6,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_box_predictor_kernel_size', full_name='object_detection.protos.FasterRcnn.first_stage_box_predictor_kernel_size', index=7,
number=9, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_box_predictor_depth', full_name='object_detection.protos.FasterRcnn.first_stage_box_predictor_depth', index=8,
number=10, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=512,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_minibatch_size', full_name='object_detection.protos.FasterRcnn.first_stage_minibatch_size', index=9,
number=11, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=256,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_positive_balance_fraction', full_name='object_detection.protos.FasterRcnn.first_stage_positive_balance_fraction', index=10,
number=12, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_nms_score_threshold', full_name='object_detection.protos.FasterRcnn.first_stage_nms_score_threshold', index=11,
number=13, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_nms_iou_threshold', full_name='object_detection.protos.FasterRcnn.first_stage_nms_iou_threshold', index=12,
number=14, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.7),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_max_proposals', full_name='object_detection.protos.FasterRcnn.first_stage_max_proposals', index=13,
number=15, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=300,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_localization_loss_weight', full_name='object_detection.protos.FasterRcnn.first_stage_localization_loss_weight', index=14,
number=16, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_objectness_loss_weight', full_name='object_detection.protos.FasterRcnn.first_stage_objectness_loss_weight', index=15,
number=17, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='initial_crop_size', full_name='object_detection.protos.FasterRcnn.initial_crop_size', index=16,
number=18, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='maxpool_kernel_size', full_name='object_detection.protos.FasterRcnn.maxpool_kernel_size', index=17,
number=19, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='maxpool_stride', full_name='object_detection.protos.FasterRcnn.maxpool_stride', index=18,
number=20, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_stage_box_predictor', full_name='object_detection.protos.FasterRcnn.second_stage_box_predictor', index=19,
number=21, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_stage_batch_size', full_name='object_detection.protos.FasterRcnn.second_stage_batch_size', index=20,
number=22, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=64,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_stage_balance_fraction', full_name='object_detection.protos.FasterRcnn.second_stage_balance_fraction', index=21,
number=23, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.25),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_stage_post_processing', full_name='object_detection.protos.FasterRcnn.second_stage_post_processing', index=22,
number=24, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_stage_localization_loss_weight', full_name='object_detection.protos.FasterRcnn.second_stage_localization_loss_weight', index=23,
number=25, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_stage_classification_loss_weight', full_name='object_detection.protos.FasterRcnn.second_stage_classification_loss_weight', index=24,
number=26, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_stage_mask_prediction_loss_weight', full_name='object_detection.protos.FasterRcnn.second_stage_mask_prediction_loss_weight', index=25,
number=27, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(1),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='hard_example_miner', full_name='object_detection.protos.FasterRcnn.hard_example_miner', index=26,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='second_stage_classification_loss', full_name='object_detection.protos.FasterRcnn.second_stage_classification_loss', index=27,
number=29, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inplace_batchnorm_update', full_name='object_detection.protos.FasterRcnn.inplace_batchnorm_update', index=28,
number=30, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_matmul_crop_and_resize', full_name='object_detection.protos.FasterRcnn.use_matmul_crop_and_resize', index=29,
number=31, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='clip_anchors_to_image', full_name='object_detection.protos.FasterRcnn.clip_anchors_to_image', index=30,
number=32, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_matmul_gather_in_matcher', full_name='object_detection.protos.FasterRcnn.use_matmul_gather_in_matcher', index=31,
number=33, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_static_balanced_label_sampler', full_name='object_detection.protos.FasterRcnn.use_static_balanced_label_sampler', index=32,
number=34, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=337,
serialized_end=2006,
)
_FASTERRCNNFEATUREEXTRACTOR = _descriptor.Descriptor(
name='FasterRcnnFeatureExtractor',
full_name='object_detection.protos.FasterRcnnFeatureExtractor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='object_detection.protos.FasterRcnnFeatureExtractor.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='first_stage_features_stride', full_name='object_detection.protos.FasterRcnnFeatureExtractor.first_stage_features_stride', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=16,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='batch_norm_trainable', full_name='object_detection.protos.FasterRcnnFeatureExtractor.batch_norm_trainable', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2008,
serialized_end=2128,
)
_FASTERRCNN.fields_by_name['image_resizer'].message_type = object__detection_dot_protos_dot_image__resizer__pb2._IMAGERESIZER
_FASTERRCNN.fields_by_name['feature_extractor'].message_type = _FASTERRCNNFEATUREEXTRACTOR
_FASTERRCNN.fields_by_name['first_stage_anchor_generator'].message_type = object__detection_dot_protos_dot_anchor__generator__pb2._ANCHORGENERATOR
_FASTERRCNN.fields_by_name['first_stage_box_predictor_conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_FASTERRCNN.fields_by_name['second_stage_box_predictor'].message_type = object__detection_dot_protos_dot_box__predictor__pb2._BOXPREDICTOR
_FASTERRCNN.fields_by_name['second_stage_post_processing'].message_type = object__detection_dot_protos_dot_post__processing__pb2._POSTPROCESSING
_FASTERRCNN.fields_by_name['hard_example_miner'].message_type = object__detection_dot_protos_dot_losses__pb2._HARDEXAMPLEMINER
_FASTERRCNN.fields_by_name['second_stage_classification_loss'].message_type = object__detection_dot_protos_dot_losses__pb2._CLASSIFICATIONLOSS
DESCRIPTOR.message_types_by_name['FasterRcnn'] = _FASTERRCNN
DESCRIPTOR.message_types_by_name['FasterRcnnFeatureExtractor'] = _FASTERRCNNFEATUREEXTRACTOR
FasterRcnn = _reflection.GeneratedProtocolMessageType('FasterRcnn', (_message.Message,), dict(
DESCRIPTOR = _FASTERRCNN,
__module__ = 'object_detection.protos.faster_rcnn_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.FasterRcnn)
))
_sym_db.RegisterMessage(FasterRcnn)
FasterRcnnFeatureExtractor = _reflection.GeneratedProtocolMessageType('FasterRcnnFeatureExtractor', (_message.Message,), dict(
DESCRIPTOR = _FASTERRCNNFEATUREEXTRACTOR,
__module__ = 'object_detection.protos.faster_rcnn_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.FasterRcnnFeatureExtractor)
))
_sym_db.RegisterMessage(FasterRcnnFeatureExtractor)
# @@protoc_insertion_point(module_scope)
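A minimal usage sketch for the generated module (assumes the TensorFlow `object_detection` package is importable so the imports at the top resolve):

```python
from google.protobuf import text_format
from object_detection.protos import faster_rcnn_pb2

config = text_format.Parse('num_classes: 90', faster_rcnn_pb2.FasterRcnn())
print(config.num_classes)                    # 90
print(config.first_stage_nms_iou_threshold)  # 0.7, the proto2 default above
```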
avg_line_length: 60.002762 | max_line_length: 3,124 | alphanum_fraction: 0.782699
---
hexsha: d2610379b63f644a3eb4df3900e8a182ac7deea3 | size: 16,135 | ext: py | lang: Python
stars:  old/extensions/httpDTN.py | ComputerNetworks-UFRGS/ManP2P-ng @ 41257f46c11e30c6aa663c67c791044c04bbf4e0 | ["MIT"] | count: 1 | 2019-10-29T11:54:08.000Z – 2019-10-29T11:54:08.000Z
issues: old/extensions/httpDTN.py | ComputerNetworks-UFRGS/ManP2P-ng @ 41257f46c11e30c6aa663c67c791044c04bbf4e0 | ["MIT"] | count: null | events: null
forks:  old/extensions/httpDTN.py | ComputerNetworks-UFRGS/ManP2P-ng @ 41257f46c11e30c6aa663c67c791044c04bbf4e0 | ["MIT"] | count: null | events: null
content:
# -*- coding: utf-8 -*-
# # # # # # # # # # # # # # # # # # ## # # # # # # # # # # # # # # # # # # # ##
from extensionLoader import ExtensionLoader
from overlay import addHaltFunction as aHF
from commonRegex import HostName
from commandLineParser import CommandLineParser
from twisted.enterprise import adbapi
from twisted.internet import reactor
from twisted.python.failure import Failure
from hashlib import md5
from sys import stderr
from sqlite3 import Row
import time
import re
dtnGroup = None
bundles = None
#
# Auxiliary class
#
class DictRowledCPool(adbapi.ConnectionPool):
def connect(self):
conn = adbapi.ConnectionPool.connect(self)
conn.row_factory = Row
return conn
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Auxiliary functions and variables.
# Used all along the code in a DRY style
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
dateFormat = '%a, %d %b %Y %H:%M:%S GMT'
def formatedTime(date=None):
if date is None:
return time.strftime(dateFormat, time.gmtime())
else:
return time.strftime(dateFormat, time.gmtime(date))
def rawTime(date=None):
if date is None:
return 0
return int(time.mktime(time.strptime(date, dateFormat)))
def processHeaders(header):
h = { }
# Separate the header between the request, the fields and the data.
fieldsStart = header.find('\n') + 1
contentStart = header.find('\n\n') + 2
# The resource requested
request = header[:fieldsStart].split()[1]
# Parse the fields and add them to the dictionary 'h'
for l in header[fieldsStart:contentStart].strip().split('\n'):
h[l[:l.find(':')]] = l[l.find(':')+1:].strip()
return h, request, header[contentStart:]
# The blank line separates headers from the body, which is what
# processHeaders() splits on ('\n\n').
bundleBody = '''Host: {host}
Content-Destination: {contentDestination}
Content-Source: {contentSource}
Content-Length: {contentLength}
Content-Range: bytes {contentRange}/{contentRangeEnd}
Content-MD5: {contentMD5}
Date: {date}

{content}'''
def formatBundleBody(body, r, h):
return body.format(
contentSource = r[0],
contentDestination = r[1],
contentMD5 = r[2],
contentLength = r[3],
contentRange = r[4],
contentRangeEnd = r[4] + r[3],
host = h,
date = formatedTime(r[5]),
content = r[6],
)
nok = '''http-dtn:HTTP/1.1 404 Not Found
Date: {date}
'''
## # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Code related to GET requests
#
## # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
getBundleOk = 'http-dtn:HTTP/1.1 200 OK\n' + bundleBody
getNotFound = nok
def getBundles(txn, sParser):
# Auxiliary list for recording which of our records were delivered to its
# destination.
deliveredBundles = [ ]
    # For every bundle we got with our request, let's issue a 200 OK message
    # for the requester.
    for r in txn.execute('SELECT * FROM bundle').fetchall():
        sParser.write(formatBundleBody(getBundleOk, r, sParser.myNick))
        # Now check whether we have delivered the bundle to its destination.
        # If so, put it in a list so we can remove it from our database and
        # flag it as delivered.
if (sParser.remotePeerNick is not None
and sParser.remotePeerNick == r[1]):
deliveredBundles.append(r)
    # If we have no bundles, or if we have already sent the requester all the
    # bundles we have, let's signal that through a 404 Not Found so it knows.
    sParser.write(getNotFound.format(date=formatedTime()))
    # Remember the list of delivered bundles? Now we'll remove the delivered
    # ones from the 'bundle' table and put them in the 'delivered' table. No
    # further checks on the list are needed, as it will be empty if we are
    # not connected to the other peer.
for r in deliveredBundles:
txn.execute(
'INSERT INTO delivered values (?, ?, ?, ?);',
(r[0], r[1], r[2], int(time.time())))
txn.execute(
'DELETE FROM bundle WHERE source = ? AND \
destination = ? and md5 = ?;',
(r[0], r[1], r[2]))
getDeliveriesOk = '''http-dtn:HTTP/1.1 200 Ok
Date: {date}
Content-Source: {contentSource}
Content-Destination: {contentDestination}
Content-MD5: {contentMD5}
'''
def getDeliveries(txn, sParser):
    # Let's tell him about all the bundles we have delivered to their
    # destinations.
    for r in txn.execute('SELECT * FROM delivered;').fetchall():
sParser.write(
getDeliveriesOk.format(
contentSource = r[0],
contentDestination = r[1],
contentMD5 = r[2],
date = r[3],))
    # If we have not delivered anything, or if we have already told him about
    # all of the bundles we have delivered, let's issue a 404 Not Found.
    sParser.write(getNotFound.format(date=formatedTime()))
getMustFields = [
'Date',
'Host',
]
def processGET(sParser, header):
    fields, request, content = processHeaders(header)
    # Searching for RFC MUST fields. If they are not present, we should
    # return an error to the requester. But not now :-)
    for f in getMustFields:
        if f not in fields:
            pass
    # Let's check what our requester has requested.
    # Did he request all bundles?
    if request == '*':
        bundles.runInteraction(
            getBundles, sParser
        ).addBoth(
            lambda r: None)
    # Or did he request what we had delivered in our random walks?
    elif request == 'deliveries':
        bundles.runInteraction(
            getDeliveries, sParser
        ).addBoth(
            lambda r: None)
    # OMG, he requested something never thought of before! Let's pass :-)
    else:
        pass
## # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Code related to PUT requests
#
## # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
putOk = '''http-dtn:HTTP/1.1 200 OK
Date: {date}
'''
putNok = nok
def putCallback(result, sParser=None):
    # We'll check whether we successfully added the bundle to our database,
    # in which case type(result) is list, or whether we already have this
    # bundle in our database, in which case type(result) is Failure. When we
    # are sending an error message, result is None. Otherwise, we have an
    # unexpected message, so we stay quiet and just ignore it :-)
    if isinstance(result, list):
        m = putOk.format(date=formatedTime())
    elif isinstance(result, Failure):
        m = putNok.format(date=formatedTime())
    elif result is None:
        m = putNok.format(date=formatedTime())
    else:
        m = None
    if m is not None:
        sParser.write(m)
    return True
ftokens = { }
def addToken(token, callback):
if token in ftokens:
        raise RuntimeError("Token %s already in set" % token)
assert callable(callback), "The callback is not callable"
ftokens[token] = callback
putMustFields = [
('Host', lambda s: HostName.match(s)),
('Content-Source', lambda s: HostName.match(s)),
('Content-Destination', lambda s: HostName.match(s) or s == '*'),
('Content-Length', lambda s: re.compile('[0-9]+').match(s)),
('Content-MD5', lambda s: re.compile('^[0-9a-z]{32}$').match(s)),
('Date', lambda s: True),
]
def processPUT(txn, sParser, header):
fields, request, content = processHeaders(header)
    # Let's check the fields we have against those that are a MUST in our RFC.
    for f in putMustFields:
        # If one is not present, we must raise an error. But not now.
        if f[0] not in fields:
            return putCallback(None, sParser)
        # Now, let's see if the fields are what they claim to be. If they
        # aren't, we should raise an error, but not now. For the time being,
        # we'll just drop the bundle by returning.
        if f[1](fields[f[0]]) is None:
            return putCallback(None, sParser)
    # We now convert the date passed through Date: to a format that can be
    # stored in our database, which is INTEGER.
    try:
        fields['Date'] = rawTime(fields['Date'])
    # If we can't do the conversion, we can't store this bundle. We should
    # return an error to the requester then, but I don't know which of the
    # error codes. We must discuss it.
    except ValueError:
        return putCallback(None, sParser)
# If we have a bundle in the 'delivered' table, it means that we have already
# delivered it to the destination
query = '''SELECT * FROM delivered WHERE
source = ? AND destination = ? AND md5 = ?'''
t = (
fields['Content-Source'],
fields['Content-Destination'],
fields['Content-MD5'],
)
# Checking if the bundle is in the 'delivered' table
if len(txn.execute(query, t).fetchall()) != 0:
return putCallback(None, sParser)
    # Now it is time to store the bundle in our database, so let's do it
    # through a Deferred. However, if we are the destination of the bundle,
    # let's process its content... How? I don't know yet :-(
if fields['Content-Destination'] not in [sParser.myNick, '*']:
bundles.runQuery(
'INSERT INTO bundle VALUES (?, ?, ?, ?, ?, ?, ?)', (
fields['Content-Source'],
fields['Content-Destination'],
fields['Content-MD5'],
fields['Content-Length'],
0,
fields['Date'],
content,)
).addBoth(
putCallback, sParser=sParser)
else:
bundles.runQuery(
'INSERT INTO delivered VALUES (?, ?, ?, ?)', (
fields['Content-Source'],
fields['Content-Destination'],
fields['Content-MD5'],
formatedTime(),)
).addBoth(
putCallback, sParser=sParser)
    #
    # So, a module must be environment aware. In other words, a module must
    # be implemented considering a DTN and must register itself with us.
    try:
        ftokens[
            content[:content.find(':')]
        ](fields, content[:content.find(':')])
    except KeyError:
        print "Unknown token for DTN message <%s %s %s>" % (
            fields['Content-Source'],
            fields['Content-Destination'],
            fields['Content-MD5']
        )
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Processing HTTP codes (like 200, 404, among other... or not)
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def processDelivery(txn, sParser, data):
fields, nouse, content = processHeaders(data)
whatMustHave = (
fields['Content-Source'],
fields['Content-Destination'],
fields['Content-MD5']
)
iStatement = 'INSERT INTO delivered VALUES (?, ?, ?, ?)'
dStatement = 'DELETE FROM bundle WHERE md5 = ?'
    sStatement = '''SELECT md5 FROM bundle
        WHERE
            source = ? AND destination = ? AND md5 = ?'''
    # If we still hold this bundle, drop it from the 'bundle' table, then
    # record it as delivered either way.
    if len(txn.execute(sStatement, whatMustHave).fetchall()) > 0:
        txn.execute(dStatement, whatMustHave).fetchall()
    txn.execute(iStatement, whatMustHave + (formatedTime(),)).fetchall()
def processHTTPCode(txn, sParser, data):
fields, code, content = processHeaders(data)
try:
code = int(code)
except ValueError as e:
print >> stderr, 'HTTP return code not integer'
return
if code == 404:
return
elif code == 200:
isGetResponse = reduce(
lambda r,s: r and s,
map(lambda k: k[0] in fields,
putMustFields)
)
if isGetResponse:
processPUT(txn, None, data)
return
isDeliveryResponse = reduce(
lambda r,s: r and s,
map(lambda k: k in fields,
['Content-Source', 'Content-Destination', 'Content-MD5'])
)
if isDeliveryResponse:
processDelivery(txn, None, data)
return
    # Nothing else to do at this point.
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Handshaking
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
put = 'http-dtn:PUT * HTTP/1.1\n' + bundleBody
get = '''http-dtn:GET {thing} HTTP/1.1
Host: {host}
Date: {date}
'''
def putRequest(r, h):
return formatBundleBody(put, r, h)
def dtnHandshake(txn, sParser):
qDelivering = 'SELECT * FROM bundle WHERE destination = ? ORDER BY date;'
for r in txn.execute(qDelivering, (sParser.remotePeerNick,)).fetchall():
sParser.write(putRequest(r, sParser.myNick))
txn.execute(
'INSERT INTO delivered values (?, ?, ?, ?);',
(r[0], r[1], r[2], int(time.time()))
)
txn.execute(
'DELETE FROM bundle WHERE source = ? AND \
destination = ? and md5 = ?;',
(r[0], r[1], r[2])
)
# Ask for delivered bundles
sParser.write(get.format(
thing='deliveries', host=sParser.myNick, date=formatedTime()
))
# Ask for other bundles
sParser.write(get.format(
thing='*',host=sParser.myNick, date=formatedTime()
))
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Talking to the overlay
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def dtnParser(sParser, t):
m = t[1][:t[1].find(' ')]
if m == "GET":
processGET(sParser, t[1])
elif m == "PUT":
bundles.runInteraction(
processPUT, sParser, t[1]
).addBoth(
lambda r: True)
elif m == "HTTP/1.1":
bundles.runInteraction(
processHTTPCode, sParser, t[1]
).addBoth(
lambda r: True)
    else:
        # Unknown method; ignore it.
        pass
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Auxiliary code for adding and spreading bundles
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class BundleWriter(dict):
    def __init__(self, nick, prepend=''):
        # 'nick' is this peer's own name (e.g. sParser.myNick); it is passed
        # in explicitly since no parser object is in scope here.
        self.nick = nick
        self.prepend = prepend
    def write(self, destination, message):
        self.message = message = {
            'contentSource': self.nick,
            'contentDestination': destination,
            'contentMD5': md5(message).hexdigest(),
            'contentLength': len(message),
            'date': formatedTime(),
            'content': self.prepend + message,
            # Not for storing
            'host': self.nick,
            'contentRange': 0,
            'contentRangeEnd': len(message)
        }
        return bundles.runQuery(
            'INSERT INTO bundle VALUES (?, ?, ?, ?, ?, ?, ?)', (
                message['contentSource'],
                message['contentDestination'],
                message['contentMD5'],
                message['contentLength'],
                0,
                message['date'],
                message['content'],
            )
        ).addCallback(self.spread)
def spread(self, result):
if not isinstance(result, list):
print '''For some reason we can't add the bundle into our DB'''
return
for r in result:
for p in dtnGroup:
p.transport.write(putRequest(r, r[0]))
return result
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Bureaucratic code
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def extensionName():
return str("HTTP-DTN")
def extendProtocol(lFactory, sFactory):
global dtnGroup, bundles
if not ExtensionLoader().isActive("Groups"):
return False
if not ExtensionLoader().isActive("BasicOverlay"):
return False
try:
dtnGroup = (ExtensionLoader().
getExtension("Groups").
getGroup("HTTP-DTN"))
except KeyError:
return False
bundles = DictRowledCPool(
"sqlite3",
CommandLineParser().getArguments().PeerName + '-http-dtn.db',
check_same_thread=False
)
bundles.runInteraction(
lambda r: r.executescript('''
-- 0 1 2 3 4 5 6
-- source, destination, md5, length, range, date, content
            CREATE TABLE IF NOT EXISTS bundle (
                source VARCHAR(32),
                destination VARCHAR(32),
                md5 VARCHAR(32),
                length INTEGER,
                range INTEGER CHECK (range >= 0),
                date INTEGER CHECK (date > 0),
                content BLOB,
                PRIMARY KEY (source, destination, md5)
            );
            CREATE TABLE IF NOT EXISTS delivered (
                source VARCHAR(32),
                destination VARCHAR(32),
                md5 VARCHAR(32),
                date INTEGER CHECK (date > 0),
                PRIMARY KEY (source, destination, md5)
            );
'''))
dtnRE = (
'http-dtn:(({0})|({1}))'.format(
# Request headers
'(GET|PUT) ([0-9]+|\*) HTTP/1.1\n' +
'([-A-Za-z0-9]+:[^\n]+\n)+\n' +
'.*', # The content
# Response headers
'HTTP/1.1 [0-9]+ [- a-zA-Z0-9]+\n' +
'([-A-Za-z0-9]+: [^\n]+\n)+\n' +
'.*',
)
)
sFactory.addToken('http-dtn', dtnParser)
(lFactory.
getState('start').
addTransition('start', dtnRE, lambda t: (t[:8], t[9:])))
(lFactory.
getState('established').
addTransition('established', dtnRE, lambda t: (t[:8], t[9:])))
(ExtensionLoader().
getExtension("BasicOverlay").
addBootstrapFunction(
lambda sParser: bundles.runInteraction(dtnHandshake, sParser)))
return True
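For reference, a standalone sketch of the PUT bundle wire format that `putRequest()`/`formatBundleBody()` emit (all field values are hypothetical; the blank line separates headers from the body, which is what `processHeaders()` splits on):

```python
example_bundle = (
    "http-dtn:PUT * HTTP/1.1\n"
    "Host: peerA\n"
    "Content-Destination: peerB\n"
    "Content-Source: peerA\n"
    "Content-Length: 5\n"
    "Content-Range: bytes 0/5\n"
    "Content-MD5: 5d41402abc4b2a76b9719d911017c592\n"  # md5 of "hello"
    "Date: Mon, 01 Jan 2018 00:00:00 GMT\n"
    "\n"
    "hello"
)
```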
avg_line_length: 26.713576 | max_line_length: 80 | alphanum_fraction: 0.608057
---
hexsha: 787c4e0ee9366c5ef308a4d9bb2264d7f8fa2686 | size: 231 | ext: py | lang: Python
stars:  tranzact/wallet/puzzles/rom_bootstrap_generator.py | Tranzact-Network/tranzact-blockchain @ 692362155e46563aa70559123b93bc9379cac111 | ["Apache-2.0"] | count: 8 | 2021-09-19T18:57:49.000Z – 2022-02-09T04:32:50.000Z
issues: tranzact/wallet/puzzles/rom_bootstrap_generator.py | Tranzact-Network/tranzact-blockchain @ 692362155e46563aa70559123b93bc9379cac111 | ["Apache-2.0"] | count: 3 | 2021-09-29T10:56:48.000Z – 2021-11-19T00:09:28.000Z
forks:  tranzact/wallet/puzzles/rom_bootstrap_generator.py | Tranzact-Network/tranzact-blockchain @ 692362155e46563aa70559123b93bc9379cac111 | ["Apache-2.0"] | count: null | events: null
content:
from tranzact.types.blockchain_format.program import SerializedProgram
from .load_clvm import load_clvm
MOD = SerializedProgram.from_bytes(load_clvm("rom_bootstrap_generator.clvm").as_bin())
def get_generator() -> SerializedProgram:
    """Return the compiled ROM bootstrap generator program."""
    return MOD
avg_line_length: 23.1 | max_line_length: 86 | alphanum_fraction: 0.818182
---
hexsha: 2f65f9c23b360864b63226715b74a721c77b77f7 | size: 4,264 | ext: py | lang: Python
stars:  nipyapi/nifi/models/controller_service_api.py | oneextrafact/nipyapi @ 4c184d69002a8ee3ac528fda63b2ffcc6cedbae5 | ["Apache-2.0"] | count: null | events: null
issues: nipyapi/nifi/models/controller_service_api.py | oneextrafact/nipyapi @ 4c184d69002a8ee3ac528fda63b2ffcc6cedbae5 | ["Apache-2.0"] | count: null | events: null
forks:  nipyapi/nifi/models/controller_service_api.py | oneextrafact/nipyapi @ 4c184d69002a8ee3ac528fda63b2ffcc6cedbae5 | ["Apache-2.0"] | count: null | events: null
content:
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.10.0
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ControllerServiceAPI(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'type': 'str',
'bundle': 'Bundle'
}
attribute_map = {
'type': 'type',
'bundle': 'bundle'
}
def __init__(self, type=None, bundle=None):
"""
ControllerServiceAPI - a model defined in Swagger
"""
self._type = None
self._bundle = None
if type is not None:
self.type = type
if bundle is not None:
self.bundle = bundle
@property
def type(self):
"""
Gets the type of this ControllerServiceAPI.
The fully qualified name of the service interface.
:return: The type of this ControllerServiceAPI.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this ControllerServiceAPI.
The fully qualified name of the service interface.
:param type: The type of this ControllerServiceAPI.
:type: str
"""
self._type = type
@property
def bundle(self):
"""
Gets the bundle of this ControllerServiceAPI.
The details of the artifact that bundled this service interface.
:return: The bundle of this ControllerServiceAPI.
:rtype: Bundle
"""
return self._bundle
@bundle.setter
def bundle(self, bundle):
"""
Sets the bundle of this ControllerServiceAPI.
The details of the artifact that bundled this service interface.
:param bundle: The bundle of this ControllerServiceAPI.
:type: Bundle
"""
self._bundle = bundle
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ControllerServiceAPI):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
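# Hedged usage sketch (not part of the generated client); the service type
# string below is illustrative only.
if __name__ == '__main__':
    api = ControllerServiceAPI(type='org.apache.nifi.example.ExampleService')
    print(api.to_dict())  # {'type': 'org.apache.nifi.example.ExampleService', 'bundle': None}
    print(api == ControllerServiceAPI(type='org.apache.nifi.example.ExampleService'))  # True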
| 27.688312
| 479
| 0.550188
|
52c4329c40a4f29f5e122ca11b29d06ed16d99eb
| 680
|
py
|
Python
|
problems/A/CombinationLock.py
|
deveshbajpai19/CodeForces
|
707b374f03012ec68054841f791d48b33ae4ef1b
|
[
"MIT"
] | 55
|
2016-06-19T05:45:15.000Z
|
2022-03-31T15:18:53.000Z
|
problems/A/CombinationLock.py
|
farhadcu/CodeForces-2
|
707b374f03012ec68054841f791d48b33ae4ef1b
|
[
"MIT"
] | null | null | null |
problems/A/CombinationLock.py
|
farhadcu/CodeForces-2
|
707b374f03012ec68054841f791d48b33ae4ef1b
|
[
"MIT"
] | 25
|
2016-07-29T13:03:15.000Z
|
2021-09-17T01:45:45.000Z
|
__author__ = 'Devesh Bajpai'
'''
https://codeforces.com/problemset/problem/540/A
Solution: We need to find the distance of each corresponding digit in the original and target
combination. The distance would be abs(original[i] - target[i]). The effective distance considering
both directions would be min(distance, 10 - distance).
'''
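# Worked example: original "82195", target "64723".
# Per-digit distances: |8-6|=2, |2-4|=2, |1-7|=6 -> min(6, 4)=4,
# |9-2|=7 -> min(7, 3)=3, |5-3|=2; total moves = 2 + 2 + 4 + 3 + 2 = 13.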
def solve(n, original, target):
moves = 0
for i in xrange(0, n):
distance = abs(int(original[i]) - int(target[i]))
moves += min(distance, 10 - distance)
return moves
if __name__ == "__main__":
n = int(raw_input())
original = raw_input()
target = raw_input()
print solve(n, original, target)
| 23.448276
| 99
| 0.676471
|
c1b450bd8f6aca877120c535fd6cd4303b4d747d
| 696
|
py
|
Python
|
precomputeNetMHC/ExtractPeptidesFromChains.py
|
mrForce/immunoGalaxy
|
8eabd8db6dfb94851087f61b5fd60bd192e2258b
|
[
"MIT"
] | null | null | null |
precomputeNetMHC/ExtractPeptidesFromChains.py
|
mrForce/immunoGalaxy
|
8eabd8db6dfb94851087f61b5fd60bd192e2258b
|
[
"MIT"
] | null | null | null |
precomputeNetMHC/ExtractPeptidesFromChains.py
|
mrForce/immunoGalaxy
|
8eabd8db6dfb94851087f61b5fd60bd192e2258b
|
[
"MIT"
] | null | null | null |
import argparse
import pickle
from Bio import SeqIO
from precomputedNetMHCIndex import peptideGenerator
parser = argparse.ArgumentParser(description='Extract peptides from chain file')
parser.add_argument('chains', help='path to chain file')
parser.add_argument('fasta', help='path to FASTA file')
parser.add_argument('output', help='Where to store peptides')
args= parser.parse_args()
chainCollection = None
with open(args.chains, 'rb') as f:
chainCollection = pickle.load(f)
pepGen = peptideGenerator(chainCollection, args.fasta, chainCollection.peptideLength)
with open(args.output, 'w') as g:
    for peptide in pepGen:
        seq = peptide.getPeptideSequence()
        g.write(seq + '\n')
| 29
| 85
| 0.764368
|
ce094765f3b9d9758eb514ef951f6c4a64c5b6e5
| 131
|
py
|
Python
|
config_files/dynamic_loading/config.py
|
brunocampos01/python
|
24d5773a2c76dcaf545b7b9086465d7127093662
|
[
"MIT"
] | 3
|
2019-04-24T15:08:23.000Z
|
2019-04-26T20:41:55.000Z
|
config_files/dynamic_loading/config.py
|
brunocampos01/python
|
24d5773a2c76dcaf545b7b9086465d7127093662
|
[
"MIT"
] | 1
|
2022-01-25T20:21:17.000Z
|
2022-01-25T20:21:17.000Z
|
config_files/dynamic_loading/config.py
|
brunocampos01/understanding-the-python-ecosystem
|
24d5773a2c76dcaf545b7b9086465d7127093662
|
[
"MIT"
] | 1
|
2019-04-22T03:51:33.000Z
|
2019-04-22T03:51:33.000Z
|
DATABASE_CONFIG = {
'host': 'TEST',
'dbname': 'company',
'user': 'user',
'password': 'password',
'port': 3306
}
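# Hedged sketch (not part of the original file): one way such a config module
# can be loaded dynamically with importlib, assuming this file is importable
# as "config" on sys.path:
#
#     import importlib
#     cfg = importlib.import_module('config')
#     print(cfg.DATABASE_CONFIG['host'])  # -> 'TEST'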
| 18.714286
| 27
| 0.519084
|
531bd70adbb247bc639e47a587367c8a16831908
| 19,678
|
py
|
Python
|
example/mov_maker_strategy_demo.py
|
Bytom/mov_api_doc
|
484e995cf5a0b0e2423d62c6c0fef5362bc49157
|
[
"MIT"
] | null | null | null |
example/mov_maker_strategy_demo.py
|
Bytom/mov_api_doc
|
484e995cf5a0b0e2423d62c6c0fef5362bc49157
|
[
"MIT"
] | null | null | null |
example/mov_maker_strategy_demo.py
|
Bytom/mov_api_doc
|
484e995cf5a0b0e2423d62c6c0fef5362bc49157
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from copy import copy
from mov_sdk.mov_api import MovApi
from bmc_sdk.log_service import log_service_manager
from util import *
from config import strategy_config, account_config  # used by the __main__ entry point below
class SDKImpl(object):
def __init__(self, _guid, _private_key):
self.guid = _guid
self.private_key = _private_key
self.api = MovApi(self.guid, self.private_key)
def get_price(self, symbol):
data = self.api.get_depth(symbol)
if not self.check_error(data, "query_depth"):
asks = data["data"]["asks"]
bids = data["data"]["bids"]
asks.sort()
bids.sort(reverse=True)
if len(asks) > 0 and len(bids) > 0:
return float(asks[0][0]), float(bids[0][0])
return None, None
def get_exchange(self):
        '''
        :return: dict mapping each symbol to its price_tick and volume_tick.
        '''
ret = {}
data = self.api.get_exchange_info()
if not self.check_error(data, "query_exchange"):
data = data["data"]
for d in data:
symbol = (d["base_asset"]["symbol"] + "_" + d["quote_asset"]["symbol"]).lower()
price_tick = 1.0 / pow(10, d["price_decimal"])
volume_tick = 1.0 / pow(10, d["amount_decimal"])
ret[symbol] = {"price_tick": price_tick, "volume_tick": volume_tick}
return ret
def get_account(self):
        '''
        Note: the account endpoint only returns available-balance data!
        :return: dict mapping asset symbol to available balance.
        '''
'''
ret = {}
data = self.api.get_balance()
if not self.check_error(data, "query_balance"):
balances = data["data"]["balances"]
for dic in balances:
alias = dic["asset"]["symbol"]
ret[alias] = float(dic["balance"])
return ret
def send_order(self, symbol, side, price, volume):
try:
data_arr = self.api.send_order(symbol, side, price, volume)
if data_arr:
data = data_arr[-1]
if not self.check_error(data, "query_send_order"):
orders = data["data"]["orders"]
if len(orders) > 0:
d = orders[0]
sys_order_id = str(d["order_id"])
order = OrderData()
order.symbol = symbol
order.direction = side
order.order_id = str(sys_order_id)
order.price = price
order.volume = volume
order.traded = 0
                        order.status = Status.SUBMITTING.value  # order status
order.order_time = get_str_dt_use_timestamp(d["order_timestamp"], mill=1)
return order
except Exception as ex:
log_service_manager.write_log("[Info] send_order error ex:{}".format(ex))
def cancel_order(self, order_id):
log_service_manager.write_log("[Info] cancel:{}".format(order_id))
return self.api.cancel_order(order_id)
def query_open_orders(self, symbol):
data = self.api.query_open_orders(symbol)
ret = []
if not self.check_error(data, "query_open_orders"):
data = data["data"]
for d in data:
order = OrderData()
order.symbol = d["symbol"]
order.order_id = str(d["order_id"])
order.direction = d["side"]
order.price = float(d["open_price"])
order.volume = float(d["amount"])
order.traded = float(d["filled_amount"])
order.status = STATUS_MOV2VT[d["status"]]
order.order_time = get_str_dt_use_timestamp(d["order_timestamp"], mill=1)
ret.append(order)
return ret
def query_list_orders(self, order_id_list):
ret = []
data = self.api.query_list_orders(order_id_list)
if not self.check_error(data, "query_list_orders"):
data = data["data"]
for d in data:
order = OrderData()
order.symbol = d["symbol"]
order.order_id = str(d["order_id"])
order.direction = d["side"]
order.price = float(d["open_price"])
order.volume = float(d["amount"])
order.traded = float(d["filled_amount"])
order.status = STATUS_MOV2VT[d["status"]]
order.order_time = get_str_dt_use_timestamp(d["order_timestamp"], mill=1)
order.cancel_time = get_str_dt_use_timestamp(d["update_timestamp"], mill=1)
ret.append(order)
return ret
def check_error(self, data, func=""):
if int(data["code"]) == 200:
return False
error_code = data["code"]
error_msg = data["msg"]
log_service_manager.write_log(
"{} query_failed, code:{},information:{}".format(str(func), str(error_code), str(error_msg)))
return True
class MovMakerStrategy(object):
def __init__(self, _account_config, _config):
self.account_config = _account_config
self.guid = _account_config.get("guid", "")
self.private_key = _account_config.get("private_key", "")
self.impl = SDKImpl(self.guid, self.private_key)
self.config = _config
self.target_symbol, self.base_symbol = self.config["symbol_pair"].split('/')
self.exchange_info = {"pos_base_symbol": 0, "pos_target_symbol": 0}
self.put_order_dict = {}
self.buy_cover_order = None
self.sell_cover_order = None
self.avg_price_long = self.config["long_config"]["avg_price"]
self.position_long = self.config["long_config"]["now_position"]
self.avg_price_short = self.config["short_config"]["avg_price"]
self.position_short = self.config["short_config"]["now_position"]
self.cover_rate = 1 - self.config["exchange_info"]["fee_rate"]
self.ask = 0
self.bid = 0
def update_account(self):
try:
balance_dic = self.impl.get_account()
self.exchange_info["pos_target_symbol"] = balance_dic[self.target_symbol] * self.config["exchange_info"][
"pos_target_symbol_percent_use"]
self.exchange_info["pos_base_symbol"] = balance_dic[self.base_symbol] * self.config["exchange_info"][
"pos_base_symbol_percent_use"]
except Exception as ex:
log_service_manager.write_log("[Error] MovMakerStrategy,update_account ex:{}".format(ex))
def update_exchange(self):
try:
exchange_dic = self.impl.get_exchange()
dic = exchange_dic.get(self.config["symbol_pair"], {})
if dic:
self.exchange_info["price_tick"] = dic["price_tick"]
self.exchange_info["volume_tick"] = dic["volume_tick"]
except Exception as ex:
log_service_manager.write_log("[Error] MovMakerStrategy,update_exchange ex:{}".format(ex))
def get_now_has_order_ids(self):
ret = list(self.put_order_dict.keys())
if self.buy_cover_order:
ret.append(self.buy_cover_order.order_id)
if self.sell_cover_order:
ret.append(self.sell_cover_order.order_id)
return ret
def get_price_list(self, direction):
items = self.put_order_dict.items()
order_ids = [x[0] for x in items]
orders = [x[1] for x in items]
price_lists = [order.price for order in orders if order.direction == direction]
if direction == "sell":
price_lists.sort()
else:
price_lists.sort(reverse=True)
return price_lists
def cancel_not_belong_orders(self):
now_order_ids = self.get_now_has_order_ids()
orders = self.impl.query_open_orders(self.config["symbol_pair"])
for order in orders:
if order.order_id not in now_order_ids:
log_service_manager.write_log("cancel:{}".format(order.order_id))
self.impl.cancel_order(order.order_id)
def put_long_orders(self):
if self.position_long:
return
start_price = (self.bid + self.ask) / 2.0
start_volume = self.config["long_config"]["start_volume"] * self.exchange_info[
"pos_base_symbol"] / self.bid / 100.0
if not start_volume > 0:
return
inc_price = self.config["long_config"]["inc_spread"] * self.bid / 100.0
inc_volume = self.config["long_config"]["inc_volume"] * self.exchange_info["pos_base_symbol"] / self.bid / 100.0
left_base_amount = self.exchange_info["pos_base_symbol"]
start_price -= inc_price
log_service_manager.write_log(
"put_long_orders,start_price:{},start_volume:{},inc_price:{},inc_volume:{}".format(start_price,
start_volume, inc_price,
inc_volume))
order_list = []
ind = 0
now_price_list = self.get_price_list("buy")
len_all = len(now_price_list)
left_num = self.config["long_config"]["put_order_num"] - len_all
for i in range(self.config["long_config"]["put_order_num"]):
if left_num > 0 and start_price * start_volume < left_base_amount * 0.95:
if (len_all == 0) or (ind == 0 and start_price - inc_price >= now_price_list[0]) \
or (ind > 0 and ind + 1 == len_all and now_price_list[ind - 1] - inc_price >= start_price) \
or (ind > 0 and ind + 1 < len_all and now_price_list[ind - 1] - inc_price >= start_price
and start_price - inc_price >= now_price_list[ind]):
use_volume = get_round_order_price(start_volume, self.exchange_info["volume_tick"])
use_price = get_round_order_price(start_price, self.exchange_info["price_tick"])
order_list.append(("buy", use_price, use_volume))
left_base_amount -= use_volume * use_price
left_num -= 1
start_volume += inc_volume
start_price -= inc_price
else:
break
while ind < len_all and start_price < now_price_list[ind]:
ind += 1
self.send_order_list(order_list)
def put_short_orders(self):
if self.position_short:
return
start_price = (self.bid + self.ask) / 2.0
start_volume = self.config["short_config"]["start_volume"] * self.exchange_info["pos_target_symbol"] / 100.0
if not start_volume > 0:
return
inc_volume = self.config["short_config"]["inc_volume"] * self.exchange_info["pos_target_symbol"] / 100.0
inc_price = self.config["short_config"]["inc_spread"] * self.bid / 100.0
left_target_amount = self.exchange_info["pos_target_symbol"]
start_price += inc_price
log_service_manager.write_log(
"put_short_orders,start_price:{},start_volume:{},inc_price:{},inc_volume:{}".format(start_price,
start_volume, inc_price,
inc_volume))
order_list = []
ind = 0
now_price_list = self.get_price_list("sell")
len_all = len(now_price_list)
left_num = self.config["short_config"]["put_order_num"] - len_all
for i in range(self.config["short_config"]["put_order_num"]):
if left_num > 0 and start_volume < left_target_amount * 0.95:
if (len_all == 0) or (ind == 0 and start_price + inc_price <= now_price_list[0]) \
or (ind > 0 and ind + 1 == len_all and now_price_list[ind - 1] + inc_price >= start_price) \
or (ind > 0 and ind + 1 < len_all and now_price_list[ind - 1] + inc_price >= start_price
and start_price + inc_price <= now_price_list[ind + 1]):
use_volume = get_round_order_price(start_volume, self.exchange_info["volume_tick"])
use_price = get_round_order_price(start_price, self.exchange_info["price_tick"])
order_list.append(("sell", use_price, use_volume))
left_target_amount -= use_volume
left_num -= 1
start_volume += inc_volume
start_price += inc_price
else:
break
while ind < len_all and start_price > now_price_list[ind]:
ind += 1
self.send_order_list(order_list)
def send_order_list(self, order_list):
for side, price, volume in order_list:
order = self.impl.send_order(self.config["symbol_pair"], side, price, volume)
if order:
self.put_order_dict[order.order_id] = copy(order)
if side == "buy":
self.exchange_info["pos_base_symbol"] -= order.price * order.volume
else:
self.exchange_info["pos_target_symbol"] -= order.volume
def put_orders(self):
self.put_long_orders()
self.put_short_orders()
def compute_avg_price(self, new_trade_price, new_trade_volume, new_trade_direction):
if new_trade_direction == "buy":
self.avg_price_long = (self.avg_price_long * self.position_long + new_trade_price * new_trade_volume) / (
self.position_long + new_trade_volume)
self.position_long += new_trade_volume
else:
self.avg_price_short = (self.avg_price_short * self.position_short + new_trade_price * new_trade_volume) / (
self.position_short + new_trade_volume)
self.position_short += new_trade_volume
log_service_manager.write_log(
"[compute_avg_price] [long:{},{}] [short:{},{}]".format(self.avg_price_long, self.position_long,
self.avg_price_short,
self.position_short))
def cover_orders(self):
now_order_ids = self.get_now_has_order_ids()
orders = self.impl.query_list_orders(now_order_ids)
all_new_traded_long = 0
all_new_traded_short = 0
for order in orders:
bef_order = self.put_order_dict.get(order.order_id, None)
if bef_order:
new_traded = order.traded - bef_order.traded
self.put_order_dict[order.order_id] = copy(order)
if not order.is_active():
self.put_order_dict.pop(order.order_id)
new_return_frozen_volume = order.volume - order.traded
if order.direction == "buy":
self.exchange_info["pos_base_symbol"] += new_return_frozen_volume * order.price
else:
self.exchange_info["pos_target_symbol"] += new_return_frozen_volume
if new_traded > 0:
self.compute_avg_price(order.price, new_traded, order.direction)
if order.direction == "buy":
all_new_traded_long += new_traded
self.exchange_info["pos_base_symbol"] -= new_traded * order.price
self.exchange_info["pos_target_symbol"] += new_traded * (1 - self.cover_rate)
else:
all_new_traded_short += new_traded
self.exchange_info["pos_base_symbol"] += new_traded * order.price * (1 - self.cover_rate)
self.exchange_info["pos_target_symbol"] -= new_traded
if self.buy_cover_order and order.order_id == self.buy_cover_order.order_id:
new_traded = order.traded - self.buy_cover_order.traded
if new_traded > 0:
self.position_long -= new_traded
self.exchange_info["pos_base_symbol"] -= new_traded * self.buy_cover_order.price
self.exchange_info["pos_target_symbol"] += new_traded * (1 - self.cover_rate)
self.buy_cover_order = copy(order)
if not order.is_active():
new_return_frozen_volume = self.buy_cover_order.volume - self.buy_cover_order.traded
self.exchange_info["pos_base_symbol"] += new_return_frozen_volume * self.buy_cover_order.price
self.buy_cover_order = None
self.put_long_orders()
if self.sell_cover_order and order.order_id == self.sell_cover_order.order_id:
new_traded = order.traded - self.sell_cover_order.traded
if new_traded > 0:
self.position_long -= new_traded
self.exchange_info["pos_base_symbol"] += new_traded * self.sell_cover_order.price * (
1 - self.cover_rate)
self.exchange_info["pos_target_symbol"] -= new_traded
self.sell_cover_order = copy(order)
if not order.is_active():
new_return_frozen_volume = self.sell_cover_order.volume - self.sell_cover_order.traded
self.exchange_info["pos_target_symbol"] += new_return_frozen_volume
self.sell_cover_order = None
self.put_short_orders()
if all_new_traded_long > 0:
if self.sell_cover_order:
self.impl.cancel_order(self.sell_cover_order.order_id)
price = self.avg_price_short * (1 - self.config["short_config"]["profit_spread"] / 100.0)
self.sell_cover_order = self.impl.send_order(self.config["symbol_pair"], "sell", price,
abs(self.position_long))
log_service_manager.write_log(
"[cover_orders] [short:{},{}]".format(self.avg_price_short, self.position_short))
if all_new_traded_short > 0:
if self.buy_cover_order:
self.impl.cancel_order(self.buy_cover_order.order_id)
price = self.avg_price_long * (1 + self.config["long_config"]["profit_spread"] / 100.0)
self.buy_cover_order = self.impl.send_order(self.config["symbol_pair"], "buy", price,
abs(self.position_short))
log_service_manager.write_log("[cover_orders] [long:{},{}]".format(self.avg_price_long, self.position_long))
def run(self):
self.update_exchange()
self.update_account()
count = 0
while True:
try:
if count % 6 == 0:
self.cancel_not_belong_orders()
self.ask, self.bid = self.impl.get_price(self.config["symbol_pair"])
if self.ask and self.bid:
# log_service_manager.write_log("[Info] cover_orders")
self.cover_orders()
# log_service_manager.write_log("[Info] put_orders")
self.put_orders()
count += 1
except Exception as ex:
log_service_manager.write_log("[Error] MovMakerStrategy,run ex:{}".format(ex))
if __name__ == "__main__":
s = MovMakerStrategy(account_config, strategy_config)
s.run()
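# Hedged sketch of the two config objects the strategy reads; every key below
# is inferred from the attribute accesses above, and all concrete values are
# placeholders only.
#
# account_config = {
#     "guid": "<your-mov-guid>",
#     "private_key": "<your-private-key>",
# }
# strategy_config = {
#     "symbol_pair": "btm/usdt",
#     "exchange_info": {
#         "fee_rate": 0.001,
#         "pos_base_symbol_percent_use": 0.5,
#         "pos_target_symbol_percent_use": 0.5,
#     },
#     "long_config": {
#         "avg_price": 0, "now_position": 0, "start_volume": 1,
#         "inc_spread": 0.5, "inc_volume": 1, "put_order_num": 5,
#         "profit_spread": 1,
#     },
#     "short_config": {
#         "avg_price": 0, "now_position": 0, "start_volume": 1,
#         "inc_spread": 0.5, "inc_volume": 1, "put_order_num": 5,
#         "profit_spread": 1,
#     },
# }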
| 46.520095
| 120
| 0.567944
|
c62965e67e38ef1efc0d370ff19a3f412612dadb
| 186
|
py
|
Python
|
test_time_tracker.py
|
sosiax/track
|
4988d2f1d7701f8b8cd6ca8f17d9d829a4dd712e
|
[
"Apache-2.0"
] | 50
|
2015-05-10T13:59:02.000Z
|
2021-07-12T08:06:51.000Z
|
test_time_tracker.py
|
sosiax/track
|
4988d2f1d7701f8b8cd6ca8f17d9d829a4dd712e
|
[
"Apache-2.0"
] | 17
|
2015-04-29T10:49:51.000Z
|
2019-07-31T12:50:56.000Z
|
test_time_tracker.py
|
sosiax/track
|
4988d2f1d7701f8b8cd6ca8f17d9d829a4dd712e
|
[
"Apache-2.0"
] | 11
|
2015-08-20T09:43:07.000Z
|
2020-03-03T14:41:02.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from track_base import time_tracker
def test_time_tracker():
aa = time_tracker()
if __name__ == '__main__':
test_time_tracker()
| 16.909091
| 35
| 0.682796
|
949662d56f1eb98416a327d4b2fdc37242e1250c
| 293
|
py
|
Python
|
aldryn_blog/cms_app.py
|
ForumDev/aldryn-blog
|
c83c52317f60a23453fe43a1e72f2101e2e6049b
|
[
"BSD-3-Clause"
] | null | null | null |
aldryn_blog/cms_app.py
|
ForumDev/aldryn-blog
|
c83c52317f60a23453fe43a1e72f2101e2e6049b
|
[
"BSD-3-Clause"
] | null | null | null |
aldryn_blog/cms_app.py
|
ForumDev/aldryn-blog
|
c83c52317f60a23453fe43a1e72f2101e2e6049b
|
[
"BSD-3-Clause"
] | 1
|
2020-10-12T06:41:22.000Z
|
2020-10-12T06:41:22.000Z
|
# -*- coding: utf-8 -*-
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class BlogApp(CMSApp):
name = _('News')
urls = ['aldryn_blog.urls']
app_name = 'aldryn_blog'
apphook_pool.register(BlogApp)
| 22.538462
| 55
| 0.730375
|
fecd00e7e5efbdf46a8db8002984841d268d7ed4
| 918
|
py
|
Python
|
lib_collection/string/boyer_moore.py
|
caser789/libcollection
|
eb0a6fc36ce1cb57ed587865bbc1576e81c08924
|
[
"MIT"
] | null | null | null |
lib_collection/string/boyer_moore.py
|
caser789/libcollection
|
eb0a6fc36ce1cb57ed587865bbc1576e81c08924
|
[
"MIT"
] | null | null | null |
lib_collection/string/boyer_moore.py
|
caser789/libcollection
|
eb0a6fc36ce1cb57ed587865bbc1576e81c08924
|
[
"MIT"
] | null | null | null |
def find(haystack, needle):
"""
>>> find("ll", "hello")
-1
>>> find("", "")
0
>>> find("hello", "ll")
2
>>> find("aaaaabba", "bba")
5
>>> find("bbaaaaaa", "bba")
0
>>> find("aaaaa", "bba")
-1
"""
m = len(haystack)
n = len(needle)
if m < n:
return -1
if m == n == 0:
return 0
# compute skip table
R = 256
right = [-1 for _ in range(R)]
for i in range(n):
right[ord(needle[i])] = i
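    # right[c] now holds the rightmost index at which character c occurs in
    # the needle (-1 if it never occurs). On a mismatch at needle[j] versus
    # haystack[i+j], the shift j - right[ord(haystack[i+j])] realigns the
    # needle with that rightmost occurrence (the bad-character heuristic).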
# search
i = 0
while i <= m - n:
skip = 0
for j in range(n-1, -1, -1):
if needle[j] != haystack[i+j]:
skip = j - right[ord(haystack[i+j])]
if skip < 1:
skip = 1
break
if skip == 0:
return i
i += skip
return -1
if __name__ == '__main__':
import doctest
doctest.testmod()
| 17.653846
| 52
| 0.405229
|
8571670ab9f842cfded7796d3b398c62c300633c
| 6,800
|
py
|
Python
|
app/oauth/settings.py
|
larrycameron80/PwnAuth
|
3c73347175516ffe00df6607a35f0c4d17a7022e
|
[
"Apache-2.0"
] | 304
|
2018-05-21T17:28:34.000Z
|
2021-09-11T21:36:05.000Z
|
app/oauth/settings.py
|
larrycameron80/PwnAuth
|
3c73347175516ffe00df6607a35f0c4d17a7022e
|
[
"Apache-2.0"
] | 9
|
2018-05-22T17:08:51.000Z
|
2021-06-10T20:17:36.000Z
|
app/oauth/settings.py
|
lunarobliq/PwnAuth
|
2074e0236fa83c4d57ac9a1eed64921b7b99fca2
|
[
"Apache-2.0"
] | 76
|
2018-05-21T18:19:20.000Z
|
2021-06-04T05:13:42.000Z
|
"""
Django settings for oauth project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import logging
import logging.config
import logging.handlers
import os
if os.getenv('DOCKER_CONTAINER'):
POSTGRES_HOST = 'db'
else:
POSTGRES_HOST = '127.0.0.1'
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY', 'CHANGEME')
# SECURITY WARNING: don't run with debug turned on in production!
if os.getenv('DJANGO_ENV') == 'prod':
DEBUG = False
ALLOWED_HOSTS = [os.getenv('DJANGO_SITE')]
else:
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'localhost']
SITE_ID = 1
OAUTH_MODULES = [
'oauth_office365.apps.OauthOffice365Config',
'oauth_manager.apps.OauthManagerConfig',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'rest_framework',
'rest_framework_swagger',
'sslserver',
] + OAUTH_MODULES
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.sites.middleware.CurrentSiteMiddleware'
]
ROOT_URLCONF = 'oauth.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'oauth.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'oauthdb',
'USER': 'oauth',
'PASSWORD': 'oauth',
'HOST': POSTGRES_HOST,
'PORT': '5432',
}
}
MIGRATION_MODULES = {
'sites': 'oauth.migrations.site_migrations',
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/opt/app/static'
# Rest Framework Global Settings
REST_FRAMEWORK = {
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'rest_framework.renderers.TemplateHTMLRenderer',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
)
}
LOGIN_REDIRECT_URL = '/oauth'
LOGIN_URL = 'login'
LOGOUT_REDIRECT_URL = 'login'
class RequireAuditFilter(logging.Filter):
def filter(self, record):
        return record.levelname == 'INFO'
class RequireDebugLevel(logging.Filter):
def filter(self, record):
return record.levelname == 'DEBUG'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(asctime)s %(message)s'
},
'action': {
'format': '%(asctime)s:::%(name)s:::%(user)s:::%(message)s'
}
},
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue'
},
'require_audit': {
'()': 'oauth.settings.RequireAuditFilter'
},
'require_debug_level': {
'()': 'oauth.settings.RequireDebugLevel'
}
},
'handlers': {
'email_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
},
'error_log': {
'level': 'ERROR',
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': os.getenv('ERROR_LOG', 'error.log'),
'when': 'midnight',
'backupCount': 0,
'formatter': 'verbose'
},
'debug_log': {
'level': 'DEBUG',
'filters': ['require_debug_true', ],
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': os.getenv('DEBUG_LOG', 'debug.log'),
'when': 'midnight',
'backupCount': 0,
'formatter': 'simple'
},
'audit_log': {
'level': 'INFO',
'filters': ['require_audit'],
'class': 'logging.handlers.TimedRotatingFileHandler',
'filename': os.getenv('AUDIT_LOG', 'audit.log'),
'when': 'midnight',
'backupCount': 0,
'formatter': 'action'
},
},
'loggers': {
'django': {
'handlers': ['error_log', 'debug_log'],
'propagate': True
},
'django.request': {
'handlers': ['error_log', 'email_admins'],
'level': 'ERROR',
'propagate': False
},
'oauth': {
'handlers': ['audit_log', 'error_log', 'debug_log'],
'level': 'INFO',
'propagate': False
}
}
}
| 26.5625
| 95
| 0.610588
|
d3cc99d591609eaf8a1bb0fe6e317bac5f2c54a5
| 849
|
py
|
Python
|
backend/legi/management/commands/createindex.py
|
mgax/jshacks-challenge-legi
|
cc6630fc11db225c131124b1679fe0bbf7687392
|
[
"MIT"
] | 2
|
2017-03-12T12:13:16.000Z
|
2017-03-16T08:34:25.000Z
|
backend/legi/management/commands/createindex.py
|
gabriel-v/protolegi
|
0b45b719dcc7a6ae5e5f4781fbcce9c171d636e3
|
[
"MIT"
] | null | null | null |
backend/legi/management/commands/createindex.py
|
gabriel-v/protolegi
|
0b45b719dcc7a6ae5e5f4781fbcce9c171d636e3
|
[
"MIT"
] | null | null | null |
from django.core.management.base import BaseCommand
from django.conf import settings
from elasticsearch import Elasticsearch
es = Elasticsearch(settings.ELASTICSEARCH_URL)
MAPPINGS = {
"document": {
"properties": {
"id": {"type": "string", "index": "not_analyzed"},
}
}
}
SETTINGS = {
"analysis": {
"analyzer": {
"default": {
"tokenizer": "standard",
"filter": ["standard", "lowercase", "asciifolding"],
}
}
}
}
class Command(BaseCommand):
help = "Reset the elasticsearch index"
def handle(self, **options):
es.indices.delete(settings.ELASTICSEARCH_INDEX, ignore=[400, 404])
es.indices.create(settings.ELASTICSEARCH_INDEX, {
"mappings": MAPPINGS,
"settings": SETTINGS,
})
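# As a Django management command located at management/commands/createindex.py,
# this is typically invoked as:
#
#     python manage.py createindex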
| 24.257143
| 74
| 0.574794
|
410b3a4f11152d85d99d77afc00cb4683509bb04
| 5,801
|
py
|
Python
|
server.py
|
pegasystems/docker-mock-web-service
|
e82823d07e15a6fe90f780d44c017335389abdae
|
[
"Apache-2.0"
] | 3
|
2021-01-14T17:16:17.000Z
|
2022-01-05T17:01:14.000Z
|
server.py
|
pegasystems/docker-mock-web-service
|
e82823d07e15a6fe90f780d44c017335389abdae
|
[
"Apache-2.0"
] | 2
|
2020-11-03T12:54:03.000Z
|
2021-01-14T15:06:28.000Z
|
server.py
|
pegasystems/docker-mock-web-service
|
e82823d07e15a6fe90f780d44c017335389abdae
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
from gevent import monkey
monkey.patch_all()
from flask import Flask
from gevent.pywsgi import WSGIServer
from prometheus_flask_exporter import PrometheusMetrics
from flask import request, redirect, url_for, make_response
import time
import json
import socket
import requests
import urllib3
import pg8000
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
app = Flask(__name__)
metrics = PrometheusMetrics(app)
from werkzeug.middleware.proxy_fix import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app, x_proto=1, x_host=1)
invocationCount = 0
healthResponseCode = 200
MESSAGE_FIELD = 'message'
DEFAULT_MESSAGE = 'Hello, World!'
SECONDS = 'seconds'
METHOD = 'method'
RESPONSECODE = 'responsecode'
COUNT = 'count'
HOSTNAME = 'hostname'
HOSTIP = 'hostip'
TARGET_FIELD = 'target'
DATA_FIELD = 'data'
PORT_FIELD = 'port'
SUCCESS_FIELD = 'success'
USER_FIELD = 'user'
PASSWORD_FIELD = 'password'
KEY_FIELD = 'key'
VALUE_FIELD = 'value'
PATH_ROOT = '/'
PATH_DELAY = '/delay'
PATH_ECHO = '/echo'
PATH_CODE = '/code'
PATH_COUNT = '/count'
PATH_HEALTH = '/health'
PATH_SET_HEALTH = '/sethealth'
PATH_HEADERS = '/headers'
PATH_REDIRECT = '/redirect'
PATH_HOSTINFO = '/hostinfo'
PATH_REQUEST = '/request'
PATH_TCP = '/tcp'
PATH_POSTGRES = '/postgres'
PATH_WITH_TENANT = '/path/'
PATH_COOKIE = '/cookies'
@app.route(PATH_ROOT)
def index():
return json.dumps({MESSAGE_FIELD: DEFAULT_MESSAGE})
@app.route(PATH_DELAY)
def slow():
seconds = request.args.get(SECONDS, default=5, type=int)
time.sleep(seconds)
return json.dumps({SECONDS: seconds})
@app.route(PATH_ECHO, methods=['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'CONNECT', 'OPTIONS', 'TRACE', 'PATCH'])
def echo():
arg = request.args.get(MESSAGE_FIELD, default="", type=str)
return json.dumps({METHOD: request.method, MESSAGE_FIELD: arg})
@app.route(PATH_CODE)
def code():
code = request.args.get(RESPONSECODE, default=200, type=int)
return json.dumps({RESPONSECODE: code}), code
@app.route(PATH_COUNT)
def count():
global invocationCount
invocationCount += 1
return json.dumps({COUNT: invocationCount}), 200
@app.route(PATH_HEALTH)
def health():
return "", healthResponseCode
@app.route(PATH_SET_HEALTH)
def sethealth():
code = request.args.get(RESPONSECODE, default=200, type=int)
global healthResponseCode
healthResponseCode = code
return "", 200
@app.route(PATH_HEADERS)
def headers():
response = {'headers': dict(request.headers)}
return json.dumps(response), 200
@app.route(PATH_HOSTINFO)
def host_info():
hostname = socket.gethostname()
ip_addr = socket.gethostbyname(hostname)
return json.dumps({HOSTNAME: hostname, HOSTIP: ip_addr}), 200
@app.route(PATH_REDIRECT)
def redir():
return redirect(url_for("index"))
@app.route(PATH_REQUEST, methods=['POST'])
def sendRequest():
data = json.loads(request.data)
try:
resp = requests.get(data[TARGET_FIELD], verify=False, timeout=5)
return json.dumps({SUCCESS_FIELD: True, RESPONSECODE: resp.status_code, DATA_FIELD: resp.text}), 200
except requests.exceptions.ConnectionError as ncr:
return json.dumps({SUCCESS_FIELD: False, MESSAGE_FIELD: "connection_error"}), 200
except ConnectionRefusedError as cr:
return json.dumps({SUCCESS_FIELD: False, MESSAGE_FIELD: "connection_refused"}), 200
except socket.timeout:
return json.dumps({SUCCESS_FIELD: False, MESSAGE_FIELD: "timed_out"}), 200
except Exception as inst:
return json.dumps({SUCCESS_FIELD: False, MESSAGE_FIELD: str(inst)}), 200
@app.route(PATH_TCP, methods=['POST'])
def tcpConnect():
data = json.loads(request.data)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
try:
s.connect((data[TARGET_FIELD], data[PORT_FIELD]))
return json.dumps({SUCCESS_FIELD: True}), 200
except ConnectionRefusedError as cr:
return json.dumps({SUCCESS_FIELD: False, MESSAGE_FIELD: "connection_refused"}), 200
except socket.timeout:
return json.dumps({SUCCESS_FIELD: False, MESSAGE_FIELD: "timed_out"}), 200
except Exception as inst:
return json.dumps({SUCCESS_FIELD: False, MESSAGE_FIELD: str(inst)}), 200
finally:
s.close()
@app.route(PATH_POSTGRES, methods=['POST'])
def testpostgres():
conn = None
try:
data = json.loads(request.data)
conn = pg8000.connect(user=data[USER_FIELD], password=data[PASSWORD_FIELD], host=data[TARGET_FIELD])
return json.dumps({SUCCESS_FIELD: True}), 200
except Exception as inst:
return json.dumps({SUCCESS_FIELD: False, MESSAGE_FIELD: str(inst)}), 200
finally:
        if conn is not None:
conn.close()
# Search service
@app.route(PATH_WITH_TENANT)
@app.route(PATH_WITH_TENANT + '<string:tenant_id>/')
def search_url_get(tenant_id=None):
response_code = request.args.get(RESPONSECODE, default=200, type=int)
return json.dumps({RESPONSECODE: response_code, "tenantID": tenant_id}), response_code
@app.route(PATH_WITH_TENANT + '<string:tenant_id>/<string:action>', methods=['POST'])
def search_url_post(tenant_id, action):
return json.dumps({"Status": "Ok", "tenantID": tenant_id, "Action": action}), 200
@app.route(PATH_COOKIE, methods=['GET','POST'])
def cookie():
key = request.args.get(KEY_FIELD, default="", type=str)
val = request.args.get(VALUE_FIELD, default="", type=str)
resp = make_response(json.dumps({"cookies":request.cookies}))
if (key != ""):
resp.set_cookie(key, val)
return resp
if __name__ == '__main__':
    server_8089 = WSGIServer(('0.0.0.0', 8089), app)
    server_8089.start()
    server_8080 = WSGIServer(('0.0.0.0', 8080), app)
    server_8080.serve_forever()
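# Hedged usage sketch (run from a separate client process; host and ports
# assume the defaults above):
#
#     import requests
#     requests.get('http://localhost:8080/delay?seconds=1')
#     requests.post('http://localhost:8080/tcp',
#                   data=json.dumps({'target': '127.0.0.1', 'port': 5432}))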
| 28.024155
| 111
| 0.708499
|
405b129e23e4e4ced0172f2359495881ff601c76
| 4,422
|
py
|
Python
|
fortnox/services/order_services.py
|
xalien10/fortnox-python
|
7c5fe29a8adaa5a21288df4495996e20515ba8a7
|
[
"MIT"
] | 21
|
2020-03-21T14:49:33.000Z
|
2022-02-02T12:46:08.000Z
|
fortnox/services/order_services.py
|
xalien10/fortnox-python
|
7c5fe29a8adaa5a21288df4495996e20515ba8a7
|
[
"MIT"
] | 5
|
2020-07-03T18:55:48.000Z
|
2021-11-02T10:25:32.000Z
|
fortnox/services/order_services.py
|
xalien10/fortnox-python
|
7c5fe29a8adaa5a21288df4495996e20515ba8a7
|
[
"MIT"
] | 3
|
2020-06-08T06:23:50.000Z
|
2021-06-10T18:28:32.000Z
|
class OrderService(object):
"""
    :class:`fortnox.OrderService` is used by :class:`fortnox.Client` to perform
    actions related to the Order resource.
Normally you won't instantiate this class directly.
"""
"""
Allowed attributes for Order to send to Fortnox backend servers.
"""
OPTS_KEYS_TO_PERSIST = ['CustomerNumber', 'OrderRows']
"""
    OrderRows has the following structure:
"OrderRows": [
{
"ArticleNumber": "11",
"DeliveredQuantity": "10",
"Description": "Trycksak: 4 sid A4",
"OrderedQuantity": "10",
"Unit": "st"
},
{
"ArticleNumber": "12",
"DeliveredQuantity": "10",
"Description": "Trycksak: 4 sid A4",
"OrderedQuantity": "10",
"Unit": "st"
},
..................
]
"""
SERVICE = "Order"
def __init__(self, http_client):
"""
:param :class:`fortnox.HttpClient` http_client: Pre configured high-level http client.
"""
self.__http_client = http_client
@property
def http_client(self):
return self.__http_client
def list(self, **params):
"""
        Retrieve all Orders
        Returns all Orders available to the Company, according to the parameters provided
        :calls: ``get /orders``
        :param dict params: (optional) Search options.
        :return: List of dictionaries that support attribute-style access, which represent a collection of Orders.
:rtype: list
"""
_, _, orders = self.http_client.get("/orders", params=params)
return orders
def retrieve(self, document_number):
"""
Retrieve a single Order
Returns a single Order according to the unique Order ID provided
If the specified Order does not exist, this query returns an error
:calls: ``get /orders/{document_number}``
        :param int id: Unique identifier of an Order.
        :return: Dictionary that supports attribute-style access and represents an Order resource.
:rtype: dict
"""
_, _, order = self.http_client.get("/orders/{document_number}".format(document_number=document_number))
return order
def create(self, *args, **kwargs):
"""
        Create an Order
        Creates a new Order
        **Notice** the Order's name **must** be unique within the scope of the resource_type
        :calls: ``post /orders``
        :param tuple *args: (optional) Single object representing an Order resource.
        :param dict **kwargs: (optional) order attributes.
        :return: Dictionary that supports attribute-style access and represents the newly created Order resource.
:rtype: dict
"""
if not args and not kwargs:
raise Exception('attributes for Order are missing')
initial_attributes = args[0] if args else kwargs
attributes = dict((k, v) for k, v in initial_attributes.items())
attributes.update({'service': self.SERVICE})
_, _, order = self.http_client.post("/orders", body=attributes)
return order
def update(self, document_number, *args, **kwargs):
"""
        Update an Order
        Updates an Order's information
        If the specified Order does not exist, this query will return an error
        **Notice** if you want to update an Order, you **must** make sure the Order's name is unique within the scope of the specified resource
        :calls: ``put /orders/{document_number}``
        :param int id: Unique identifier of an Order.
        :param tuple *args: (optional) Single object representing an Order resource whose attributes should be updated.
        :param dict **kwargs: (optional) Order attributes to update.
        :return: Dictionary that supports attribute-style access and represents the updated Order resource.
:rtype: dict
"""
if not args and not kwargs:
raise Exception('attributes for Order are missing')
attributes = args[0] if args else kwargs
attributes = dict((k, v) for k, v in attributes.items())
attributes.update({'service': self.SERVICE})
_, _, order = self.http_client.put("/orders/{document_number}".format(document_number=document_number),
body=attributes)
return order
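# Hedged usage sketch (the http_client setup and the response field name are
# illustrative; the OrderRows payload follows the structure documented above):
#
#     service = OrderService(http_client)
#     order = service.create(CustomerNumber='1', OrderRows=[
#         {'ArticleNumber': '11', 'DeliveredQuantity': '10',
#          'Description': 'Trycksak: 4 sid A4',
#          'OrderedQuantity': '10', 'Unit': 'st'},
#     ])
#     order = service.retrieve(order['DocumentNumber'])  # field name illustrative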
| 35.095238
| 142
| 0.608322
|
0b358a45217a9f38150280dadf261efe1e634aef
| 1,783
|
py
|
Python
|
pyaz/sql/midb/short_term_retention_policy/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/sql/midb/short_term_retention_policy/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | null | null | null |
pyaz/sql/midb/short_term_retention_policy/__init__.py
|
py-az-cli/py-az-cli
|
9a7dc44e360c096a5a2f15595353e9dad88a9792
|
[
"MIT"
] | 1
|
2022-02-03T09:12:01.000Z
|
2022-02-03T09:12:01.000Z
|
from .... pyaz_utils import _call_az
def set(managed_instance, name, resource_group, retention_days, deleted_time=None, no_wait=None):
'''
Update short term retention for automated backups on a single database.
Required Parameters:
- managed_instance -- Name of the Azure SQL managed instance.
- name -- The name of the Azure SQL Managed Database.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - retention_days -- New backup short term retention policy in days. Valid policy for a live database is 7-35 days; valid policy for dropped databases is 0-35 days.
Optional Parameters:
    - deleted_time -- If specified, updates retention days for a deleted database instead of an existing database. Must match the deleted time of a deleted database on the source Managed Instance.
- no_wait -- Do not wait for the long-running operation to finish.
'''
return _call_az("az sql midb short-term-retention-policy set", locals())
def show(managed_instance, name, resource_group, deleted_time=None):
'''
Show short term retention for automated backups on a single database.
Required Parameters:
- managed_instance -- Name of the Azure SQL managed instance.
- name -- The name of the Azure SQL Managed Database.
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
    - deleted_time -- If specified, shows retention days for a deleted database instead of an existing database. Must match the deleted time of a deleted database on the source Managed Instance.
'''
return _call_az("az sql midb short-term-retention-policy show", locals())
| 52.441176
| 196
| 0.741447
|
f41ab82fc3dd141ca5d9cbbd76926da9b6240aaa
| 40,390
|
py
|
Python
|
core/domain/topic_services.py
|
cclauss/oppia
|
7ad9d06e434c589f0aaa015252ca65872557b7eb
|
[
"Apache-2.0"
] | null | null | null |
core/domain/topic_services.py
|
cclauss/oppia
|
7ad9d06e434c589f0aaa015252ca65872557b7eb
|
[
"Apache-2.0"
] | null | null | null |
core/domain/topic_services.py
|
cclauss/oppia
|
7ad9d06e434c589f0aaa015252ca65872557b7eb
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for operations on topics, and related models."""
import collections
import copy
import logging
from core.domain import role_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import topic_domain
from core.domain import user_services
from core.platform import models
import feconf
(topic_models,) = models.Registry.import_models([models.NAMES.topic])
datastore_services = models.Registry.import_datastore_services()
memcache_services = models.Registry.import_memcache_services()
def _migrate_subtopics_to_latest_schema(versioned_subtopics):
"""Holds the responsibility of performing a step-by-step, sequential update
of the subtopics structure based on the schema version of the input
subtopics dictionary. If the current subtopics schema changes, a
new conversion function must be added and some code appended to this
function to account for that new version.
Args:
versioned_subtopics: A dict with two keys:
- schema_version: int. The schema version for the subtopics dict.
- subtopics: list(dict). The list of dicts comprising the topic's
subtopics.
Raises:
Exception: The schema version of subtopics is outside of what
is supported at present.
"""
subtopic_schema_version = versioned_subtopics['schema_version']
if not (1 <= subtopic_schema_version
<= feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION):
raise Exception(
'Sorry, we can only process v1-v%d subtopic schemas at '
'present.' % feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION)
while (subtopic_schema_version <
feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION):
topic_domain.Topic.update_subtopics_from_model(
versioned_subtopics, subtopic_schema_version)
subtopic_schema_version += 1
# Repository GET methods.
def _get_topic_memcache_key(topic_id, version=None):
"""Returns a memcache key for the topic.
Args:
topic_id: str. ID of the topic.
version: int. The version of the topic.
Returns:
str. The memcache key of the topic.
"""
if version:
return 'topic-version:%s:%s' % (topic_id, version)
else:
return 'topic:%s' % topic_id
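# Example keys produced above: 'topic-version:abc123:5' for version 5 and
# 'topic:abc123' for the latest version of topic 'abc123'.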
def get_topic_from_model(topic_model):
"""Returns a topic domain object given a topic model loaded
from the datastore.
Args:
topic_model: TopicModel. The topic model loaded from the
datastore.
Returns:
topic. A Topic domain object corresponding to the given
topic model.
"""
versioned_subtopics = {
'schema_version': topic_model.subtopic_schema_version,
'subtopics': copy.deepcopy(topic_model.subtopics)
}
if (topic_model.subtopic_schema_version !=
feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION):
_migrate_subtopics_to_latest_schema(versioned_subtopics)
return topic_domain.Topic(
topic_model.id, topic_model.name,
topic_model.description, topic_model.canonical_story_ids,
topic_model.additional_story_ids, topic_model.uncategorized_skill_ids,
[
topic_domain.Subtopic.from_dict(subtopic)
for subtopic in versioned_subtopics['subtopics']
],
versioned_subtopics['schema_version'],
topic_model.next_subtopic_id,
topic_model.language_code,
topic_model.version, topic_model.created_on,
topic_model.last_updated)
def get_all_topic_summaries():
"""Returns the summaries of all topics present in the datastore.
Returns:
list(TopicSummary). The list of summaries of all topics present in the
datastore.
"""
topic_summaries_models = topic_models.TopicSummaryModel.get_all()
topic_summaries = [
get_topic_summary_from_model(summary)
for summary in topic_summaries_models]
return topic_summaries
def get_all_skill_ids_assigned_to_some_topic():
"""Returns the ids of all the skills that are linked to some topics.
Returns:
set([str]). The ids of all the skills linked to some topic.
"""
skill_ids = set([])
all_topic_models = topic_models.TopicModel.get_all()
all_topics = [get_topic_from_model(topic) for topic in all_topic_models]
for topic in all_topics:
skill_ids.update(topic.get_all_skill_ids())
return skill_ids
def get_topic_summary_from_model(topic_summary_model):
"""Returns a domain object for an Oppia topic summary given a
topic summary model.
Args:
topic_summary_model: TopicSummaryModel.
Returns:
TopicSummary.
"""
return topic_domain.TopicSummary(
topic_summary_model.id, topic_summary_model.name,
topic_summary_model.canonical_name,
topic_summary_model.language_code,
topic_summary_model.version,
topic_summary_model.canonical_story_count,
topic_summary_model.additional_story_count,
topic_summary_model.uncategorized_skill_count,
topic_summary_model.subtopic_count,
topic_summary_model.total_skill_count,
topic_summary_model.topic_model_created_on,
topic_summary_model.topic_model_last_updated
)
def get_topic_by_id(topic_id, strict=True, version=None):
"""Returns a domain object representing a topic.
Args:
topic_id: str. ID of the topic.
strict: bool. Whether to fail noisily if no topic with the given
id exists in the datastore.
version: int or None. The version number of the topic to be
retrieved. If it is None, the latest version will be retrieved.
Returns:
Topic or None. The domain object representing a topic with the
given id, or None if it does not exist.
"""
topic_memcache_key = _get_topic_memcache_key(topic_id, version=version)
memcached_topic = memcache_services.get_multi(
[topic_memcache_key]).get(topic_memcache_key)
if memcached_topic is not None:
return memcached_topic
else:
topic_model = topic_models.TopicModel.get(
topic_id, strict=strict, version=version)
if topic_model:
topic = get_topic_from_model(topic_model)
memcache_services.set_multi({topic_memcache_key: topic})
return topic
else:
return None
def get_topics_by_ids(topic_ids):
"""Returns a list of topics matching the IDs provided.
Args:
topic_ids: list(str). List of IDs to get topics for.
Returns:
list(Topic|None). The list of topics corresponding to given ids
(with None in place of topic ids corresponding to deleted topics).
"""
all_topic_models = topic_models.TopicModel.get_multi(topic_ids)
topics = [
get_topic_from_model(topic_model) if topic_model is not None else None
for topic_model in all_topic_models]
return topics
def get_topic_by_name(topic_name):
"""Returns a domain object representing a topic.
Args:
topic_name: str. The name of the topic.
Returns:
Topic or None. The domain object representing a topic with the
given id, or None if it does not exist.
"""
topic_model = topic_models.TopicModel.get_by_name(topic_name)
if topic_model is None:
return None
topic = get_topic_from_model(topic_model)
return topic
def get_topic_summary_by_id(topic_id, strict=True):
"""Returns a domain object representing a topic summary.
Args:
topic_id: str. ID of the topic summary.
strict: bool. Whether to fail noisily if no topic summary with the given
id exists in the datastore.
Returns:
TopicSummary or None. The topic summary domain object corresponding to
a topic with the given topic_id, if it exists, or else None.
"""
topic_summary_model = topic_models.TopicSummaryModel.get(
topic_id, strict=strict)
if topic_summary_model:
topic_summary = get_topic_summary_from_model(topic_summary_model)
return topic_summary
else:
return None
def get_new_topic_id():
"""Returns a new topic id.
Returns:
str. A new topic id.
"""
return topic_models.TopicModel.get_new_id('')
def _create_topic(committer_id, topic, commit_message, commit_cmds):
"""Creates a new topic, and ensures that rights for a new topic
are saved first.
Args:
committer_id: str. ID of the committer.
topic: Topic. topic domain object.
commit_message: str. A description of changes made to the topic.
commit_cmds: list(TopicChange). A list of TopicChange objects that
represent change commands made to the given topic.
"""
topic.validate()
create_new_topic_rights(topic.id, committer_id)
model = topic_models.TopicModel(
id=topic.id,
name=topic.name,
canonical_name=topic.canonical_name,
description=topic.description,
language_code=topic.language_code,
canonical_story_ids=topic.canonical_story_ids,
additional_story_ids=topic.additional_story_ids,
uncategorized_skill_ids=topic.uncategorized_skill_ids,
subtopic_schema_version=topic.subtopic_schema_version,
next_subtopic_id=topic.next_subtopic_id,
subtopics=[subtopic.to_dict() for subtopic in topic.subtopics]
)
commit_cmd_dicts = [commit_cmd.to_dict() for commit_cmd in commit_cmds]
model.commit(committer_id, commit_message, commit_cmd_dicts)
topic.version += 1
create_topic_summary(topic.id)
def save_new_topic(committer_id, topic):
"""Saves a new topic.
Args:
committer_id: str. ID of the committer.
topic: Topic. Topic to be saved.
Raises:
Exception. Topic with same name already exists.
"""
existing_topic = get_topic_by_name(topic.name)
if existing_topic is not None:
raise Exception('Topic with name \'%s\' already exists' % topic.name)
commit_message = (
'New topic created with name \'%s\'.' % topic.name)
_create_topic(
committer_id, topic, commit_message, [topic_domain.TopicChange({
'cmd': topic_domain.CMD_CREATE_NEW,
'name': topic.name
})])
def apply_change_list(topic_id, change_list):
"""Applies a changelist to a topic and returns the result. The incoming
    changelist should not have simultaneous creation and deletion of
subtopics.
Args:
topic_id: str. ID of the given topic.
change_list: list(TopicChange). A change list to be applied to the given
topic.
Raises:
        Exception. The incoming changelist had simultaneous creation and
deletion of subtopics.
Returns:
Topic, dict, list(int), list(int), list(SubtopicPageChange).
The modified topic object, the modified subtopic pages dict keyed
by subtopic page id containing the updated domain objects of
each subtopic page, a list of ids of the deleted subtopics,
a list of ids of the newly created subtopics and a list of changes
applied to modified subtopic pages.
"""
topic = get_topic_by_id(topic_id)
newly_created_subtopic_ids = []
existing_subtopic_page_ids_to_be_modified = []
deleted_subtopic_ids = []
modified_subtopic_pages_list = []
modified_subtopic_pages = {}
modified_subtopic_change_cmds = collections.defaultdict(list)
for change in change_list:
if (change.cmd ==
subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY):
if change.subtopic_id < topic.next_subtopic_id:
existing_subtopic_page_ids_to_be_modified.append(
change.subtopic_id)
subtopic_page_id = (
subtopic_page_domain.SubtopicPage.get_subtopic_page_id(
topic_id, change.subtopic_id))
modified_subtopic_change_cmds[subtopic_page_id].append(
change)
modified_subtopic_pages_list = (
subtopic_page_services.get_subtopic_pages_with_ids(
topic_id, existing_subtopic_page_ids_to_be_modified))
for subtopic_page in modified_subtopic_pages_list:
modified_subtopic_pages[subtopic_page.id] = subtopic_page
try:
for change in change_list:
if change.cmd == topic_domain.CMD_ADD_SUBTOPIC:
topic.add_subtopic(change.subtopic_id, change.title)
subtopic_page_id = (
subtopic_page_domain.SubtopicPage.get_subtopic_page_id(
topic_id, change.subtopic_id))
modified_subtopic_pages[subtopic_page_id] = (
subtopic_page_domain.SubtopicPage.create_default_subtopic_page( #pylint: disable=line-too-long
change.subtopic_id, topic_id)
)
modified_subtopic_change_cmds[subtopic_page_id].append(
subtopic_page_domain.SubtopicPageChange({
'cmd': 'create_new',
'topic_id': topic_id,
'subtopic_id': change.subtopic_id
}))
newly_created_subtopic_ids.append(change.subtopic_id)
elif change.cmd == topic_domain.CMD_DELETE_SUBTOPIC:
topic.delete_subtopic(change.subtopic_id)
if change.subtopic_id in newly_created_subtopic_ids:
raise Exception(
'The incoming changelist had simultaneous'
' creation and deletion of subtopics.')
deleted_subtopic_ids.append(change.subtopic_id)
elif change.cmd == topic_domain.CMD_ADD_UNCATEGORIZED_SKILL_ID:
topic.add_uncategorized_skill_id(
change.new_uncategorized_skill_id)
elif change.cmd == topic_domain.CMD_REMOVE_UNCATEGORIZED_SKILL_ID:
topic.remove_uncategorized_skill_id(
change.uncategorized_skill_id)
elif change.cmd == topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC:
topic.move_skill_id_to_subtopic(
change.old_subtopic_id, change.new_subtopic_id,
change.skill_id)
elif change.cmd == topic_domain.CMD_REMOVE_SKILL_ID_FROM_SUBTOPIC:
topic.remove_skill_id_from_subtopic(
change.subtopic_id, change.skill_id)
elif change.cmd == topic_domain.CMD_UPDATE_TOPIC_PROPERTY:
if (change.property_name ==
topic_domain.TOPIC_PROPERTY_NAME):
topic.update_name(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_DESCRIPTION):
topic.update_description(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_CANONICAL_STORY_IDS):
topic.update_canonical_story_ids(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_ADDITIONAL_STORY_IDS):
topic.update_additional_story_ids(change.new_value)
elif (change.property_name ==
topic_domain.TOPIC_PROPERTY_LANGUAGE_CODE):
topic.update_language_code(change.new_value)
else:
raise Exception('Invalid change dict.')
elif (change.cmd ==
subtopic_page_domain.CMD_UPDATE_SUBTOPIC_PAGE_PROPERTY):
subtopic_page_id = (
subtopic_page_domain.SubtopicPage.get_subtopic_page_id(
topic_id, change.subtopic_id))
if ((modified_subtopic_pages[subtopic_page_id] is None) or
(change.subtopic_id in deleted_subtopic_ids)):
raise Exception(
'The subtopic with id %s doesn\'t exist' % (
change.subtopic_id))
if (change.property_name ==
subtopic_page_domain.
SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_HTML):
modified_subtopic_pages[
subtopic_page_id].update_page_contents_html(
change.new_value)
elif (change.property_name ==
subtopic_page_domain.
SUBTOPIC_PAGE_PROPERTY_PAGE_CONTENTS_AUDIO):
modified_subtopic_pages[
subtopic_page_id].update_page_contents_audio(
change.new_value)
else:
raise Exception('Invalid change dict.')
elif change.cmd == topic_domain.CMD_UPDATE_SUBTOPIC_PROPERTY:
if (change.property_name ==
topic_domain.SUBTOPIC_PROPERTY_TITLE):
topic.update_subtopic_title(
change.subtopic_id, change.new_value)
else:
raise Exception('Invalid change dict.')
elif (
change.cmd ==
topic_domain.CMD_MIGRATE_SUBTOPIC_SCHEMA_TO_LATEST_VERSION):
# Loading the topic model from the datastore into a
# Topic domain object automatically converts it to use the
# latest schema version. As a result, simply resaving the
# topic is sufficient to apply the schema migration.
continue
else:
raise Exception('Invalid change dict.')
return (
topic, modified_subtopic_pages, deleted_subtopic_ids,
newly_created_subtopic_ids, modified_subtopic_change_cmds)
except Exception as e:
logging.error(
'%s %s %s %s' % (
e.__class__.__name__, e, topic_id, change_list)
)
raise
def _save_topic(committer_id, topic, commit_message, change_list):
"""Validates a topic and commits it to persistent storage. If
successful, increments the version number of the incoming topic domain
object by 1.
Args:
committer_id: str. ID of the given committer.
topic: Topic. The topic domain object to be saved.
commit_message: str. The commit message.
change_list: list(TopicChange). List of changes applied to a topic.
Raises:
Exception: Received invalid change list.
Exception: The topic model and the incoming topic domain
object have different version numbers.
"""
if not change_list:
raise Exception(
'Unexpected error: received an invalid change list when trying to '
'save topic %s: %s' % (topic.id, change_list))
topic.validate()
topic_model = topic_models.TopicModel.get(topic.id, strict=False)
if topic_model is None:
topic_model = topic_models.TopicModel(id=topic.id)
else:
if topic.version > topic_model.version:
raise Exception(
'Unexpected error: trying to update version %s of topic '
'from version %s. Please reload the page and try again.'
% (topic_model.version, topic.version))
elif topic.version < topic_model.version:
raise Exception(
'Trying to update version %s of topic from version %s, '
'which is too old. Please reload the page and try again.'
% (topic_model.version, topic.version))
topic_model.description = topic.description
topic_model.name = topic.name
topic_model.canonical_story_ids = topic.canonical_story_ids
topic_model.additional_story_ids = topic.additional_story_ids
topic_model.uncategorized_skill_ids = topic.uncategorized_skill_ids
topic_model.subtopics = [subtopic.to_dict() for subtopic in topic.subtopics]
topic_model.subtopic_schema_version = topic.subtopic_schema_version
topic_model.next_subtopic_id = topic.next_subtopic_id
topic_model.language_code = topic.language_code
change_dicts = [change.to_dict() for change in change_list]
topic_model.commit(committer_id, commit_message, change_dicts)
memcache_services.delete(_get_topic_memcache_key(topic.id))
topic.version += 1
def update_topic_and_subtopic_pages(
committer_id, topic_id, change_list, commit_message):
"""Updates a topic and its subtopic pages. Commits changes.
Args:
committer_id: str. The id of the user who is performing the update
action.
topic_id: str. The topic id.
change_list: list(TopicChange and SubtopicPageChange). These changes are
applied in sequence to produce the resulting topic.
commit_message: str or None. A description of changes made to the
topic.
Raises:
        ValueError: Expected a commit message, received none.
"""
if not commit_message:
raise ValueError(
'Expected a commit message, received none.')
(
updated_topic, updated_subtopic_pages_dict,
deleted_subtopic_ids, newly_created_subtopic_ids,
updated_subtopic_pages_change_cmds_dict
) = apply_change_list(topic_id, change_list)
_save_topic(
committer_id, updated_topic, commit_message, change_list
)
    # The following loop deletes the subtopic pages that already exist in the
    # datastore and are marked for deletion in the current changelist.
for subtopic_id in deleted_subtopic_ids:
if subtopic_id not in newly_created_subtopic_ids:
subtopic_page_services.delete_subtopic_page(
committer_id, topic_id, subtopic_id)
for subtopic_page_id in updated_subtopic_pages_dict:
subtopic_page = updated_subtopic_pages_dict[subtopic_page_id]
subtopic_page_change_list = updated_subtopic_pages_change_cmds_dict[
subtopic_page_id]
subtopic_id = subtopic_page.get_subtopic_id_from_subtopic_page_id()
# The following condition prevents the creation of subtopic pages that
# were deleted above.
if subtopic_id not in deleted_subtopic_ids:
subtopic_page_services.save_subtopic_page(
committer_id, subtopic_page, commit_message,
subtopic_page_change_list)
create_topic_summary(topic_id)
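# Illustrative usage sketch (hypothetical ids): building a change list and
# committing it through update_topic_and_subtopic_pages, mirroring the helper
# functions below:
#   change_list = [topic_domain.TopicChange({
#       'cmd': 'update_topic_property',
#       'property_name': 'name',
#       'old_value': 'Old name',
#       'new_value': 'New name'
#   })]
#   update_topic_and_subtopic_pages(
#       'user_id_0', 'topic_id_0', change_list, 'Renamed the topic')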
def delete_uncategorized_skill(user_id, topic_id, uncategorized_skill_id):
"""Removes skill with given id from the topic.
Args:
user_id: str. The id of the user who is performing the action.
topic_id: str. The id of the topic from which to remove the skill.
uncategorized_skill_id: str. The uncategorized skill to remove from the
topic.
"""
change_list = [topic_domain.TopicChange({
'cmd': 'remove_uncategorized_skill_id',
'uncategorized_skill_id': uncategorized_skill_id
})]
update_topic_and_subtopic_pages(
user_id, topic_id, change_list,
'Removed %s from uncategorized skill ids' % uncategorized_skill_id)
def add_uncategorized_skill(user_id, topic_id, uncategorized_skill_id):
"""Adds a skill with given id to the topic.
Args:
user_id: str. The id of the user who is performing the action.
topic_id: str. The id of the topic to which the skill is to be added.
uncategorized_skill_id: str. The id of the uncategorized skill to add
to the topic.
"""
change_list = [topic_domain.TopicChange({
'cmd': 'add_uncategorized_skill_id',
'new_uncategorized_skill_id': uncategorized_skill_id
})]
update_topic_and_subtopic_pages(
user_id, topic_id, change_list,
'Added %s to uncategorized skill ids' % uncategorized_skill_id)
def delete_story(user_id, topic_id, story_id):
"""Removes story with given id from the topic.
NOTE TO DEVELOPERS: Presently, this function only removes story_id from
canonical_story_ids list.
Args:
user_id: str. The id of the user who is performing the action.
topic_id: str. The id of the topic from which to remove the story.
story_id: str. The story to remove from the topic.
"""
topic = get_topic_by_id(topic_id)
old_canonical_story_ids = copy.deepcopy(topic.canonical_story_ids)
topic.delete_story(story_id)
change_list = [topic_domain.TopicChange({
'cmd': 'update_topic_property',
'property_name': 'canonical_story_ids',
'old_value': old_canonical_story_ids,
'new_value': topic.canonical_story_ids
})]
update_topic_and_subtopic_pages(
user_id, topic_id, change_list,
'Removed %s from canonical story ids' % story_id)
def add_canonical_story(user_id, topic_id, story_id):
"""Adds a story to the canonical story id list of a topic.
Args:
user_id: str. The id of the user who is performing the action.
topic_id: str. The id of the topic to which the story is to be added.
story_id: str. The story to add to the topic.
"""
topic = get_topic_by_id(topic_id)
old_canonical_story_ids = copy.deepcopy(topic.canonical_story_ids)
topic.add_canonical_story(story_id)
change_list = [topic_domain.TopicChange({
'cmd': 'update_topic_property',
'property_name': 'canonical_story_ids',
'old_value': old_canonical_story_ids,
'new_value': topic.canonical_story_ids
})]
update_topic_and_subtopic_pages(
user_id, topic_id, change_list,
'Added %s to canonical story ids' % story_id)
def delete_topic(committer_id, topic_id, force_deletion=False):
"""Deletes the topic with the given topic_id.
Args:
committer_id: str. ID of the committer.
topic_id: str. ID of the topic to be deleted.
force_deletion: bool. If true, the topic and its history are fully
deleted and are unrecoverable. Otherwise, the topic and all
its history are marked as deleted, but the corresponding models are
still retained in the datastore. This last option is the preferred
one.
Raises:
ValueError: User does not have enough rights to delete a topic.
"""
topic_rights_model = topic_models.TopicRightsModel.get(topic_id)
topic_rights_model.delete(
committer_id, feconf.COMMIT_MESSAGE_TOPIC_DELETED,
force_deletion=force_deletion)
# Delete the summary of the topic (regardless of whether
# force_deletion is True or not).
delete_topic_summary(topic_id)
topic_model = topic_models.TopicModel.get(topic_id)
for subtopic in topic_model.subtopics:
subtopic_page_services.delete_subtopic_page(
committer_id, topic_id, subtopic['id'])
topic_model.delete(
committer_id, feconf.COMMIT_MESSAGE_TOPIC_DELETED,
force_deletion=force_deletion)
# This must come after the topic is retrieved. Otherwise the memcache
# key will be reinstated.
topic_memcache_key = _get_topic_memcache_key(topic_id)
memcache_services.delete(topic_memcache_key)
def delete_topic_summary(topic_id):
"""Delete a topic summary model.
Args:
topic_id: str. ID of the topic whose topic summary is to
be deleted.
"""
topic_models.TopicSummaryModel.get(topic_id).delete()
def create_topic_summary(topic_id):
"""Creates and stores a summary of the given topic.
Args:
topic_id: str. ID of the topic.
"""
topic = get_topic_by_id(topic_id)
topic_summary = compute_summary_of_topic(topic)
save_topic_summary(topic_summary)
def compute_summary_of_topic(topic):
"""Create a TopicSummary domain object for a given Topic domain
object and return it.
Args:
topic: Topic. The topic object for which the summary is to be computed.
Returns:
TopicSummary. The computed summary for the given topic.
"""
topic_model_canonical_story_count = len(topic.canonical_story_ids)
topic_model_additional_story_count = len(topic.additional_story_ids)
topic_model_uncategorized_skill_count = len(topic.uncategorized_skill_ids)
topic_model_subtopic_count = len(topic.subtopics)
total_skill_count = topic_model_uncategorized_skill_count
for subtopic in topic.subtopics:
total_skill_count = total_skill_count + len(subtopic.skill_ids)
topic_summary = topic_domain.TopicSummary(
topic.id, topic.name, topic.canonical_name, topic.language_code,
topic.version, topic_model_canonical_story_count,
topic_model_additional_story_count,
topic_model_uncategorized_skill_count, topic_model_subtopic_count,
total_skill_count, topic.created_on, topic.last_updated
)
return topic_summary
def save_topic_summary(topic_summary):
"""Save a topic summary domain object as a TopicSummaryModel
entity in the datastore.
Args:
        topic_summary: TopicSummary. The topic summary object to be saved in
            the datastore.
"""
topic_summary_model = topic_models.TopicSummaryModel(
id=topic_summary.id,
name=topic_summary.name,
canonical_name=topic_summary.canonical_name,
language_code=topic_summary.language_code,
version=topic_summary.version,
additional_story_count=topic_summary.additional_story_count,
canonical_story_count=topic_summary.canonical_story_count,
uncategorized_skill_count=topic_summary.uncategorized_skill_count,
subtopic_count=topic_summary.subtopic_count,
total_skill_count=topic_summary.total_skill_count,
topic_model_last_updated=topic_summary.topic_model_last_updated,
topic_model_created_on=topic_summary.topic_model_created_on
)
topic_summary_model.put()
def get_topic_rights_from_model(topic_rights_model):
"""Constructs a TopicRights object from the given topic rights model.
Args:
topic_rights_model: TopicRightsModel. Topic rights from the
datastore.
Returns:
TopicRights. The rights object created from the model.
"""
return topic_domain.TopicRights(
topic_rights_model.id,
topic_rights_model.manager_ids,
topic_rights_model.topic_is_published
)
def publish_topic(topic_id, committer_id):
"""Marks the given topic as published.
Args:
topic_id: str. The id of the given topic.
committer_id: str. ID of the committer.
Raises:
Exception. The given topic does not exist.
Exception. The topic is already published.
Exception. The user does not have enough rights to publish the topic.
"""
topic_rights = get_topic_rights(topic_id, strict=False)
if topic_rights is None:
raise Exception('The given topic does not exist')
user = user_services.UserActionsInfo(committer_id)
if role_services.ACTION_CHANGE_TOPIC_STATUS not in user.actions:
raise Exception(
'The user does not have enough rights to publish the topic.')
if topic_rights.topic_is_published:
raise Exception('The topic is already published.')
topic_rights.topic_is_published = True
commit_cmds = [topic_domain.TopicRightsChange({
'cmd': topic_domain.CMD_PUBLISH_TOPIC
})]
save_topic_rights(
topic_rights, committer_id, 'Published the topic', commit_cmds)
def unpublish_topic(topic_id, committer_id):
"""Marks the given topic as unpublished.
Args:
topic_id: str. The id of the given topic.
committer_id: str. ID of the committer.
Raises:
Exception. The given topic does not exist.
Exception. The topic is already unpublished.
Exception. The user does not have enough rights to unpublish the topic.
"""
topic_rights = get_topic_rights(topic_id, strict=False)
if topic_rights is None:
raise Exception('The given topic does not exist')
user = user_services.UserActionsInfo(committer_id)
if role_services.ACTION_CHANGE_TOPIC_STATUS not in user.actions:
raise Exception(
'The user does not have enough rights to unpublish the topic.')
if not topic_rights.topic_is_published:
raise Exception('The topic is already unpublished.')
topic_rights.topic_is_published = False
commit_cmds = [topic_domain.TopicRightsChange({
'cmd': topic_domain.CMD_UNPUBLISH_TOPIC
})]
save_topic_rights(
topic_rights, committer_id, 'Unpublished the topic', commit_cmds)
def save_topic_rights(topic_rights, committer_id, commit_message, commit_cmds):
"""Saves a TopicRights domain object to the datastore.
Args:
topic_rights: TopicRights. The rights object for the given
topic.
committer_id: str. ID of the committer.
commit_message: str. Descriptive message for the commit.
commit_cmds: list(TopicRightsChange). A list of commands describing
what kind of commit was done.
"""
model = topic_models.TopicRightsModel.get(topic_rights.id, strict=False)
model.manager_ids = topic_rights.manager_ids
model.topic_is_published = topic_rights.topic_is_published
commit_cmd_dicts = [commit_cmd.to_dict() for commit_cmd in commit_cmds]
model.commit(committer_id, commit_message, commit_cmd_dicts)
def create_new_topic_rights(topic_id, committer_id):
"""Creates a new topic rights object and saves it to the datastore.
Args:
topic_id: str. ID of the topic.
committer_id: str. ID of the committer.
"""
topic_rights = topic_domain.TopicRights(topic_id, [], False)
commit_cmds = [{'cmd': topic_domain.CMD_CREATE_NEW}]
topic_models.TopicRightsModel(
id=topic_rights.id,
manager_ids=topic_rights.manager_ids,
topic_is_published=topic_rights.topic_is_published
).commit(committer_id, 'Created new topic rights', commit_cmds)
def get_topic_rights(topic_id, strict=True):
"""Retrieves the rights object for the given topic.
Args:
topic_id: str. ID of the topic.
strict: bool. Whether to fail noisily if no topic with a given id
exists in the datastore.
Returns:
TopicRights. The rights object associated with the given topic.
Raises:
EntityNotFoundError. The topic with ID topic_id was not
found in the datastore.
"""
model = topic_models.TopicRightsModel.get(topic_id, strict=strict)
if model is None:
return None
return get_topic_rights_from_model(model)
def get_topic_rights_with_user(user_id):
"""Retrieves the rights object for all topics assigned to given user.
Args:
user_id: str. ID of the user.
Returns:
list(TopicRights). The rights objects associated with the topics
assigned to given user.
"""
topic_rights_models = topic_models.TopicRightsModel.get_by_user(user_id)
return [
get_topic_rights_from_model(model) for model in topic_rights_models
if model is not None]
def get_all_topic_rights():
"""Returns the rights object of all topics present in the datastore.
Returns:
dict. The dict of rights objects of all topics present in
the datastore keyed by topic id.
"""
topic_rights_models = topic_models.TopicRightsModel.get_all()
topic_rights = {}
for model in topic_rights_models:
rights = get_topic_rights_from_model(model)
topic_rights[rights.id] = rights
return topic_rights
def check_can_edit_topic(user, topic_rights):
"""Checks whether the user can edit the given topic.
Args:
user: UserActionsInfo. Object having user_id, role and actions for
given user.
topic_rights: TopicRights or None. Rights object for the given topic.
Returns:
bool. Whether the given user can edit the given topic.
"""
if topic_rights is None:
return False
if role_services.ACTION_EDIT_ANY_TOPIC in user.actions:
return True
if role_services.ACTION_EDIT_OWNED_TOPIC not in user.actions:
return False
if topic_rights.is_manager(user.user_id):
return True
return False
def deassign_user_from_all_topics(committer, user_id):
"""Deassigns given user from all topics assigned to them.
Args:
committer: UserActionsInfo. UserActionsInfo object for the user
who is performing the action.
user_id: str. The ID of the user.
Raises:
Exception. The committer does not have rights to modify a role.
"""
topic_rights_list = get_topic_rights_with_user(user_id)
for topic_rights in topic_rights_list:
topic_rights.manager_ids.remove(user_id)
commit_cmds = [topic_domain.TopicRightsChange({
'cmd': topic_domain.CMD_REMOVE_MANAGER_ROLE,
'removed_user_id': user_id
})]
save_topic_rights(
topic_rights, committer.user_id,
'Removed all assigned topics from %s' % (user_id), commit_cmds)
def assign_role(committer, assignee, new_role, topic_id):
"""Assigns a new role to the user.
Args:
committer: UserActionsInfo. UserActionsInfo object for the user
who is performing the action.
assignee: UserActionsInfo. UserActionsInfo object for the user
whose role is being changed.
new_role: str. The name of the new role. Possible values are:
ROLE_MANAGER
topic_id: str. ID of the topic.
Raises:
Exception. The committer does not have rights to modify a role.
Exception. The assignee is already a manager for the topic.
Exception. The assignee doesn't have enough rights to become a manager.
Exception. The role is invalid.
"""
committer_id = committer.user_id
topic_rights = get_topic_rights(topic_id)
if (role_services.ACTION_MODIFY_ROLES_FOR_ANY_ACTIVITY not in
committer.actions):
logging.error(
'User %s tried to allow user %s to be a %s of topic %s '
'but was refused permission.' % (
committer_id, assignee.user_id, new_role, topic_id))
raise Exception(
'UnauthorizedUserException: Could not assign new role.')
assignee_username = user_services.get_username(assignee.user_id)
if role_services.ACTION_EDIT_OWNED_TOPIC not in assignee.actions:
raise Exception(
'The assignee doesn\'t have enough rights to become a manager.')
old_role = topic_domain.ROLE_NONE
if topic_rights.is_manager(assignee.user_id):
old_role = topic_domain.ROLE_MANAGER
if new_role == topic_domain.ROLE_MANAGER:
if topic_rights.is_manager(assignee.user_id):
            raise Exception('This user is already a manager for this topic.')
topic_rights.manager_ids.append(assignee.user_id)
elif new_role == topic_domain.ROLE_NONE:
if topic_rights.is_manager(assignee.user_id):
topic_rights.manager_ids.remove(assignee.user_id)
else:
old_role = topic_domain.ROLE_NONE
else:
raise Exception('Invalid role: %s' % new_role)
commit_message = 'Changed role of %s from %s to %s' % (
assignee_username, old_role, new_role)
commit_cmds = [topic_domain.TopicRightsChange({
'cmd': topic_domain.CMD_CHANGE_ROLE,
'assignee_id': assignee.user_id,
'old_role': old_role,
'new_role': new_role
})]
save_topic_rights(topic_rights, committer_id, commit_message, commit_cmds)
avg_line_length: 38.320683 | max_line_length: 114 | alphanum_fraction: 0.682174

hexsha: c1957a877ea69974035f42a8db38ff5234fcd531 | size: 13245 | ext: py | lang: Python
max_stars: repo_path: src/Ikarus/strategies/StrategyBase.py | repo_name: bilkosem/Icarus | head_hexsha: 16d66600f1764a43dccd1b19153b906452cdef5a | licenses: ["Apache-2.0"] | count: null | event_min: null | event_max: null
max_issues: repo_path: src/Ikarus/strategies/StrategyBase.py | repo_name: bilkosem/Icarus | head_hexsha: 16d66600f1764a43dccd1b19153b906452cdef5a | licenses: ["Apache-2.0"] | count: 2 | event_min: 2022-01-23T20:27:16.000Z | event_max: 2022-01-30T15:51:35.000Z
max_forks: repo_path: src/Ikarus/strategies/StrategyBase.py | repo_name: bilkosem/Icarus | head_hexsha: 16d66600f1764a43dccd1b19153b906452cdef5a | licenses: ["Apache-2.0"] | count: 1 | event_min: 2022-01-23T22:16:07.000Z | event_max: 2022-01-23T22:16:07.000Z
import json
import logging
from binance.helpers import round_step_size
from sqlalchemy import false
from ..enums import *
import bson
import abc
import itertools
from ..objects import EState, EOrderType, ECommand, EnhancedJSONEncoder
from ..utils import safe_sum, round_step_downward, truncate, safe_multiply, safe_substract
from .. import binance_filters as filters
from ..exceptions import NotImplementedException
logger = logging.getLogger('app')
class StrategyBase(metaclass=abc.ABCMeta):
# NOTE: fee can stay here until a better place is found
fee = 0
def __init__(self, _name, _config, _symbol_info):
self.name = _name
self.alloc_ratio = 0
self.logger = logging.getLogger('app.{}'.format(__name__))
self.config = _config['strategy'][self.name]
self.max_lto = self.config.get('max_lto',1)
# NOTE: Assigning the fee multiple times is not the most optimal solution
StrategyBase.fee = _config['broker'].get('fee', 0)
        # TODO: Rename this to something like strategy_config, because in some modules 'config' means the whole config dict while in others it is just a portion of it
self.quote_currency = _config['broker']['quote_currency']
# TODO: Make proper handling for symbol_info
self.symbol_info = _symbol_info
# NOTE: Hardcoded time-scales list (scales should be in ascending order)
self.min_period = self.config['time_scales'][0]
self.meta_do = list(itertools.product(self.config['time_scales'], self.config['pairs']))
# It seems possible to have this on_STAT_EXIT_EXP() like approach. Surely needs to be tried again.
# Since it facilitates so much new strategy creation and modular implementation
# NOTE: strategywise_alloc_rate determines the available rate of use from the main capital
# If self.strategywise_alloc_rate is 0.25 then this strategy can use max %25 of the main capital
self.strategywise_alloc_rate = 0 # Will be filled by the strategy manager
# NOTE: pairwise_alloc_rate determines the available rate of use from the strategywise allocated capital
# If self.strategywise_alloc_rate is 0.25 then this strategy can use max %25 of the main capital
pass
@staticmethod
def is_lto_dead(trade):
if trade.command == ECommand.CANCEL or trade.status == EState.CLOSED:
return True # Trade is dead
else:
            return False # Trade is alive # Skip evaluation if none of these is true (LTO will be alive until the next cycle)
@staticmethod
async def run_logic(self, analysis_dict, trade_list, ikarus_time, total_qc, free_qc):
"""[summary]
Args:
analysis_dict ([type]): [description]
lto_list ([type]): [description]
df_balance ([type]): [description]
ikarus_time ([type]): [description]
total_qc ([type]): [description]
Returns:
[type]: [description]
"""
# Preliminary condition: all of the config['pairs'] exist in analysis_dict
if not set(self.config['pairs']).issubset(analysis_dict.keys()):
self.logger.warn(f"Configured pair \"{self.config['pairs']}\" does not exist in analysis_dict. Skipping {self.name}.run")
return []
# Initialize trade_dict to be filled
trade_objects = []
# Handle LTOs separately before the new evaluation
# Create a mapping between the pair and lto such as {'BTCUSDT':{...}, ...}
pair_grouped_ltos = {}
alive_lto_counter = 0
in_trade_capital = 0
dead_lto_capital = 0
for lto_idx in range(len(trade_list)):
# If handle_lto_logic fails then it means that the trade_list[lto_idx] is unchanged.
if not await StrategyBase.handle_lto_logic(self, analysis_dict, trade_list[lto_idx], ikarus_time):
self.logger.warn(f"Function failed: 'handle_lto_logic'. Trade info: '{trade_list[lto_idx]._id}', '{trade_list[lto_idx].strategy}'")
pair_grouped_ltos[trade_list[lto_idx].pair] = trade_list[lto_idx]
            # We need to know how many of the LTOs are dead or will be dead
if not StrategyBase.is_lto_dead(trade_list[lto_idx]):
                # NOTE: in_trade_capital is only calculated for LTOs that will last until at least the next candle
#in_trade_capital += lto_list[lto_idx][PHASE_ENTER][TYPE_LIMIT]['amount']
# NOTE: For the enter_expire, PHASE_ENTER can be directly reflected to balance
# market_exit is not considered as dead lto
# The result of the OCO orders is unknown
in_trade_capital = safe_sum(in_trade_capital, trade_list[lto_idx].enter.amount)
alive_lto_counter += 1
# NOTE: TYPE_MARKET PHASE:_EXIT LTOs are considered as alive right here. Not sure if it is a good approach
else:
# Dead capital
dead_lto_capital = safe_sum(dead_lto_capital, trade_list[lto_idx].enter.amount)
# NOTE: Only iterate for the configured pairs. Do not run the strategy if any of them is missing in analysis_dict
total_lto_slot = min(self.max_lto, len(self.config['pairs']))
empty_lto_slot = total_lto_slot - alive_lto_counter
if empty_lto_slot < 1:
            return [] # TODO: Debug this async LTO issue by doing some debugging around here
# Evaluate pairwise_alloc_share
strategy_capital = safe_multiply(total_qc, self.strategywise_alloc_rate)
#for lto in lto_list:
# in_trade_capital += lto[PHASE_ENTER][TYPE_LIMIT]['amount']
free_strategy_capital = safe_substract(strategy_capital, in_trade_capital)
available_capital = min(free_strategy_capital, safe_sum(free_qc, dead_lto_capital))
# TODO: This can be updated to use some kind of precision from the symbol info instead of hardcoded 8
pairwise_alloc_share = truncate(available_capital/empty_lto_slot, 8)
#available_lto_capital = min(pairwise_alloc_share, free_qc+dead_lto_capital)
# Iterate over pairs and make decisions about them
for ao_pair in self.config['pairs']:
# Break if there is no empty_lto_slot left
if empty_lto_slot < 1:
break
# Continue if the LTO of the pair is not dead
if ao_pair in pair_grouped_ltos.keys():
if not StrategyBase.is_lto_dead(pair_grouped_ltos[ao_pair]):
continue
# Perform evaluation
if trade:= await self.make_decision(analysis_dict, ao_pair, ikarus_time, pairwise_alloc_share):
# Apply exchange filters
if not StrategyBase.apply_exchange_filters(trade.enter, self.symbol_info[ao_pair]):
continue
trade_objects.append(trade)
empty_lto_slot -= 1
return trade_objects
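    # Worked numeric sketch of the capital allocation above (illustrative values
    # only): with total_qc=1000, strategywise_alloc_rate=0.25, in_trade_capital=150,
    # free_qc=400, dead_lto_capital=50 and empty_lto_slot=2:
    #   strategy_capital      = 1000 * 0.25        = 250
    #   free_strategy_capital = 250 - 150          = 100
    #   available_capital     = min(100, 400 + 50) = 100
    #   pairwise_alloc_share  = 100 / 2            = 50 (truncated to 8 decimals)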
@staticmethod
async def handle_lto_logic(self, analysis_dict, trade, ikarus_time):
"""
This function decides what to do for the LTOs based on their 'status'
"""
is_success = False
if trade.status == EState.ENTER_EXP:
if self.config['action_mapping'][EState.ENTER_EXP] == ECommand.CANCEL:
is_success = await self.on_cancel(trade)
elif trade.status == EState.EXIT_EXP:
if self.config['action_mapping'][EState.EXIT_EXP] == ECommand.UPDATE:
is_success = await self.on_update(trade, ikarus_time, analysis_dict=analysis_dict)
elif self.config['action_mapping'][EState.EXIT_EXP] == ECommand.MARKET_EXIT:
                # NOTE: Market exit requires the exit prices to be known, so the analysis_dict is passed in
is_success = await StrategyBase.on_market_exit(self, trade, analysis_dict)
elif trade.status == EState.WAITING_EXIT:
            # LTO is entered successfully, so the exit order should be executed
# NOTE: expire of the exit_module can be calculated after the trade entered
is_success = await self.on_waiting_exit(trade, analysis_dict)
else:
is_success = True
return is_success
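    # Illustrative (hypothetical) 'action_mapping' section of a strategy config,
    # showing how trade statuses map to commands in handle_lto_logic above:
    #   "action_mapping": {
    #       EState.ENTER_EXP: ECommand.CANCEL,      # cancel stale entry orders
    #       EState.EXIT_EXP:  ECommand.MARKET_EXIT  # exit at market on expiry
    #   }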
@abc.abstractclassmethod
async def on_update(self):
pass
@staticmethod
async def on_market_exit(self, trade, analysis_dict):
# TODO: Create market exit logic
raise NotImplementedException()
'''
#lto = await StrategyBase._config_market_exit(lto, self.config['exit']['type'])
lto['exit'] = await StrategyBase._create_exit_module(
TYPE_MARKET,
0,
lto['result'][PHASE_ENTER]['quantity'],
analysis_dict[lto['pair']][self.min_period]['close'],
0)
lto['exit'][TYPE_MARKET] = await StrategyBase.apply_exchange_filters(lto, self.symbol_info[lto['pair']])
trade.exi
trade.command = ECommand.MARKET_EXIT
self.logger.info(f'LTO: market exit configured') # TODO: Add orderId
'''
return trade
@abc.abstractclassmethod
async def on_waiting_exit(self):
pass
@abc.abstractclassmethod
async def on_closed(self):
pass
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, 'run') and
callable(subclass.run) and
hasattr(subclass, 'dump_to') and
callable(subclass.dump_to) or
NotImplemented)
@staticmethod
def _eval_future_candle_time(start_time, count, minute): return bson.Int64(start_time + count*minute*60*1000)
@staticmethod
async def _config_market_exit(lto, type):
# TODO: NEXT NEXT Integrate fee to market order
# Continue here
# TODO: Integrate price to market order, even if it has no use
# For now, it works and I am not gonna touch it for a rework
lto['action'] = ACTN_MARKET_EXIT
lto['exit'][TYPE_MARKET] = {
'amount': lto['exit'][type]['amount'],
'quantity': lto['exit'][type]['quantity'],
'orderId': '',
}
return lto
@staticmethod
def apply_exchange_filters(trade_order, symbol_info):
        # TODO: Make the function order-specific using trade_order instead of trade
        """
        - Call this method prior to any order placement
        - Applies the exchange filters of the pair to the given order
        - This method does not check whether the current conditions are good to go.
          If a filter is not satisfied, an exception may occur; validation costs
          time and may be added in the future.
        - Separating enter and exit does not make sense since the filters are
          valid for both sides.
        Returns:
            bool: True if the order passed (and was adjusted to) the filters,
            False otherwise.
        """
# LOT_SIZE
# https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#lot_size
if result := filters.lot_size(trade_order.quantity, symbol_info):
trade_order.quantity = result
else:
#logger.error(f"Filter failure: LOT_SIZE. {trade.strategy} in phase {phase} with quantity {str(trade.enter.quantity)}")
return False
# PRICE_FILTER
# https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#price_filter
if type(trade_order).__name__ == EOrderType.MARKET:
pass
elif type(trade_order).__name__ == EOrderType.LIMIT:
trade_order.set_price(round_step_downward(trade_order.price, float(symbol_info['filters'][0]['tickSize']))) # Fixing PRICE_FILTER: tickSize
if trade_order.price > float(symbol_info['filters'][0]['maxPrice']):
pass
# TODO: BUG: NEXT: Add proper error handling or check for the prices
elif type(trade_order).__name__ == EOrderType.OCO:
trade_order.set_price(round_step_downward(trade_order.price, float(symbol_info['filters'][0]['tickSize']))) # Fixing PRICE_FILTER: tickSize
trade_order.stopPrice = round_step_downward(trade_order.stopPrice, float(symbol_info['filters'][0]['tickSize']))
trade_order.stopLimitPrice = round_step_downward(trade_order.stopLimitPrice, float(symbol_info['filters'][0]['tickSize']))
if not filters.min_notional(trade_order.stopPrice, trade_order.quantity, symbol_info):
logger.warn(f"Trade object skipped due to MIN_NOTIONAL filter for {symbol_info['symbol']}. NTO: {json.dumps(trade_order, cls=EnhancedJSONEncoder)}")
return False
# MIN_NOTIONAL
# https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#min_notional
if not filters.min_notional(trade_order.price, trade_order.quantity, symbol_info):
logger.warn(f"Trade object skipped due to MIN_NOTIONAL filter for {symbol_info['symbol']}. NTO: {json.dumps(trade_order, cls=EnhancedJSONEncoder)}")
return False
return True
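    # Minimal usage sketch (illustrative; assumes a limit order object with
    # 'price'/'quantity' attributes and a Binance-style symbol_info dict):
    #   if StrategyBase.apply_exchange_filters(trade.enter, symbol_info['BTCUSDT']):
    #       pass  # safe to place the order; quantity/price were rounded in place
    #   else:
    #       pass  # a LOT_SIZE or MIN_NOTIONAL filter failed; skip the trade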
avg_line_length: 44.003322 | max_line_length: 174 | alphanum_fraction: 0.649

hexsha: 5b7ad97e8a296dd0833d9093c60cb5eadfa07deb | size: 802 | ext: py | lang: Python
max_stars: repo_path: webapp/models.py | repo_name: Build-Week-AirBnB-Price-Finder/airbnb-build-ttdsft57 | head_hexsha: a375711376c02203e47e35d36460023cb670890f | licenses: ["MIT"] | count: 2 | event_min: 2021-02-02T20:31:20.000Z | event_max: 2021-02-02T23:50:22.000Z
max_issues: repo_path: webapp/models.py | repo_name: Build-Week-AirBnB-Price-Finder/airbnb-build-ttdsft57 | head_hexsha: a375711376c02203e47e35d36460023cb670890f | licenses: ["MIT"] | count: 3 | event_min: 2021-02-03T21:33:54.000Z | event_max: 2021-02-04T18:17:31.000Z
max_forks: repo_path: webapp/models.py | repo_name: Build-Week-AirBnB-Price-Finder/airbnb-build-ttdsft57 | head_hexsha: a375711376c02203e47e35d36460023cb670890f | licenses: ["MIT"] | count: null | event_min: null | event_max: null
"""SQLAlchemy models for AIrBnB listings"""
from flask_sqlalchemy import SQLAlchemy
DB = SQLAlchemy()
# Listing Table
class Listing(DB.Model):
"""AIrBnB listings corresponding to Hosts"""
id = DB.Column(DB.Integer, primary_key=True)
property_type = DB.Column(DB.String, nullable=False)
room_type = DB.Column(DB.String, nullable=False)
accommodates = DB.Column(DB.Integer, nullable=False)
bedrooms = DB.Column(DB.Integer, nullable=False)
baths = DB.Column(DB.Numeric, nullable=False)
zip = DB.Column(DB.Integer, nullable=False)
def __repr__(self):
rep = f"""Id: {self.id} Property Type: {self.property_type} Room Type: {self.room_type} Accommodates: {self.accommodates} Bedrooms: {self.bedrooms} Baths: {self.baths} Zip: {self.zip}"""
return rep
avg_line_length: 36.454545 | max_line_length: 194 | alphanum_fraction: 0.705736

hexsha: f5d9247cfb6d8965253122f1fa7cc8baa2c098e7 | size: 5069 | ext: py | lang: Python
max_stars: repo_path: labs/lab6/net.py | repo_name: jamestiotio/networks | head_hexsha: 8967ee34c423989ff68eec650ba6ebb492499cb4 | licenses: ["MIT"] | count: null | event_min: null | event_max: null
max_issues: repo_path: labs/lab6/net.py | repo_name: jamestiotio/networks | head_hexsha: 8967ee34c423989ff68eec650ba6ebb492499cb4 | licenses: ["MIT"] | count: null | event_min: null | event_max: null
max_forks: repo_path: labs/lab6/net.py | repo_name: jamestiotio/networks | head_hexsha: 8967ee34c423989ff68eec650ba6ebb492499cb4 | licenses: ["MIT"] | count: null | event_min: null | event_max: null
#!/usr/bin/env python
# Lab 6 network script
# Based on original BGP exercise
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.log import lg, info, setLogLevel
from mininet.cli import CLI
from mininet.node import Switch, OVSKernelSwitch
from mininet.node import OVSController
from argparse import ArgumentParser
import sys
import os
import termcolor as T
import time
setLogLevel("info")
parser = ArgumentParser("Configure simple network in Mininet.")
parser.add_argument("--sleep", default=3, type=int)
args = parser.parse_args()
class myTopo(Topo):
"Simple topology example."
def __init__(self):
"Create custom topo."
# Initialize topology
Topo.__init__(self)
# Add external AS and switches
serverOne = self.addHost("extH1", ip="8.8.8.2/24")
serverTwo = self.addHost("extH2", ip="8.8.8.8/24")
extGW = self.addHost("extGW", ip="8.8.8.1/24")
extSwitch = self.addSwitch("extS1")
self.addLink(serverOne, extSwitch)
self.addLink(serverTwo, extSwitch)
self.addLink(extGW, extSwitch)
# Add internal hosts and switches
serverOne = self.addHost("srv1", ip="10.0.0.10/24")
serverTwo = self.addHost("srv2", ip="10.0.0.11/24")
intGW = self.addHost("intGW")
desktops = [self.addHost("h%d" % i, ip="0.0.0.0/24") for i in range(5)]
leftSwitch = self.addSwitch("s1")
rightSwitch = self.addSwitch("s2")
# Add links
self.addLink(serverOne, leftSwitch)
self.addLink(serverTwo, leftSwitch)
self.addLink(leftSwitch, rightSwitch)
self.addLink(intGW, rightSwitch)
for i in range(len(desktops)):
self.addLink(rightSwitch, "h%d" % i)
self.addLink(intGW, extGW)
return
def log(s, col="green"):
print(T.colored(s, col))
def enableNAT(net, hostn):
# this assumes that internal interface is eth0, external interface is eth1, and the network is 10.0.0.0/24
host = net.getNodeByName(hostn)
host.cmd(
"iptables -A FORWARD -o %s-eth1 -i %s-eth0 -s 10.0.0.0/24 -m conntrack --ctstate NEW -j ACCEPT"
% (hostn, hostn)
)
host.cmd("iptables -A FORWARD -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT")
host.cmd("iptables -t nat -F POSTROUTING")
host.cmd("iptables -t nat -A POSTROUTING -o %s-eth1 -j MASQUERADE" % hostn)
def startWebserver(net, hostname, text="Default web server"):
host = net.getNodeByName(hostname)
return host.popen("python webserver.py --text '%s'" % text, shell=True)
def main():
os.system("rm -f /tmp/R*.log /tmp/R*.pid logs/*")
os.system("mn -c >/dev/null 2>&1")
os.system("pgrep -f webserver.py | xargs kill -9")
os.system("killall -9 dnsmasq")
# os.system("service isc-dhcpd-server stop")
net = Mininet(topo=myTopo(), controller=OVSController, autoSetMacs=True)
net.start()
# Set default routes for ext hosts
for h in ["extH1", "extH2"]:
host = net.getNodeByName(h)
host.cmd("route add default gw 8.8.8.1")
# Let extGW drop all private network packets. Not entirely what would really happen, but close enough
host = net.getNodeByName("extGW")
host.cmd("iptables -I FORWARD -s 10.0.0.0/24 -j DROP")
# Enable forwarding for the routers
routes = {"intGW": "2.2.2.1", "extGW": "2.2.2.2"}
firstIP = {"intGW": "10.0.0.1", "extGW": "8.8.8.1"}
secondIP = {"intGW": "2.2.2.2", "extGW": "2.2.2.1"}
for h in ["intGW", "extGW"]:
host = net.getNodeByName(h)
host.cmd("sysctl -w net.ipv4.ip_forward=1")
host.cmd("ifconfig %s-eth0 %s netmask 255.255.255.0" % (h, firstIP[h]))
host.cmd("ifconfig %s-eth1 %s netmask 255.255.255.0" % (h, secondIP[h]))
host.cmd("route add default gw %s" % routes[h])
# enable NAT'ing on intGW
enableNAT(net, "intGW")
log("Configured the routers")
# start the dns server on 8.8.8.8
host = net.getNodeByName("extH2")
host.cmd("dnsmasq -C ./extH2DNS.conf")
log("Done with dnsmask")
# start the dhcp on srv1
host = net.getNodeByName("srv1")
host.cmd("dnsmasq -C ./srv1DHCP.conf")
host.cmd("route add default gw 10.0.0.1")
log("Done with dnsmasq start")
# configure server 2
host = net.getNodeByName("srv2")
host.cmd("route add default gw 10.0.0.1")
log("added the route")
# Set default routes for int hosts, or do this via DHCP?
for i in range(5):
host = net.getNodeByName("h%d" % i)
host.cmd("dhclient -r h%d-eth0" % i)
host.cmd("dhclient h%d-eth0" % i)
log("Received IP for h%d" % i)
log("Starting web servers", "yellow")
startWebserver(net, "extH1", "50.012 Networks web server")
# resolv.conf is shared between guests and host. Updating is generally unreliable
os.system("echo 'nameserver 8.8.8.8' > /etc/resolv.conf")
CLI(net)
net.stop()
os.system("killall -9 dnsmasq")
os.system("pgrep -f webserver.py | xargs kill -9")
if __name__ == "__main__":
main()
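# Usage sketch: Mininet needs root privileges, so run this script with e.g.
#   sudo python net.py --sleep 3
# (--sleep is the only command-line option defined above).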
avg_line_length: 33.569536 | max_line_length: 110 | alphanum_fraction: 0.635234

hexsha: 269a14006415d3c31a8df9c4a1ae30be16445a06 | size: 3922 | ext: py | lang: Python
max_stars: repo_path: SpaceHabitRPG/Tests/TestHelpers/DatabaseTestSetupCleanup.py | repo_name: joelliusp/SpaceHabit | head_hexsha: 5656ef4d9c57f3e58d0ed756a3aa754c8a7dd6a5 | licenses: ["MIT"] | count: null | event_min: null | event_max: null
max_issues: repo_path: SpaceHabitRPG/Tests/TestHelpers/DatabaseTestSetupCleanup.py | repo_name: joelliusp/SpaceHabit | head_hexsha: 5656ef4d9c57f3e58d0ed756a3aa754c8a7dd6a5 | licenses: ["MIT"] | count: 13 | event_min: 2016-07-19T04:13:20.000Z | event_max: 2016-08-17T06:06:47.000Z
max_forks: repo_path: SpaceHabitRPG/Tests/TestHelpers/DatabaseTestSetupCleanup.py | repo_name: joelliusp/SpaceHabit | head_hexsha: 5656ef4d9c57f3e58d0ed756a3aa754c8a7dd6a5 | licenses: ["MIT"] | count: null | event_min: null | event_max: null
"""
This module is for inserting test data into the database and for cleaning up
the test data so that it doesn't get in the way of other tests
"""
from AuthenticationLayer import AuthenticationFields as authFields
from AllDBFields import ZoneDBFields
from AllDBFields import ZoneDefinitionFields
from AllDBFields import HeroDbFields
from AllDBFields import MonsterDbFields
from AllDBFields import MonsterDefinitionFields
from AllDBFields import AccountDbFields
from Monster import Monster
from Hero import Hero
from Zone import Zone
import DatabaseLayer
import CryptKeeper
import MonkeyPatches
def clean_up():
"""
remove all the data from the test database
"""
connection = DatabaseLayer.open_conn()
connection.drop_database("test")
def create_test_user_using_default_values():
loginPk = create_login_using_test_values()
accountPk = create_test_account_using_default_values(loginPk)
heroPk = setup_test_hero_using_default_values(accountPk)
return {'loginPk':loginPk,'accountPk':accountPk,'heroPk':heroPk}
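# Typical usage sketch in a unittest.TestCase (illustrative only; the method
# bodies just call this module's helpers):
#   def setUp(self):
#     self.pks = create_test_user_using_default_values()
#   def tearDown(self):
#     clean_up()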
def create_login_using_test_values():
collection = DatabaseLayer.get_table(authFields.COLLECTION_NAME)
pk = collection.insert_one({authFields.USER_LOGIN:"a@b.c",authFields.USER_DESC:
"a@b.c",authFields.USER_PASSWORD:CryptKeeper.encrypt_str("123456")}).inserted_id
return pk
def create_test_account_using_default_values(loginPk=None):
from Account import Account
accountPk = Account.create_new_account_in_db(loginPk)
return accountPk
def create_test_hero_dict(accountPk = None):
import uuid
zone = create_test_zone_dict()
zoneVisitCounts = {ZoneDefinitionFields.ASTEROID_FIELD:5,ZoneDefinitionFields.EMPTY_SPACE:11}
hero = {
HeroDbFields.ACCOUNT_PK_KEY: accountPk,
HeroDbFields.SHIP_NAME: "USS testship",
HeroDbFields.LVL:7,
HeroDbFields.GOLD:100,
HeroDbFields.MAX_HP: 40,
HeroDbFields.NOW_HP: 20,
HeroDbFields.MAX_XP: 50,
HeroDbFields.NOW_XP: 0,
HeroDbFields.ATTACK_LVL: 5,
HeroDbFields.DEFENSE_LVL: 6,
HeroDbFields.PUBLIC_KEY: uuid.uuid4().hex,
HeroDbFields.ZONE_VISIT_COUNTS: zoneVisitCounts,
HeroDbFields.ZONE: create_test_zone_obj().dict,
HeroDbFields.MONSTER: create_test_monster_obj().dict
}
return hero
def setup_test_hero_using_default_values(accountPk = None):
h = create_test_hero_using_default_values(accountPk)
h.save_changes()
return h.get_pk()
def create_test_hero_using_default_values(accountPk = None):
MonkeyPatches.set_mock_choice()
h = Hero.construct_unsaved_hero(accountPk,"")
MonkeyPatches.reset_choice()
return h
def create_test_hero_using_test_values(accountPk = None):
  heroDict = create_test_hero_dict(accountPk)
h = Hero.construct_model_from_dict(heroDict)
h.save_changes()
return h.get_pk()
def create_test_zone_dict(zoneKey=None):
if not zoneKey:
zoneKey = ZoneDefinitionFields.EMPTY_SPACE
zoneDict = {
ZoneDBFields.DEFINITION_KEY: zoneKey,
ZoneDBFields.SUFFIX: "Alpha",
ZoneDBFields.MONSTERS_KILLED: 2,
ZoneDBFields.MAX_MONSTERS: 15,
ZoneDBFields.LVL: 3,
}
return zoneDict
def create_test_zone_obj(zoneKey=None):
zoneDict = create_test_zone_dict(zoneKey)
zoneObj = Zone.construct_model_from_dict(zoneDict)
return zoneObj
def create_test_monster_dict(monsterkey=None):
if not monsterkey:
monsterkey = MonsterDefinitionFields.AMBUSH_PIRATES
monsterDict = {
MonsterDbFields.DEFINITION_KEY: monsterkey,
MonsterDbFields.MAX_HP: 150,
MonsterDbFields.NOW_HP: 100,
MonsterDbFields.LVL: 15
}
return monsterDict
def create_test_monster_obj(monsterkey=None):
monsterDict = create_test_monster_dict(monsterkey)
monsterObj = Monster.construct_model_from_dict(monsterDict)
return monsterObj
avg_line_length: 30.88189 | max_line_length: 96 | alphanum_fraction: 0.754462

hexsha: 55c4df1911842586c61c17d7fef308b47fdc9c38 | size: 189 | ext: py | lang: Python
max_stars: repo_path: hello/core.py | repo_name: roiweinreb/nbdev-hello | head_hexsha: c321375617132f0881cf70aa7033afd8e3f05700 | licenses: ["Apache-2.0"] | count: null | event_min: null | event_max: null
max_issues: repo_path: hello/core.py | repo_name: roiweinreb/nbdev-hello | head_hexsha: c321375617132f0881cf70aa7033afd8e3f05700 | licenses: ["Apache-2.0"] | count: 1 | event_min: 2022-02-26T06:58:38.000Z | event_max: 2022-02-26T06:58:38.000Z
max_forks: repo_path: hello/core.py | repo_name: roiweinreb/nbdev-hello | head_hexsha: c321375617132f0881cf70aa7033afd8e3f05700 | licenses: ["Apache-2.0"] | count: null | event_min: null | event_max: null
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/00_core.ipynb (unless otherwise specified).
__all__ = ['mySum']
# Cell
def mySum(x,y):
'''return sum of x and y'''
return x+y
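# Usage sketch (illustrative; not part of the autogenerated file):
#   mySum(2, 3)  # -> 5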
avg_line_length: 23.625 | max_line_length: 97 | alphanum_fraction: 0.671958

hexsha: 5e5a64afc5c5cc5f51437b5dfb0ba8912ca48728 | size: 4967 | ext: py | lang: Python
max_stars: repo_path: u24_lymphocyte/third_party/treeano/sandbox/nodes/mixed_pooling.py | repo_name: ALSM-PhD/quip_classification | head_hexsha: 7347bfaa5cf11ae2d7a528fbcc43322a12c795d3 | licenses: ["BSD-3-Clause"] | count: 45 | event_min: 2015-04-26T04:45:51.000Z | event_max: 2022-01-24T15:03:55.000Z
max_issues: repo_path: u24_lymphocyte/third_party/treeano/sandbox/nodes/mixed_pooling.py | repo_name: ALSM-PhD/quip_classification | head_hexsha: 7347bfaa5cf11ae2d7a528fbcc43322a12c795d3 | licenses: ["BSD-3-Clause"] | count: 8 | event_min: 2018-07-20T20:54:51.000Z | event_max: 2020-06-12T05:36:04.000Z
max_forks: repo_path: u24_lymphocyte/third_party/treeano/sandbox/nodes/mixed_pooling.py | repo_name: ALSM-PhD/quip_classification | head_hexsha: 7347bfaa5cf11ae2d7a528fbcc43322a12c795d3 | licenses: ["BSD-3-Clause"] | count: 22 | event_min: 2018-05-21T23:57:20.000Z | event_max: 2022-02-21T00:48:32.000Z
"""
from
"Generalizing Pooling Functions in Convolutional Neural Networks: Mixed,
Gated, and Tree"
http://arxiv.org/abs/1509.08985
"""
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import batch_fold
fX = theano.config.floatX
@treeano.register_node("mixed_pool")
class MixedPoolNode(treeano.Wrapper0NodeImpl):
"""
mixed max-average pooling
NOTE: currently uses DnnPoolNode to work for 2D and 3D
"""
hyperparameter_names = (
tuple([x
for x in tn.DnnPoolNode.hyperparameter_names
if x != "mode"])
+ ("learnable",))
def architecture_children(self):
mean_seq_node = tn.SequentialNode(
self.name + "_mean_seq",
[tn.DnnMeanPoolNode(self.name + "_mean_pool"),
tn.MultiplyConstantNode(self.name + "_mean_const_mult")]
)
max_seq_node = tn.SequentialNode(
self.name + "_max_seq",
[tn.DnnMaxPoolNode(self.name + "_max_pool"),
tn.MultiplyConstantNode(self.name + "_max_const_mult")]
)
return [tn.ElementwiseSumNode(self.name + "_sum_mixed",
[max_seq_node, mean_seq_node])]
def init_state(self, network):
super(MixedPoolNode, self).init_state(network)
learnable = network.find_hyperparameter(["learnable"], False)
# TODO parameterize init alpha
alpha = 0.5
if learnable:
alpha = network.create_vw(
"alpha",
shape=(),
is_shared=True,
                tags={"parameter"},
default_inits=[treeano.inits.ConstantInit(alpha)],
).variable
network.set_hyperparameter(self.name + "_max_const_mult",
"value",
alpha)
network.set_hyperparameter(self.name + "_mean_const_mult",
"value",
1 - alpha)
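# The node above implements the paper's mixed pooling:
#   f_mix(x) = alpha * f_max(x) + (1 - alpha) * f_avg(x)
# where alpha is fixed at 0.5, or a learnable scalar when learnable=True.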
@treeano.register_node("gated_pool_2d")
class GatedPool2DNode(treeano.Wrapper0NodeImpl):
"""
gated max-average pooling
NOTE: not sure how this deals with ignore_border
"""
hyperparameter_names = tuple([x
for x in tn.Pool2DNode.hyperparameter_names
if x != "mode"])
def architecture_children(self):
gate_node = tn.SequentialNode(
self.name + "_gate_seq",
[batch_fold.AddAxisNode(self.name + "_add_axis", axis=2),
batch_fold.FoldUnfoldAxisIntoBatchNode(
self.name + "_batch_fold",
# NOTE: using dnn conv, since pooling is normally strided
# and the normal conv is slow with strides
tn.DnnConv2DWithBiasNode(self.name + "_conv",
num_filters=1),
axis=1),
batch_fold.RemoveAxisNode(self.name + "_remove_axis", axis=2),
tn.SigmoidNode(self.name + "_gate_sigmoid")]
)
inverse_gate_node = tn.SequentialNode(
self.name + "_max_gate",
[tn.ReferenceNode(self.name + "_gate_ref",
reference=gate_node.name),
tn.MultiplyConstantNode(self.name + "_", value=-1),
tn.AddConstantNode(self.name + "_add1", value=1)])
mean_node = tn.ElementwiseProductNode(
self.name + "_mean_product",
[tn.MeanPool2DNode(self.name + "_mean_pool"),
gate_node])
max_node = tn.ElementwiseProductNode(
self.name + "_max_product",
[tn.MaxPool2DNode(self.name + "_max_pool"),
inverse_gate_node])
return [tn.ElementwiseSumNode(self.name + "_sum",
[mean_node, max_node])]
def init_state(self, network):
super(GatedPool2DNode, self).init_state(network)
conv_node_name = self.name + "_conv"
network.forward_hyperparameter(conv_node_name,
"filter_size",
["pool_size"])
has_stride = network.maybe_forward_hyperparameter(conv_node_name,
"conv_stride",
["pool_stride",
"stride"])
# by default, the stride of a pool is the same as the pool_size
if not has_stride:
network.forward_hyperparameter(conv_node_name,
"conv_stride",
["pool_size"])
network.maybe_forward_hyperparameter(conv_node_name,
"conv_pad",
["pool_pad", "pad"])
| 35.478571
| 77
| 0.529696
|
a4777eadde9df7410f2351931c6f879e99bfb26f
| 1,749
|
py
|
Python
|
Algorithms/Sorting/Python_Sorting/bubble_sort.py
|
TeacherManoj0131/HacktoberFest2020-Contributions
|
c7119202fdf211b8a6fc1eadd0760dbb706a679b
|
[
"MIT"
] | 256
|
2020-09-30T19:31:34.000Z
|
2021-11-20T18:09:15.000Z
|
Algorithms/Sorting/Python_Sorting/bubble_sort.py
|
TeacherManoj0131/HacktoberFest2020-Contributions
|
c7119202fdf211b8a6fc1eadd0760dbb706a679b
|
[
"MIT"
] | 293
|
2020-09-30T19:14:54.000Z
|
2021-06-06T02:34:47.000Z
|
Algorithms/Sorting/Python_Sorting/bubble_sort.py
|
TeacherManoj0131/HacktoberFest2020-Contributions
|
c7119202fdf211b8a6fc1eadd0760dbb706a679b
|
[
"MIT"
] | 1,620
|
2020-09-30T18:37:44.000Z
|
2022-03-03T20:54:22.000Z
|
'''
Python implementation of bubble sort.
The algorithm, in simple terms: in each pass, compare the ith element with its adjacent element and swap them if it is bigger.
This way, in each pass the biggest element "bubbles up" to the end of the list.
Can also be used to check whether an array/list is already sorted, since in that case no swaps happen in the very first pass.
'''
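# Illustrative trace of a single pass over [3, 1, 2]:
#   compare 3,1 -> swap -> [1, 3, 2]
#   compare 3,2 -> swap -> [1, 2, 3]   # the largest element has "bubbled up"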
import random
import os
random.seed(os.urandom(1024))
def is_sorted(array):
return all(array[i] <= array[i+1] for i in range(len(array)-1))
def bubble_sort(input_array):
    for j in range(len(input_array)-1):
        # reset the swap counter each pass so an early exit is possible
        swap = 0
        for i in range(len(input_array)-1):
            if input_array[i] > input_array[i+1]:
                swap += 1
                input_array[i], input_array[i+1] = input_array[i+1], input_array[i]
        if swap == 0:
            #since no swaps were done in this pass, the array is already sorted. Hence, break free from the chains. :D
            print("Array is already sorted")
            break
    return input_array
if __name__ == '__main__':
print("testing with sample containing no duplicates")
input_unique = random.sample(range(10),10)
print(input_unique)
print(bubble_sort(input_unique))
print("Input unique is sorted", is_sorted(input_unique))
bubble_sort(input_unique)
print("***************************************** \n")
print("testing with sample containing duplicates")
input_duplicate = [random.randrange(1, 10) for _ in range(0, 10)]
print(input_duplicate)
print(bubble_sort(input_duplicate))
print("Input duplicate is sorted", is_sorted(input_duplicate))
print("***************************************** \n")
print("Random testing with duplicate elements")
print(bubble_sort([1,1,1,1,2,3,3,3,3,3,5]))
avg_line_length: 38.021739 | max_line_length: 119 | alphanum_fraction: 0.646655

hexsha: 0901fa801c1d4ef82cc4e44decbbc8aa25481b5d | size: 36385 | ext: py | lang: Python
max_stars: repo_path: simulation/python_standard_lib/test/test_deque.py | repo_name: john-grando/pyExpandObjects | head_hexsha: c08b1d1bc45684bc71c0f49b4d2f22c707cd4aa4 | licenses: ["BSD-3-Clause"] | count: null | event_min: null | event_max: null
max_issues: repo_path: simulation/python_standard_lib/test/test_deque.py | repo_name: john-grando/pyExpandObjects | head_hexsha: c08b1d1bc45684bc71c0f49b4d2f22c707cd4aa4 | licenses: ["BSD-3-Clause"] | count: 1 | event_min: 2021-02-03T01:56:56.000Z | event_max: 2021-02-03T01:56:56.000Z
max_forks: repo_path: simulation/python_standard_lib/test/test_deque.py | repo_name: john-grando/pyExpandObjects | head_hexsha: c08b1d1bc45684bc71c0f49b4d2f22c707cd4aa4 | licenses: ["BSD-3-Clause"] | count: 1 | event_min: 2022-01-11T18:31:05.000Z | event_max: 2022-01-11T18:31:05.000Z
from collections import deque
import unittest
from test import support, seq_tests
import gc
import weakref
import copy
import pickle
import random
import struct
BIG = 100000
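# NOTE: the trailing `yield` makes fail() a generator, so the SyntaxError is
# raised lazily on first iteration (exercised by test_extendleft below).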
def fail():
raise SyntaxError
yield 1
class BadCmp:
def __eq__(self, other):
raise RuntimeError
class MutateCmp:
def __init__(self, deque, result):
self.deque = deque
self.result = result
def __eq__(self, other):
self.deque.clear()
return self.result
class TestBasic(unittest.TestCase):
def test_basics(self):
d = deque(range(-5125, -5000))
d.__init__(range(200))
for i in range(200, 400):
d.append(i)
for i in reversed(range(-200, 0)):
d.appendleft(i)
self.assertEqual(list(d), list(range(-200, 400)))
self.assertEqual(len(d), 600)
left = [d.popleft() for i in range(250)]
self.assertEqual(left, list(range(-200, 50)))
self.assertEqual(list(d), list(range(50, 400)))
right = [d.pop() for i in range(250)]
right.reverse()
self.assertEqual(right, list(range(150, 400)))
self.assertEqual(list(d), list(range(50, 150)))
def test_maxlen(self):
self.assertRaises(ValueError, deque, 'abc', -1)
self.assertRaises(ValueError, deque, 'abc', -2)
it = iter(range(10))
d = deque(it, maxlen=3)
self.assertEqual(list(it), [])
self.assertEqual(repr(d), 'deque([7, 8, 9], maxlen=3)')
self.assertEqual(list(d), [7, 8, 9])
self.assertEqual(d, deque(range(10), 3))
d.append(10)
self.assertEqual(list(d), [8, 9, 10])
d.appendleft(7)
self.assertEqual(list(d), [7, 8, 9])
d.extend([10, 11])
self.assertEqual(list(d), [9, 10, 11])
d.extendleft([8, 7])
self.assertEqual(list(d), [7, 8, 9])
d = deque(range(200), maxlen=10)
d.append(d)
support.unlink(support.TESTFN)
fo = open(support.TESTFN, "w")
try:
fo.write(str(d))
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(d))
finally:
fo.close()
support.unlink(support.TESTFN)
d = deque(range(10), maxlen=None)
self.assertEqual(repr(d), 'deque([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])')
fo = open(support.TESTFN, "w")
try:
fo.write(str(d))
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(d))
finally:
fo.close()
support.unlink(support.TESTFN)
def test_maxlen_zero(self):
it = iter(range(100))
deque(it, maxlen=0)
self.assertEqual(list(it), [])
it = iter(range(100))
d = deque(maxlen=0)
d.extend(it)
self.assertEqual(list(it), [])
it = iter(range(100))
d = deque(maxlen=0)
d.extendleft(it)
self.assertEqual(list(it), [])
def test_maxlen_attribute(self):
self.assertEqual(deque().maxlen, None)
self.assertEqual(deque('abc').maxlen, None)
self.assertEqual(deque('abc', maxlen=4).maxlen, 4)
self.assertEqual(deque('abc', maxlen=2).maxlen, 2)
self.assertEqual(deque('abc', maxlen=0).maxlen, 0)
with self.assertRaises(AttributeError):
d = deque('abc')
d.maxlen = 10
def test_count(self):
for s in ('', 'abracadabra', 'simsalabim'*500+'abc'):
s = list(s)
d = deque(s)
for letter in 'abcdefghijklmnopqrstuvwxyz':
self.assertEqual(s.count(letter), d.count(letter), (s, d, letter))
self.assertRaises(TypeError, d.count) # too few args
self.assertRaises(TypeError, d.count, 1, 2) # too many args
class BadCompare:
def __eq__(self, other):
raise ArithmeticError
d = deque([1, 2, BadCompare(), 3])
self.assertRaises(ArithmeticError, d.count, 2)
d = deque([1, 2, 3])
self.assertRaises(ArithmeticError, d.count, BadCompare())
class MutatingCompare:
def __eq__(self, other):
self.d.pop()
return True
m = MutatingCompare()
d = deque([1, 2, 3, m, 4, 5])
m.d = d
self.assertRaises(RuntimeError, d.count, 3)
# test issue11004
# block advance failed after rotation aligned elements on right side of block
d = deque([None]*16)
for i in range(len(d)):
d.rotate(-1)
d.rotate(1)
self.assertEqual(d.count(1), 0)
self.assertEqual(d.count(None), 16)
def test_comparisons(self):
d = deque('xabc')
d.popleft()
for e in [d, deque('abc'), deque('ab'), deque(), list(d)]:
self.assertEqual(d==e, type(d)==type(e) and list(d)==list(e))
self.assertEqual(d!=e, not(type(d)==type(e) and list(d)==list(e)))
args = map(deque, ('', 'a', 'b', 'ab', 'ba', 'abc', 'xba', 'xabc', 'cba'))
for x in args:
for y in args:
self.assertEqual(x == y, list(x) == list(y), (x,y))
self.assertEqual(x != y, list(x) != list(y), (x,y))
self.assertEqual(x < y, list(x) < list(y), (x,y))
self.assertEqual(x <= y, list(x) <= list(y), (x,y))
self.assertEqual(x > y, list(x) > list(y), (x,y))
self.assertEqual(x >= y, list(x) >= list(y), (x,y))
def test_contains(self):
n = 200
d = deque(range(n))
for i in range(n):
self.assertTrue(i in d)
self.assertTrue((n+1) not in d)
# Test detection of mutation during iteration
d = deque(range(n))
d[n//2] = MutateCmp(d, False)
with self.assertRaises(RuntimeError):
n in d
# Test detection of comparison exceptions
d = deque(range(n))
d[n//2] = BadCmp()
with self.assertRaises(RuntimeError):
n in d
def test_contains_count_stop_crashes(self):
class A:
def __eq__(self, other):
d.clear()
return NotImplemented
d = deque([A(), A()])
with self.assertRaises(RuntimeError):
_ = 3 in d
d = deque([A(), A()])
with self.assertRaises(RuntimeError):
_ = d.count(3)
def test_extend(self):
d = deque('a')
self.assertRaises(TypeError, d.extend, 1)
d.extend('bcd')
self.assertEqual(list(d), list('abcd'))
d.extend(d)
self.assertEqual(list(d), list('abcdabcd'))
def test_add(self):
d = deque()
e = deque('abc')
f = deque('def')
self.assertEqual(d + d, deque())
self.assertEqual(e + f, deque('abcdef'))
self.assertEqual(e + e, deque('abcabc'))
self.assertEqual(e + d, deque('abc'))
self.assertEqual(d + e, deque('abc'))
self.assertIsNot(d + d, deque())
self.assertIsNot(e + d, deque('abc'))
self.assertIsNot(d + e, deque('abc'))
g = deque('abcdef', maxlen=4)
h = deque('gh')
self.assertEqual(g + h, deque('efgh'))
with self.assertRaises(TypeError):
deque('abc') + 'def'
def test_iadd(self):
d = deque('a')
d += 'bcd'
self.assertEqual(list(d), list('abcd'))
d += d
self.assertEqual(list(d), list('abcdabcd'))
def test_extendleft(self):
d = deque('a')
self.assertRaises(TypeError, d.extendleft, 1)
d.extendleft('bcd')
self.assertEqual(list(d), list(reversed('abcd')))
d.extendleft(d)
self.assertEqual(list(d), list('abcddcba'))
d = deque()
d.extendleft(range(1000))
self.assertEqual(list(d), list(reversed(range(1000))))
self.assertRaises(SyntaxError, d.extendleft, fail())
def test_getitem(self):
n = 200
d = deque(range(n))
l = list(range(n))
for i in range(n):
d.popleft()
l.pop(0)
if random.random() < 0.5:
d.append(i)
l.append(i)
for j in range(1-len(l), len(l)):
assert d[j] == l[j]
d = deque('superman')
self.assertEqual(d[0], 's')
self.assertEqual(d[-1], 'n')
d = deque()
self.assertRaises(IndexError, d.__getitem__, 0)
self.assertRaises(IndexError, d.__getitem__, -1)
def test_index(self):
for n in 1, 2, 30, 40, 200:
d = deque(range(n))
for i in range(n):
self.assertEqual(d.index(i), i)
with self.assertRaises(ValueError):
d.index(n+1)
# Test detection of mutation during iteration
d = deque(range(n))
d[n//2] = MutateCmp(d, False)
with self.assertRaises(RuntimeError):
d.index(n)
# Test detection of comparison exceptions
d = deque(range(n))
d[n//2] = BadCmp()
with self.assertRaises(RuntimeError):
d.index(n)
# Test start and stop arguments behavior matches list.index()
elements = 'ABCDEFGHI'
nonelement = 'Z'
d = deque(elements * 2)
s = list(elements * 2)
for start in range(-5 - len(s)*2, 5 + len(s) * 2):
for stop in range(-5 - len(s)*2, 5 + len(s) * 2):
for element in elements + 'Z':
try:
target = s.index(element, start, stop)
except ValueError:
with self.assertRaises(ValueError):
d.index(element, start, stop)
else:
self.assertEqual(d.index(element, start, stop), target)
# Test large start argument
d = deque(range(0, 10000, 10))
for step in range(100):
i = d.index(8500, 700)
self.assertEqual(d[i], 8500)
# Repeat test with a different internal offset
d.rotate()
def test_index_bug_24913(self):
d = deque('A' * 3)
with self.assertRaises(ValueError):
i = d.index("Hello world", 0, 4)
def test_insert(self):
# Test to make sure insert behaves like lists
elements = 'ABCDEFGHI'
for i in range(-5 - len(elements)*2, 5 + len(elements) * 2):
d = deque('ABCDEFGHI')
s = list('ABCDEFGHI')
d.insert(i, 'Z')
s.insert(i, 'Z')
self.assertEqual(list(d), s)
def test_insert_bug_26194(self):
data = 'ABC'
d = deque(data, maxlen=len(data))
with self.assertRaises(IndexError):
d.insert(2, None)
elements = 'ABCDEFGHI'
for i in range(-len(elements), len(elements)):
d = deque(elements, maxlen=len(elements)+1)
d.insert(i, 'Z')
if i >= 0:
self.assertEqual(d[i], 'Z')
else:
self.assertEqual(d[i-1], 'Z')
def test_imul(self):
for n in (-10, -1, 0, 1, 2, 10, 1000):
d = deque()
d *= n
self.assertEqual(d, deque())
self.assertIsNone(d.maxlen)
for n in (-10, -1, 0, 1, 2, 10, 1000):
d = deque('a')
d *= n
self.assertEqual(d, deque('a' * n))
self.assertIsNone(d.maxlen)
for n in (-10, -1, 0, 1, 2, 10, 499, 500, 501, 1000):
d = deque('a', 500)
d *= n
self.assertEqual(d, deque('a' * min(n, 500)))
self.assertEqual(d.maxlen, 500)
for n in (-10, -1, 0, 1, 2, 10, 1000):
d = deque('abcdef')
d *= n
self.assertEqual(d, deque('abcdef' * n))
self.assertIsNone(d.maxlen)
for n in (-10, -1, 0, 1, 2, 10, 499, 500, 501, 1000):
d = deque('abcdef', 500)
d *= n
self.assertEqual(d, deque(('abcdef' * n)[-500:]))
self.assertEqual(d.maxlen, 500)
def test_mul(self):
d = deque('abc')
self.assertEqual(d * -5, deque())
self.assertEqual(d * 0, deque())
self.assertEqual(d * 1, deque('abc'))
self.assertEqual(d * 2, deque('abcabc'))
self.assertEqual(d * 3, deque('abcabcabc'))
self.assertIsNot(d * 1, d)
self.assertEqual(deque() * 0, deque())
self.assertEqual(deque() * 1, deque())
self.assertEqual(deque() * 5, deque())
self.assertEqual(-5 * d, deque())
self.assertEqual(0 * d, deque())
self.assertEqual(1 * d, deque('abc'))
self.assertEqual(2 * d, deque('abcabc'))
self.assertEqual(3 * d, deque('abcabcabc'))
d = deque('abc', maxlen=5)
self.assertEqual(d * -5, deque())
self.assertEqual(d * 0, deque())
self.assertEqual(d * 1, deque('abc'))
self.assertEqual(d * 2, deque('bcabc'))
self.assertEqual(d * 30, deque('bcabc'))
def test_setitem(self):
n = 200
d = deque(range(n))
for i in range(n):
d[i] = 10 * i
self.assertEqual(list(d), [10*i for i in range(n)])
l = list(d)
for i in range(1-n, 0, -1):
d[i] = 7*i
l[i] = 7*i
self.assertEqual(list(d), l)
def test_delitem(self):
n = 500 # O(n**2) test, don't make this too big
d = deque(range(n))
self.assertRaises(IndexError, d.__delitem__, -n-1)
self.assertRaises(IndexError, d.__delitem__, n)
for i in range(n):
self.assertEqual(len(d), n-i)
j = random.randrange(-len(d), len(d))
val = d[j]
self.assertIn(val, d)
del d[j]
self.assertNotIn(val, d)
self.assertEqual(len(d), 0)
def test_reverse(self):
n = 500 # O(n**2) test, don't make this too big
data = [random.random() for i in range(n)]
for i in range(n):
d = deque(data[:i])
r = d.reverse()
self.assertEqual(list(d), list(reversed(data[:i])))
self.assertIs(r, None)
d.reverse()
self.assertEqual(list(d), data[:i])
self.assertRaises(TypeError, d.reverse, 1) # Arity is zero
def test_rotate(self):
s = tuple('abcde')
n = len(s)
d = deque(s)
d.rotate(1) # verify rot(1)
self.assertEqual(''.join(d), 'eabcd')
d = deque(s)
d.rotate(-1) # verify rot(-1)
self.assertEqual(''.join(d), 'bcdea')
d.rotate() # check default to 1
self.assertEqual(tuple(d), s)
for i in range(n*3):
d = deque(s)
e = deque(d)
d.rotate(i) # check vs. rot(1) n times
for j in range(i):
e.rotate(1)
self.assertEqual(tuple(d), tuple(e))
d.rotate(-i) # check that it works in reverse
self.assertEqual(tuple(d), s)
e.rotate(n-i) # check that it wraps forward
self.assertEqual(tuple(e), s)
for i in range(n*3):
d = deque(s)
e = deque(d)
d.rotate(-i)
for j in range(i):
e.rotate(-1) # check vs. rot(-1) n times
self.assertEqual(tuple(d), tuple(e))
d.rotate(i) # check that it works in reverse
self.assertEqual(tuple(d), s)
e.rotate(i-n) # check that it wraps backaround
self.assertEqual(tuple(e), s)
d = deque(s)
e = deque(s)
e.rotate(BIG+17) # verify on long series of rotates
dr = d.rotate
for i in range(BIG+17):
dr()
self.assertEqual(tuple(d), tuple(e))
self.assertRaises(TypeError, d.rotate, 'x') # Wrong arg type
self.assertRaises(TypeError, d.rotate, 1, 10) # Too many args
d = deque()
d.rotate() # rotate an empty deque
self.assertEqual(d, deque())
def test_len(self):
d = deque('ab')
self.assertEqual(len(d), 2)
d.popleft()
self.assertEqual(len(d), 1)
d.pop()
self.assertEqual(len(d), 0)
self.assertRaises(IndexError, d.pop)
self.assertEqual(len(d), 0)
d.append('c')
self.assertEqual(len(d), 1)
d.appendleft('d')
self.assertEqual(len(d), 2)
d.clear()
self.assertEqual(len(d), 0)
def test_underflow(self):
d = deque()
self.assertRaises(IndexError, d.pop)
self.assertRaises(IndexError, d.popleft)
def test_clear(self):
d = deque(range(100))
self.assertEqual(len(d), 100)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(list(d), [])
d.clear() # clear an empty deque
self.assertEqual(list(d), [])
def test_remove(self):
d = deque('abcdefghcij')
d.remove('c')
self.assertEqual(d, deque('abdefghcij'))
d.remove('c')
self.assertEqual(d, deque('abdefghij'))
self.assertRaises(ValueError, d.remove, 'c')
self.assertEqual(d, deque('abdefghij'))
# Handle comparison errors
d = deque(['a', 'b', BadCmp(), 'c'])
e = deque(d)
self.assertRaises(RuntimeError, d.remove, 'c')
for x, y in zip(d, e):
# verify that original order and values are retained.
self.assertTrue(x is y)
# Handle evil mutator
for match in (True, False):
d = deque(['ab'])
d.extend([MutateCmp(d, match), 'c'])
self.assertRaises(IndexError, d.remove, 'c')
self.assertEqual(d, deque())
def test_repr(self):
d = deque(range(200))
e = eval(repr(d))
self.assertEqual(list(d), list(e))
d.append(d)
self.assertIn('...', repr(d))
def test_print(self):
d = deque(range(200))
d.append(d)
try:
support.unlink(support.TESTFN)
fo = open(support.TESTFN, "w")
print(d, file=fo, end='')
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(d))
finally:
fo.close()
support.unlink(support.TESTFN)
def test_init(self):
self.assertRaises(TypeError, deque, 'abc', 2, 3)
self.assertRaises(TypeError, deque, 1)
def test_hash(self):
self.assertRaises(TypeError, hash, deque('abc'))
def test_long_steadystate_queue_popleft(self):
for size in (0, 1, 2, 100, 1000):
d = deque(range(size))
append, pop = d.append, d.popleft
for i in range(size, BIG):
append(i)
x = pop()
if x != i - size:
self.assertEqual(x, i-size)
self.assertEqual(list(d), list(range(BIG-size, BIG)))
def test_long_steadystate_queue_popright(self):
for size in (0, 1, 2, 100, 1000):
d = deque(reversed(range(size)))
append, pop = d.appendleft, d.pop
for i in range(size, BIG):
append(i)
x = pop()
if x != i - size:
self.assertEqual(x, i-size)
self.assertEqual(list(reversed(list(d))),
list(range(BIG-size, BIG)))
def test_big_queue_popleft(self):
d = deque()
append, pop = d.append, d.popleft
for i in range(BIG):
append(i)
for i in range(BIG):
x = pop()
if x != i:
self.assertEqual(x, i)
def test_big_queue_popright(self):
d = deque()
append, pop = d.appendleft, d.pop
for i in range(BIG):
append(i)
for i in range(BIG):
x = pop()
if x != i:
self.assertEqual(x, i)
def test_big_stack_right(self):
d = deque()
append, pop = d.append, d.pop
for i in range(BIG):
append(i)
for i in reversed(range(BIG)):
x = pop()
if x != i:
self.assertEqual(x, i)
self.assertEqual(len(d), 0)
def test_big_stack_left(self):
d = deque()
append, pop = d.appendleft, d.popleft
for i in range(BIG):
append(i)
for i in reversed(range(BIG)):
x = pop()
if x != i:
self.assertEqual(x, i)
self.assertEqual(len(d), 0)
def test_roundtrip_iter_init(self):
d = deque(range(200))
e = deque(d)
self.assertNotEqual(id(d), id(e))
self.assertEqual(list(d), list(e))
def test_pickle(self):
for d in deque(range(200)), deque(range(200), 100):
for i in range(pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(d, i)
e = pickle.loads(s)
self.assertNotEqual(id(e), id(d))
self.assertEqual(list(e), list(d))
self.assertEqual(e.maxlen, d.maxlen)
def test_pickle_recursive(self):
for d in deque('abc'), deque('abc', 3):
d.append(d)
for i in range(pickle.HIGHEST_PROTOCOL + 1):
e = pickle.loads(pickle.dumps(d, i))
self.assertNotEqual(id(e), id(d))
self.assertEqual(id(e[-1]), id(e))
self.assertEqual(e.maxlen, d.maxlen)
def test_iterator_pickle(self):
orig = deque(range(200))
data = [i*1.01 for i in orig]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
# initial iterator
itorg = iter(orig)
dump = pickle.dumps((itorg, orig), proto)
it, d = pickle.loads(dump)
for i, x in enumerate(data):
d[i] = x
self.assertEqual(type(it), type(itorg))
self.assertEqual(list(it), data)
# running iterator
next(itorg)
dump = pickle.dumps((itorg, orig), proto)
it, d = pickle.loads(dump)
for i, x in enumerate(data):
d[i] = x
self.assertEqual(type(it), type(itorg))
self.assertEqual(list(it), data[1:])
# empty iterator
for i in range(1, len(data)):
next(itorg)
dump = pickle.dumps((itorg, orig), proto)
it, d = pickle.loads(dump)
for i, x in enumerate(data):
d[i] = x
self.assertEqual(type(it), type(itorg))
self.assertEqual(list(it), [])
# exhausted iterator
self.assertRaises(StopIteration, next, itorg)
dump = pickle.dumps((itorg, orig), proto)
it, d = pickle.loads(dump)
for i, x in enumerate(data):
d[i] = x
self.assertEqual(type(it), type(itorg))
self.assertEqual(list(it), [])
def test_deepcopy(self):
mut = [10]
d = deque([mut])
e = copy.deepcopy(d)
self.assertEqual(list(d), list(e))
mut[0] = 11
self.assertNotEqual(id(d), id(e))
self.assertNotEqual(list(d), list(e))
def test_copy(self):
mut = [10]
d = deque([mut])
e = copy.copy(d)
self.assertEqual(list(d), list(e))
mut[0] = 11
self.assertNotEqual(id(d), id(e))
self.assertEqual(list(d), list(e))
for i in range(5):
for maxlen in range(-1, 6):
s = [random.random() for j in range(i)]
d = deque(s) if maxlen == -1 else deque(s, maxlen)
e = d.copy()
self.assertEqual(d, e)
self.assertEqual(d.maxlen, e.maxlen)
self.assertTrue(all(x is y for x, y in zip(d, e)))
def test_copy_method(self):
mut = [10]
d = deque([mut])
e = d.copy()
self.assertEqual(list(d), list(e))
mut[0] = 11
self.assertNotEqual(id(d), id(e))
self.assertEqual(list(d), list(e))
def test_reversed(self):
for s in ('abcd', range(2000)):
self.assertEqual(list(reversed(deque(s))), list(reversed(s)))
def test_reversed_new(self):
klass = type(reversed(deque()))
for s in ('abcd', range(2000)):
self.assertEqual(list(klass(deque(s))), list(reversed(s)))
def test_gc_doesnt_blowup(self):
import gc
# This used to assert-fail in deque_traverse() under a debug
# build, or run wild with a NULL pointer in a release build.
d = deque()
for i in range(100):
d.append(1)
gc.collect()
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for deque iterator objects
class C(object):
pass
for i in range(2):
obj = C()
ref = weakref.ref(obj)
if i == 0:
container = deque([obj, 1])
else:
container = reversed(deque([obj, 1]))
obj.x = iter(container)
del obj, container
gc.collect()
self.assertTrue(ref() is None, "Cycle was not collected")
check_sizeof = support.check_sizeof
@support.cpython_only
def test_sizeof(self):
BLOCKLEN = 64
basesize = support.calcvobjsize('2P4nP')
blocksize = struct.calcsize('P%dPP' % BLOCKLEN)
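        # Editorial note (an assumption, not from the test file): deques
        # store items in fixed-size blocks of BLOCKLEN slots chained by two
        # link pointers, so the expected sizes below grow one block at a time.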
self.assertEqual(object.__sizeof__(deque()), basesize)
check = self.check_sizeof
check(deque(), basesize + blocksize)
check(deque('a'), basesize + blocksize)
check(deque('a' * (BLOCKLEN - 1)), basesize + blocksize)
check(deque('a' * BLOCKLEN), basesize + 2 * blocksize)
check(deque('a' * (42 * BLOCKLEN)), basesize + 43 * blocksize)
class TestVariousIteratorArgs(unittest.TestCase):
def test_constructor(self):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (seq_tests.Sequence, seq_tests.IterFunc,
seq_tests.IterGen, seq_tests.IterFuncStop,
seq_tests.itermulti, seq_tests.iterfunc):
self.assertEqual(list(deque(g(s))), list(g(s)))
self.assertRaises(TypeError, deque, seq_tests.IterNextOnly(s))
self.assertRaises(TypeError, deque, seq_tests.IterNoNext(s))
self.assertRaises(ZeroDivisionError, deque, seq_tests.IterGenExc(s))
def test_iter_with_altered_data(self):
d = deque('abcdefg')
it = iter(d)
d.pop()
self.assertRaises(RuntimeError, next, it)
def test_runtime_error_on_empty_deque(self):
d = deque()
it = iter(d)
d.append(10)
self.assertRaises(RuntimeError, next, it)
class Deque(deque):
pass
class DequeWithBadIter(deque):
def __iter__(self):
raise TypeError
class TestSubclass(unittest.TestCase):
def test_basics(self):
d = Deque(range(25))
d.__init__(range(200))
for i in range(200, 400):
d.append(i)
for i in reversed(range(-200, 0)):
d.appendleft(i)
self.assertEqual(list(d), list(range(-200, 400)))
self.assertEqual(len(d), 600)
left = [d.popleft() for i in range(250)]
self.assertEqual(left, list(range(-200, 50)))
self.assertEqual(list(d), list(range(50, 400)))
right = [d.pop() for i in range(250)]
right.reverse()
self.assertEqual(right, list(range(150, 400)))
self.assertEqual(list(d), list(range(50, 150)))
d.clear()
self.assertEqual(len(d), 0)
def test_copy_pickle(self):
d = Deque('abc')
e = d.__copy__()
self.assertEqual(type(d), type(e))
self.assertEqual(list(d), list(e))
e = Deque(d)
self.assertEqual(type(d), type(e))
self.assertEqual(list(d), list(e))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(d, proto)
e = pickle.loads(s)
self.assertNotEqual(id(d), id(e))
self.assertEqual(type(d), type(e))
self.assertEqual(list(d), list(e))
d = Deque('abcde', maxlen=4)
e = d.__copy__()
self.assertEqual(type(d), type(e))
self.assertEqual(list(d), list(e))
e = Deque(d)
self.assertEqual(type(d), type(e))
self.assertEqual(list(d), list(e))
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(d, proto)
e = pickle.loads(s)
self.assertNotEqual(id(d), id(e))
self.assertEqual(type(d), type(e))
self.assertEqual(list(d), list(e))
def test_pickle_recursive(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for d in Deque('abc'), Deque('abc', 3):
d.append(d)
e = pickle.loads(pickle.dumps(d, proto))
self.assertNotEqual(id(e), id(d))
self.assertEqual(type(e), type(d))
self.assertEqual(e.maxlen, d.maxlen)
dd = d.pop()
ee = e.pop()
self.assertEqual(id(ee), id(e))
self.assertEqual(e, d)
d.x = d
e = pickle.loads(pickle.dumps(d, proto))
self.assertEqual(id(e.x), id(e))
for d in DequeWithBadIter('abc'), DequeWithBadIter('abc', 2):
self.assertRaises(TypeError, pickle.dumps, d, proto)
def test_weakref(self):
d = deque('gallahad')
p = weakref.proxy(d)
self.assertEqual(str(p), str(d))
d = None
self.assertRaises(ReferenceError, str, p)
def test_strange_subclass(self):
class X(deque):
def __iter__(self):
return iter([])
d1 = X([1,2,3])
d2 = X([4,5,6])
d1 == d2 # not clear if this is supposed to be True or False,
# but it used to give a SystemError
@support.cpython_only
def test_bug_31608(self):
# The interpreter used to crash in specific cases where a deque
# subclass returned a non-deque.
class X(deque):
pass
d = X()
def bad___new__(cls, *args, **kwargs):
return [42]
X.__new__ = bad___new__
with self.assertRaises(TypeError):
d * 42 # shouldn't crash
with self.assertRaises(TypeError):
d + deque([1, 2, 3]) # shouldn't crash
class SubclassWithKwargs(deque):
def __init__(self, newarg=1):
deque.__init__(self)
class TestSubclassWithKwargs(unittest.TestCase):
def test_subclass_with_kwargs(self):
# SF bug #1486663 -- this used to erroneously raise a TypeError
SubclassWithKwargs(newarg=1)
class TestSequence(seq_tests.CommonTest):
type2test = deque
def test_getitem(self):
# For now, bypass tests that require slicing
pass
def test_getslice(self):
# For now, bypass tests that require slicing
pass
def test_subscript(self):
# For now, bypass tests that require slicing
pass
def test_free_after_iterating(self):
# For now, bypass tests that require slicing
self.skipTest("Exhausted deque iterator doesn't free a deque")
#==============================================================================
libreftest = """
Example from the Library Reference: Doc/lib/libcollections.tex
>>> from collections import deque
>>> d = deque('ghi') # make a new deque with three items
>>> for elem in d: # iterate over the deque's elements
... print(elem.upper())
G
H
I
>>> d.append('j') # add a new entry to the right side
>>> d.appendleft('f') # add a new entry to the left side
>>> d # show the representation of the deque
deque(['f', 'g', 'h', 'i', 'j'])
>>> d.pop() # return and remove the rightmost item
'j'
>>> d.popleft() # return and remove the leftmost item
'f'
>>> list(d) # list the contents of the deque
['g', 'h', 'i']
>>> d[0] # peek at leftmost item
'g'
>>> d[-1] # peek at rightmost item
'i'
>>> list(reversed(d)) # list the contents of a deque in reverse
['i', 'h', 'g']
>>> 'h' in d # search the deque
True
>>> d.extend('jkl') # add multiple elements at once
>>> d
deque(['g', 'h', 'i', 'j', 'k', 'l'])
>>> d.rotate(1) # right rotation
>>> d
deque(['l', 'g', 'h', 'i', 'j', 'k'])
>>> d.rotate(-1) # left rotation
>>> d
deque(['g', 'h', 'i', 'j', 'k', 'l'])
>>> deque(reversed(d)) # make a new deque in reverse order
deque(['l', 'k', 'j', 'i', 'h', 'g'])
>>> d.clear() # empty the deque
>>> d.pop() # cannot pop from an empty deque
Traceback (most recent call last):
File "<pyshell#6>", line 1, in -toplevel-
d.pop()
IndexError: pop from an empty deque
>>> d.extendleft('abc') # extendleft() reverses the input order
>>> d
deque(['c', 'b', 'a'])
>>> def delete_nth(d, n):
... d.rotate(-n)
... d.popleft()
... d.rotate(n)
...
>>> d = deque('abcdef')
>>> delete_nth(d, 2) # remove the entry at d[2]
>>> d
deque(['a', 'b', 'd', 'e', 'f'])
>>> def roundrobin(*iterables):
... pending = deque(iter(i) for i in iterables)
... while pending:
... task = pending.popleft()
... try:
... yield next(task)
... except StopIteration:
... continue
... pending.append(task)
...
>>> for value in roundrobin('abc', 'd', 'efgh'):
... print(value)
...
a
d
e
b
f
c
g
h
>>> def maketree(iterable):
... d = deque(iterable)
... while len(d) > 1:
... pair = [d.popleft(), d.popleft()]
... d.append(pair)
... return list(d)
...
>>> print(maketree('abcdefgh'))
[[[['a', 'b'], ['c', 'd']], [['e', 'f'], ['g', 'h']]]]
"""
#==============================================================================
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
import sys
test_classes = (
TestBasic,
TestVariousIteratorArgs,
TestSubclass,
TestSubclassWithKwargs,
TestSequence,
)
support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
# doctests
from test import test_deque
support.run_doctest(test_deque, verbose)
if __name__ == "__main__":
test_main(verbose=True)
avg_line_length: 33.228311 | max_line_length: 86 | alphanum_fraction: 0.500289

hexsha: 389367d52044c4493ca8e4c91072eb0b5aeb4b99 | size: 3,335 | ext: py | lang: Python
path: tests/test_search.py | repo: dkratzert/StructureFinder | head_hexsha: e0be67cb47ad589b87c7175a02c908734e415ee8 | licenses: ["MIT"]
stars: 12 (2017-11-23T08:45:17.000Z to 2022-02-16T18:02:35.000Z)
issues: 4 (2019-12-12T15:28:50.000Z to 2022-02-22T06:28:48.000Z)
forks: null
import unittest
from pymatgen.core import lattice
from searcher import database_handler
from shelxfile.dsrmath import vol_unitcell
class TestSearch(unittest.TestCase):
def setUp(self) -> None:
self.dbfilename = 'test-data/test.sql'
self.structures = database_handler.StructureTable(self.dbfilename)
# more results:
self.m_vol_threshold = 0.04
self.m_ltol = 0.08
self.m_atol = 1.8
# regular:
self.vol_threshold = 0.02
self.ltol = 0.03
self.atol = 1.0
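        # Reading of these knobs (a sketch, not from the source): the volume
        # thresholds bound the relative cell-volume difference accepted by
        # find_by_volume(), while ltol/atol are the length and angle
        # tolerances handed to pymatgen's Lattice.find_mapping(); the "m_"
        # set is looser, so it admits more candidate matches.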
def test_cellfind(self):
idlist = []
cell = [10.930, 12.716, 15.709, 90.000, 90.000, 90.000]
results = [(49, 9.242, 12.304, 18.954, 90.0, 92.74, 90.0, 2153.0),
(260, 10.93, 12.7162, 15.7085, 90.0, 90.0, 90.0, 2183.3),
(10, 13.5918, 10.7345, 16.442, 90.0, 113.142, 90.0, 2205.9),
(244, 13.5918, 10.7345, 16.442, 90.0, 113.142, 90.0, 2205.9),
(207, 16.139, 5.117, 26.887, 90.0, 90.0, 90.0, 2220.4)]
volume = vol_unitcell(*cell)
cells = self.structures.find_by_volume(volume, self.vol_threshold)
self.assertEqual(cells, results)
lattice1 = lattice.Lattice.from_parameters(*cell)
for curr_cell in cells:
try:
lattice2 = lattice.Lattice.from_parameters(*curr_cell[1:7])
except ValueError:
continue
mapping = lattice1.find_mapping(lattice2, self.ltol, self.atol, skip_rotation_matrix=True)
if mapping:
idlist.append(curr_cell[0])
self.assertEqual(idlist, [260])
def test_more_results_cellfind(self):
idlist = []
cell = [10.930, 12.716, 15.709, 90.000, 90.000, 90.000]
results = [(251, 13.432, 10.5988, 16.2393, 90.0, 113.411, 90.0, 2121.6),
(161, 14.8208, 8.1939, 17.4844, 90.0, 91.185, 90.0, 2122.9),
(49, 9.242, 12.304, 18.954, 90.0, 92.74, 90.0, 2153.0),
(260, 10.93, 12.7162, 15.7085, 90.0, 90.0, 90.0, 2183.3),
(10, 13.5918, 10.7345, 16.442, 90.0, 113.142, 90.0, 2205.9),
(244, 13.5918, 10.7345, 16.442, 90.0, 113.142, 90.0, 2205.9),
(207, 16.139, 5.117, 26.887, 90.0, 90.0, 90.0, 2220.4),
(71, 14.815, 14.264, 10.55, 90.0, 90.0, 90.0, 2229.4),
(113, 15.187, 12.883, 11.468, 90.0, 90.0, 90.0, 2243.8),
(129, 27.858, 8.094, 9.951, 90.0, 90.0, 90.0, 2243.8),
(1, 10.36, 18.037, 25.764, 127.03, 129.81, 90.51, 2260.487670154818),
(12, 10.36, 18.037, 25.764, 127.03, 129.81, 90.51, 2260.487670154818)]
volume = vol_unitcell(*cell)
cells = self.structures.find_by_volume(volume, self.m_vol_threshold)
self.assertEqual(cells, results)
lattice1 = lattice.Lattice.from_parameters(*cell)
for curr_cell in cells:
try:
lattice2 = lattice.Lattice.from_parameters(*curr_cell[1:7])
except ValueError:
continue
mapping = lattice1.find_mapping(lattice2, self.m_ltol, self.m_atol, skip_rotation_matrix=True)
if mapping:
idlist.append(curr_cell[0])
self.assertEqual(idlist, [260, 113])
avg_line_length: 46.971831 | max_line_length: 106 | alphanum_fraction: 0.552624

hexsha: a98b309c96afd9d886588a0f53e5b3307b6022ad | size: 8,499 | ext: py | lang: Python
path: node/db_store.py | repo: dlcorporation/openbazaar | head_hexsha: 4dd30911bbfcc96544b41fc90f55abebfeb592c8 | licenses: ["MIT"]
stars: 1 (2018-12-01T15:32:13.000Z)
issues: null
forks: 1 (2018-12-01T15:32:15.000Z)
#!/usr/bin/env python
#
# This library is free software, distributed under the terms of
# the GNU Lesser General Public License Version 3, or any later version.
# See the COPYING file included in this archive
#
# The docstrings in this module contain epytext markup; API documentation
# may be created by processing this file with epydoc: http://epydoc.sf.net
import logging
from pysqlcipher import dbapi2 as sqlite
class Obdb(object):
""" Interface for db storage. Serves as segregation of the persistence layer
and the application logic
"""
def __init__(self, db_path, disable_sqlite_crypt=False):
self.db_path = db_path
self.con = False
self.log = logging.getLogger('DB')
self.disable_sqlite_crypt = disable_sqlite_crypt
def _connectToDb(self):
""" Opens a db connection
"""
self.con = sqlite.connect(
self.db_path, detect_types=sqlite.PARSE_DECLTYPES
)
sqlite.register_adapter(bool, int)
sqlite.register_converter("bool", lambda v: bool(int(v)))
self.con.row_factory = self._dictFactory
if not self.disable_sqlite_crypt:
# Use PRAGMA key to encrypt / decrypt database.
cur = self.con.cursor()
cur.execute("PRAGMA key = 'passphrase';")
def _disconnectFromDb(self):
""" Close the db connection
"""
if self.con:
try:
self.con.close()
except Exception:
pass
self.con = False
@staticmethod
def _dictFactory(cursor, row):
""" A factory that allows sqlite to return a dictionary instead of a tuple
"""
d = {}
for idx, col in enumerate(cursor.description):
if row[idx] is None:
d[col[0]] = ""
else:
d[col[0]] = row[idx]
return d
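    # For example: a row (1, u'bob') with cursor columns ('id', 'nickname')
    # comes back as {'id': 1, 'nickname': u'bob'}; NULLs become "".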
@staticmethod
def _beforeStoring(value):
""" Method called before executing SQL identifiers.
"""
return unicode(value)
def getOrCreate(self, table, where_dict, data_dict=False):
""" This method attempts to grab the record first. If it fails to find it,
it will create it.
@param table: The table to search to
@param get_where_dict: A dictionary with the WHERE/SET clauses
"""
if not data_dict:
data_dict = where_dict
entries = self.selectEntries(table, where_dict)
if len(entries) == 0:
self.insertEntry(table, data_dict)
return self.selectEntries(table, where_dict)[0]
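    # Hypothetical usage sketch (table and column names are made up):
    #   db = Obdb('ob.db')
    #   row = db.getOrCreate('settings', {'market_id': 1},
    #                        {'market_id': 1, 'nickname': 'default'})
    # returns the existing row, or inserts data_dict and returns the new row.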
def updateEntries(self, table, where_dict, set_dict, operator="AND"):
""" A wrapper for the SQL UPDATE operation
@param table: The table to search to
@param whereDict: A dictionary with the WHERE clauses
@param setDict: A dictionary with the SET clauses
"""
self._connectToDb()
with self.con:
cur = self.con.cursor()
sets = []
wheres = []
where_part = []
set_part = []
for key, value in set_dict.iteritems():
if type(value) == bool:
value = bool(value)
key = self._beforeStoring(key)
value = self._beforeStoring(value)
sets.append(value)
set_part.append("%s = ?" % key)
set_part = ",".join(set_part)
for key, value in where_dict.iteritems():
sign = "="
if type(value) is dict:
sign = value["sign"]
value = value["value"]
key = self._beforeStoring(key)
value = self._beforeStoring(value)
wheres.append(value)
where_part.append("%s %s ?" % (key, sign))
operator = " " + operator + " "
where_part = operator.join(where_part)
query = "UPDATE %s SET %s WHERE %s" \
% (table, set_part, where_part)
self.log.debug('query: %s' % query)
cur.execute(query, tuple(sets + wheres))
self._disconnectFromDb()
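    # Illustrative call (hypothetical table/columns):
    #   updateEntries('peers', {'id': 5}, {'nickname': 'bob'})
    # builds "UPDATE peers SET nickname = ? WHERE id = ?" and executes it
    # with ('bob', '5'); every value is stringified by _beforeStoring first.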
def insertEntry(self, table, update_dict):
""" A wrapper for the SQL INSERT operation
@param table: The table to search to
@param updateDict: A dictionary with the values to set
"""
self._connectToDb()
with self.con:
cur = self.con.cursor()
sets = []
updatefield_part = []
setfield_part = []
for key, value in update_dict.iteritems():
if type(value) == bool:
value = bool(value)
key = self._beforeStoring(key)
value = self._beforeStoring(value)
sets.append(value)
updatefield_part.append(key)
setfield_part.append("?")
updatefield_part = ",".join(updatefield_part)
setfield_part = ",".join(setfield_part)
query = "INSERT INTO %s(%s) VALUES(%s)" \
% (table, updatefield_part, setfield_part)
cur.execute(query, tuple(sets))
lastrowid = cur.lastrowid
self.log.debug("query: %s " % query)
self._disconnectFromDb()
if lastrowid:
return lastrowid
def selectEntries(self, table, where_dict=None, operator="AND", order_field="id", order="ASC", limit=None, limit_offset=None, select_fields="*"):
"""
A wrapper for the SQL SELECT operation. It will always return all the
attributes for the selected rows.
@param table: The table to search
@param whereDict: A dictionary with the WHERE clauses.
If ommited it will return all the rows of the table.
"""
if where_dict is None:
where_dict = {"\"1\"": "1"}
self._connectToDb()
with self.con:
cur = self.con.cursor()
wheres = []
where_part = []
for key, value in where_dict.iteritems():
sign = "="
if type(value) is dict:
sign = value["sign"]
value = value["value"]
key = self._beforeStoring(key)
value = self._beforeStoring(value)
wheres.append(value)
where_part.append("%s %s ?" % (key, sign))
if limit is not None and limit_offset is None:
limit_clause = "LIMIT %s" % limit
elif limit is not None and limit_offset is not None:
limit_clause = "LIMIT %s, %s" % (limit_offset, limit)
else:
limit_clause = ""
operator = " " + operator + " "
where_part = operator.join(where_part)
query = "SELECT * FROM %s WHERE %s ORDER BY %s %s %s" \
% (table, where_part, order_field, order, limit_clause)
self.log.debug("query: %s " % query)
cur.execute(query, tuple(wheres))
rows = cur.fetchall()
self._disconnectFromDb()
return rows
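    # Illustrative call (hypothetical table/columns): a WHERE value may be a
    # dict carrying its own comparison operator, e.g.
    #   selectEntries('listings', {'price': {'sign': '<', 'value': 100}})
    # builds "... WHERE price < ?" with parameter ('100',).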
def deleteEntries(self, table, where_dict=None, operator="AND"):
"""
A wrapper for the SQL DELETE operation. It will always return all the
attributes for the selected rows.
@param table: The table to search
@param whereDict: A dictionary with the WHERE clauses.
If ommited it will delete all the rows of the table.
"""
if where_dict is None:
where_dict = {"\"1\"": "1"}
self._connectToDb()
with self.con:
cur = self.con.cursor()
dels = []
where_part = []
for key, value in where_dict.iteritems():
sign = "="
if type(value) is dict:
sign = value["sign"]
value = value["value"]
key = self._beforeStoring(key)
value = self._beforeStoring(value)
dels.append(value)
where_part.append("%s %s ?" % (key, sign))
operator = " " + operator + " "
where_part = operator.join(where_part)
query = "DELETE FROM %s WHERE %s" \
% (table, where_part)
self.log.debug('Query: %s' % query)
cur.execute(query, dels)
self._disconnectFromDb()
avg_line_length: 38.457014 | max_line_length: 149 | alphanum_fraction: 0.54077

hexsha: cb75130d8b9fa0fcb9e45c4cafd17341d8429e36 | size: 6,454 | ext: py | lang: Python
path: planemo/conda.py | repo: shiltemann/planemo | head_hexsha: 6bb642c61df4af91f6c873dfc1c6c3c06d1d491a | licenses: ["CC-BY-3.0"]
stars: null | issues: null | forks: null
"""Planemo specific utilities for dealing with conda.
These extend Galaxy/galaxy-lib's features with planemo-specific idioms.
"""
from __future__ import absolute_import
import collections
import os
import threading
from galaxy.tools.deps import conda_util
from planemo.exit_codes import EXIT_CODE_FAILED_DEPENDENCIES, ExitCodeException
from planemo.io import error, shell
from planemo.tools import yield_tool_sources_on_paths
MESSAGE_ERROR_FAILED_INSTALL = "Attempted to install conda and failed."
MESSAGE_ERROR_CANNOT_INSTALL = "Cannot install Conda - perhaps due to a failed installation or permission problems."
MESSAGE_ERROR_NOT_INSTALLING = "Conda not configured - run ``planemo conda_init`` or pass ``--conda_auto_init`` to continue."
BEST_PRACTICE_CHANNELS = ["conda-forge", "bioconda", "defaults"]
def build_conda_context(ctx, **kwds):
"""Build a galaxy-lib CondaContext tailored to planemo use.
Using planemo's common command-line/global config options.
"""
condarc_override_default = os.path.join(ctx.workspace, "condarc")
conda_prefix = kwds.get("conda_prefix", None)
use_planemo_shell = kwds.get("use_planemo_shell_exec", True)
ensure_channels = kwds.get("conda_ensure_channels", "")
condarc_override = kwds.get("condarc", condarc_override_default)
use_local = kwds.get("conda_use_local", False)
shell_exec = shell if use_planemo_shell else None
conda_context = conda_util.CondaContext(conda_prefix=conda_prefix,
ensure_channels=ensure_channels,
condarc_override=condarc_override,
use_local=use_local,
shell_exec=shell_exec)
handle_auto_init = kwds.get("handle_auto_init", False)
if handle_auto_init and not conda_context.is_installed():
auto_init = kwds.get("conda_auto_init", True)
failed = True
if auto_init:
if conda_context.can_install_conda():
if conda_util.install_conda(conda_context):
error(MESSAGE_ERROR_FAILED_INSTALL)
else:
failed = False
else:
error(MESSAGE_ERROR_CANNOT_INSTALL)
else:
error(MESSAGE_ERROR_NOT_INSTALLING)
if failed:
raise ExitCodeException(EXIT_CODE_FAILED_DEPENDENCIES)
if handle_auto_init:
conda_context.ensure_conda_build_installed_if_needed()
return conda_context
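# Hypothetical usage sketch (ctx is planemo's CLI context object; the kwargs
# mirror the command-line options read above):
#   conda_context = build_conda_context(ctx, conda_prefix='/opt/conda',
#                                       handle_auto_init=True)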
def collect_conda_targets(ctx, paths, recursive=False, found_tool_callback=None):
"""Load CondaTarget objects from supplied artifact sources.
If a tool contains more than one requirement, the requirements will each
appear once in the output.
"""
conda_targets = set([])
real_paths = []
for path in paths:
if not os.path.exists(path):
targets = target_str_to_targets(path)
            conda_targets.update(targets)
else:
real_paths.append(path)
for (tool_path, tool_source) in yield_tool_sources_on_paths(ctx, real_paths, recursive=recursive, exclude_deprecated=True):
if found_tool_callback:
found_tool_callback(tool_path)
for target in tool_source_conda_targets(tool_source):
conda_targets.add(target)
return conda_targets
# Copied and modified from mulled stuff - need to synchronize these concepts.
def target_str_to_targets(targets_raw):
def parse_target(target_str):
if "=" in target_str:
package_name, version = target_str.split("=", 1)
else:
package_name = target_str
version = None
target = conda_util.CondaTarget(package_name, version)
return target
targets = [parse_target(_) for _ in targets_raw.split(",")]
return targets
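# For example: target_str_to_targets("samtools=1.9,bwa") yields
# CondaTarget("samtools", "1.9") and CondaTarget("bwa", None).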
def collect_conda_target_lists(ctx, paths, recursive=False, found_tool_callback=None):
"""Load CondaTarget lists from supplied artifact sources.
If a tool contains more than one requirement, the requirements will all
appear together as one list element of the output list.
"""
conda_target_lists, _ = collect_conda_target_lists_and_tool_paths(ctx, paths, recursive=recursive, found_tool_callback=found_tool_callback)
return conda_target_lists
def collect_conda_target_lists_and_tool_paths(ctx, paths, recursive=False, found_tool_callback=None):
"""Load CondaTarget lists from supplied artifact sources.
If a tool contains more than one requirement, the requirements will all
appear together as one list element of the output list.
"""
conda_target_lists = set([])
tool_paths = collections.defaultdict(list)
for (tool_path, tool_source) in yield_tool_sources_on_paths(ctx, paths, recursive=recursive, yield_load_errors=False):
if found_tool_callback:
found_tool_callback(tool_path)
targets = frozenset(tool_source_conda_targets(tool_source))
conda_target_lists.add(targets)
tool_paths[targets].append(tool_path)
# Turn them into lists so the order matches before returning...
conda_target_lists = list(conda_target_lists)
conda_target_tool_paths = [tool_paths[c] for c in conda_target_lists]
return conda_target_lists, conda_target_tool_paths
def tool_source_conda_targets(tool_source):
"""Load CondaTarget object from supplied abstract tool source."""
requirements, _ = tool_source.parse_requirements_and_containers()
return conda_util.requirements_to_conda_targets(requirements)
best_practice_search_first = threading.local()
def best_practice_search(conda_target, conda_context=None):
# Call it in offline mode after the first time.
try:
best_practice_search_first.previously_called
# TODO: Undo this...
offline = False
except AttributeError:
best_practice_search_first.previously_called = True
offline = False
if not conda_context:
conda_context = conda_util.CondaContext()
return conda_util.best_search_result(conda_target, conda_context=conda_context, channels_override=BEST_PRACTICE_CHANNELS, offline=offline)
__all__ = (
"BEST_PRACTICE_CHANNELS",
"best_practice_search",
"build_conda_context",
"collect_conda_targets",
"collect_conda_target_lists",
"collect_conda_target_lists_and_tool_paths",
"tool_source_conda_targets",
)
avg_line_length: 38.646707 | max_line_length: 143 | alphanum_fraction: 0.716145

hexsha: 1b2c9bfc9c84a8ead8b1edd87a957ecbc1f220a1 | size: 14,141 | ext: py | lang: Python
path: python27/1.0/lib/noarch/simple_http_server.py | licenses: ["BSD-2-Clause"]
stars_repo: vanish87/XX-Net @ fb7eb546d860277c485c3d47772f04fa9845886a | stars: 1 (2018-07-17T00:39:38.000Z)
issues_repo: TDUncle/XX-Net @ 24b2af60dc0abc1c26211813064bb14c1e22bac8 | issues: null
forks_repo: TDUncle/XX-Net @ 24b2af60dc0abc1c26211813064bb14c1e22bac8 | forks: 1 (2016-04-01T06:25:17.000Z)
import os
import urlparse
import datetime
import threading
import mimetools
import socket
import errno
import sys
import select
import time
import json
import xlog
logging = xlog.getLogger("simple_http_server")
class HttpServerHandler():
default_request_version = "HTTP/1.1"
MessageClass = mimetools.Message
rbufsize = -1
wbufsize = 0
def __init__(self, sock, client, args):
self.connection = sock
self.rfile = socket._fileobject(self.connection, "rb", self.rbufsize)
self.wfile = socket._fileobject(self.connection, "wb", self.wbufsize)
self.client_address = client
self.args = args
self.setup()
def setup(self):
pass
def handle(self):
#logging.info('Connected from %r', self.client_address)
while True:
try:
self.close_connection = 1
self.handle_one_request()
except Exception as e:
#logging.warn("handle err:%r close", e)
self.close_connection = 1
if self.close_connection:
break
self.connection.close()
#logging.debug("closed from %s:%d", self.client_address[0], self.client_address[1])
def address_string(self):
return '%s:%s' % self.client_address[:2]
def parse_request(self):
self.command = None # set in case of error on the first line
self.request_version = version = self.default_request_version
requestline = self.raw_requestline
requestline = requestline.rstrip('\r\n')
self.requestline = requestline
words = requestline.split()
if len(words) == 3:
command, path, version = words
if version[:5] != 'HTTP/':
self.send_error(400, "Bad request version (%r)" % version)
return False
try:
base_version_number = version.split('/', 1)[1]
version_number = base_version_number.split(".")
# RFC 2145 section 3.1 says there can be only one "." and
# - major and minor numbers MUST be treated as
# separate integers;
# - HTTP/2.4 is a lower version than HTTP/2.13, which in
# turn is lower than HTTP/12.3;
# - Leading zeros MUST be ignored by recipients.
if len(version_number) != 2:
raise ValueError
version_number = int(version_number[0]), int(version_number[1])
except (ValueError, IndexError):
self.send_error(400, "Bad request version (%r)" % version)
return False
if version_number >= (1, 1):
self.close_connection = 0
if version_number >= (2, 0):
self.send_error(505,
"Invalid HTTP Version (%s)" % base_version_number)
return False
elif len(words) == 2:
command, path = words
self.close_connection = 1
if command != 'GET':
self.send_error(400,
"Bad HTTP/0.9 request type (%r)" % command)
return False
elif not words:
return False
else:
self.send_error(400, "Bad request syntax (%r)" % requestline)
return False
self.command, self.path, self.request_version = command, path, version
# Examine the headers and look for a Connection directive
self.headers = self.MessageClass(self.rfile, 0)
conntype = self.headers.get('Connection', "")
if conntype.lower() == 'close':
self.close_connection = 1
elif conntype.lower() == 'keep-alive':
self.close_connection = 0
return True
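    # For example: the request line "GET /status HTTP/1.1" parses to
    # command='GET', path='/status', request_version='HTTP/1.1', and the
    # connection defaults to keep-alive as HTTP/1.1 requires.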
def handle_one_request(self):
try:
try:
self.raw_requestline = self.rfile.readline(65537)
except Exception as e:
#logging.warn("simple server handle except %r", e)
return
if len(self.raw_requestline) > 65536:
#logging.warn("recv command line too large")
return
if not self.raw_requestline:
#logging.warn("closed")
return
self.parse_request()
if self.command == "GET":
self.do_GET()
elif self.command == "POST":
self.do_POST()
elif self.command == "CONNECT":
self.do_CONNECT()
elif self.command == "HEAD":
self.do_HEAD()
elif self.command == "DELETE":
self.do_DELETE()
elif self.command == "OPTIONS":
self.do_OPTIONS()
elif self.command == "PUT":
self.do_PUT()
else:
logging.warn("unhandler cmd:%s path:%s from:%s", self.command, self.path, self.address_string())
return
self.wfile.flush() #actually send the response if not already done.
self.close_connection = 0
except socket.error as e:
#logging.warn("socket error:%r", e)
pass
except IOError as e:
if e.errno == errno.EPIPE:
logging.warn("PIPE error:%r", e)
pass
else:
logging.warn("IOError:%r", e)
pass
#except OpenSSL.SSL.SysCallError as e:
# logging.warn("socket error:%r", e)
except Exception as e:
logging.exception("handler:%r cmd:%s path:%s from:%s", e, self.command, self.path, self.address_string())
pass
    def do_GET(self):
        logging.warn("unhandled cmd:%s from:%s", self.command, self.address_string())
    def do_POST(self):
        logging.warn("unhandled cmd:%s from:%s", self.command, self.address_string())
    def do_PUT(self):
        logging.warn("unhandled cmd:%s from:%s", self.command, self.address_string())
    def do_DELETE(self):
        logging.warn("unhandled cmd:%s from:%s", self.command, self.address_string())
    def do_OPTIONS(self):
        logging.warn("unhandled cmd:%s from:%s", self.command, self.address_string())
    def do_HEAD(self):
        logging.warn("unhandled cmd:%s from:%s", self.command, self.address_string())
    def do_CONNECT(self):
        logging.warn("unhandled cmd:%s from:%s", self.command, self.address_string())
def send_not_found(self):
self.wfile.write(b'HTTP/1.1 404\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n404 Not Found')
def send_error(self, code, message=None):
self.wfile.write('HTTP/1.1 %d\r\n' % code)
self.wfile.write('Connection: close\r\n\r\n')
if message:
self.wfile.write(message)
def send_response(self, mimetype="", content="", headers="", status=200):
data = []
data.append('HTTP/1.1 %d\r\n' % status)
if len(mimetype):
data.append('Content-Type: %s\r\n' % mimetype)
data.append('Content-Length: %s\r\n' % len(content))
if len(headers):
if isinstance(headers, dict):
for key in headers:
data.append("%s: %s\r\n" % (key, headers[key]))
elif isinstance(headers, basestring):
data.append(headers)
data.append("\r\n")
if len(content) < 1024:
data.append(content)
data_str = "".join(data)
self.wfile.write(data_str)
else:
data_str = "".join(data)
self.wfile.write(data_str)
if len(content):
self.wfile.write(content)
def send_file(self, filename, mimetype):
try:
if not os.path.isfile(filename):
self.send_not_found()
return
file_size = os.path.getsize(filename)
tme = (datetime.datetime.today()+datetime.timedelta(minutes=330)).strftime('%a, %d %b %Y %H:%M:%S GMT')
head = 'HTTP/1.1 200\r\nAccess-Control-Allow-Origin: *\r\nCache-Control:public, max-age=31536000\r\n'
head += 'Expires: %s\r\nContent-Type: %s\r\nContent-Length: %s\r\n\r\n' % (tme, mimetype, file_size)
self.wfile.write(head.encode())
with open(filename, 'rb') as fp:
while True:
data = fp.read(65535)
if not data:
break
self.wfile.write(data)
except:
pass
#logging.warn("download broken")
def response_json(self, res_arr):
data = json.dumps(res_arr, indent=0, sort_keys=True)
self.send_response('application/json', data)
class HTTPServer():
def __init__(self, address, handler, args=(), use_https=False, cert=""):
self.sockets = []
self.running = True
if isinstance(address, tuple):
self.server_address = [address]
else:
#server can listen multi-port
self.server_address = address
self.handler = handler
self.args = args
self.use_https = use_https
self.cert = cert
self.init_socket()
#logging.info("server %s:%d started.", address[0], address[1])
def init_socket(self):
for addr in self.server_address:
self.add_listen(addr)
def add_listen(self, addr):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
sock.bind(addr)
except Exception as e:
logging.error("bind to %s:%d fail", addr[0], addr[1])
raise e
if self.use_https:
import OpenSSL
if hasattr(OpenSSL.SSL, "TLSv1_2_METHOD"):
ssl_version = OpenSSL.SSL.TLSv1_2_METHOD
elif hasattr(OpenSSL.SSL, "TLSv1_1_METHOD"):
ssl_version = OpenSSL.SSL.TLSv1_1_METHOD
elif hasattr(OpenSSL.SSL, "TLSv1_METHOD"):
ssl_version = OpenSSL.SSL.TLSv1_METHOD
ctx = OpenSSL.SSL.Context(ssl_version)
#server.pem's location (containing the server private key and the server certificate).
fpem = self.cert
ctx.use_privatekey_file(fpem)
ctx.use_certificate_file(fpem)
sock = OpenSSL.SSL.Connection(ctx, sock)
sock.listen(200)
self.sockets.append(sock)
logging.info("server %s:%d started.", addr[0], addr[1])
def serve_forever(self):
while self.running:
r, w, e = select.select(self.sockets, [], [], 3)
for rsock in r:
try:
(sock, address) = rsock.accept()
except IOError as e:
logging.warn("socket accept fail(errno: %s).", e.args[0])
if e.args[0] == 10022:
logging.info("restart socket server.")
self.server_close()
self.init_socket()
break
self.process_connect(sock, address)
def process_connect(self, sock, address):
#logging.debug("connect from %s:%d", address[0], address[1])
client_obj = self.handler(sock, address, self.args)
client_thread = threading.Thread(target=client_obj.handle)
client_thread.start()
def shutdown(self):
self.running = False
def server_close(self):
for sock in self.sockets:
sock.close()
self.sockets = []
class TestHttpServer(HttpServerHandler):
def __init__(self, sock, client, args):
self.data_path = args
HttpServerHandler.__init__(self, sock, client, args)
def generate_random_lowercase(self, n):
min_lc = ord(b'a')
len_lc = 26
ba = bytearray(os.urandom(n))
for i, b in enumerate(ba):
ba[i] = min_lc + b % len_lc # convert 0..255 to 97..122
#sys.stdout.buffer.write(ba)
return ba
def do_GET(self):
url_path = urlparse.urlparse(self.path).path
req = urlparse.urlparse(self.path).query
reqs = urlparse.parse_qs(req, keep_blank_values=True)
logging.debug("GET %s from %s:%d", self.path, self.client_address[0], self.client_address[1])
if url_path == '/':
data = "OK\r\n"
self.wfile.write('HTTP/1.1 200\r\nAccess-Control-Allow-Origin: *\r\nContent-Length: %d\r\n\r\n%s' %(len(data), data) )
elif url_path == '/null':
mimetype = "application/x-binary"
if "size" in reqs:
file_size = int(reqs['size'][0])
else:
file_size = 1024 * 1024 * 1024
self.wfile.write('HTTP/1.1 200\r\nContent-Type: %s\r\nContent-Length: %s\r\n\r\n' % (mimetype, file_size))
start = 0
data = self.generate_random_lowercase(65535)
while start < file_size:
left = file_size - start
send_batch = min(left, 65535)
self.wfile.write(data[:send_batch])
start += send_batch
else:
target = os.path.abspath(os.path.join(self.data_path, url_path[1:]))
if os.path.isfile(target):
self.send_file(target, "application/x-binary")
else:
self.wfile.write('HTTP/1.1 404\r\nContent-Length: 0\r\n\r\n' )
def main(data_path="."):
logging.info("listen http on 8880")
httpd = HTTPServer(('', 8880), TestHttpServer, data_path)
http_thread = threading.Thread(target=httpd.serve_forever)
http_thread.setDaemon(True)
http_thread.start()
while True:
time.sleep(10)
if __name__ == "__main__":
    if len(sys.argv) > 1:
data_path = sys.argv[1]
else:
data_path = "."
try:
main(data_path=data_path)
except Exception:
import traceback
traceback.print_exc(file=sys.stdout)
except KeyboardInterrupt:
sys.exit()
avg_line_length: 35.8 | max_line_length: 130 | alphanum_fraction: 0.556113

hexsha: 040916dccdb5e957eff30500a300cadfc544faa5 | size: 8,690 | ext: py | lang: Python
path: scourgify/cleaning.py | repo: yuskey/usaddress-scourgify | head_hexsha: 3cf4b953017433a071b523235a90c74348b245cc | licenses: ["MIT"]
stars: 116 (2018-07-17T18:55:33.000Z to 2022-03-04T19:11:12.000Z)
issues: 14 (2018-10-27T21:45:55.000Z to 2022-02-17T15:57:00.000Z)
forks: 28 (2018-09-05T13:00:42.000Z to 2022-01-21T17:04:09.000Z)
#!/usr/bin/env python
# encoding: utf-8
"""
copyright (c) 2016-2017 Earth Advantage.
All rights reserved
..codeauthor::Fable Turas <fable@rainsoftware.tech>
[ INSERT DOC STRING ] # TODO
"""
# Imports from Standard Library
import re
import unicodedata
from typing import Any, Optional, Sequence, Union
# Imports from Third Party Modules
import usaddress
# Local Imports
from scourgify.address_constants import (
KNOWN_ODDITIES,
OCCUPANCY_TYPE_ABBREVIATIONS,
PROBLEM_ST_TYPE_ABBRVS,
)
# Setup
# Constants
# periods (in decimals), hyphens, / , and & are acceptable address components
# ord('&') ord('#') ord('-'), ord('.') and ord('/')
ALLOWED_CHARS = [35, 38, 45, 46, 47]
# Don't remove ',', '(' or ')' in PRE_CLEAN
PRECLEAN_EXCLUDE = [40, 41, 44]
EXCLUDE_ALL = ALLOWED_CHARS + PRECLEAN_EXCLUDE
STRIP_CHAR_CATS = (
'M', 'S', 'C', 'Nl', 'No', 'Pc', 'Ps', 'Pe', 'Pi', 'Pf', 'Po'
)
STRIP_PUNC_CATS = ('Z', 'Pd')
STRIP_ALL_CATS = STRIP_CHAR_CATS + STRIP_PUNC_CATS
# Data Structure Definitions
# Private Functions
# Public Classes and Functions
def pre_clean_addr_str(addr_str, state=None):
# type: (str, Optional[str]) -> str
"""Remove any known undesirable sub-strings and special characters.
Cleaning should be enacted on an addr_str to remove known characters
and phrases that might prevent usaddress from successfully parsing.
Follows USPS pub 28 guidelines for undesirable special characters.
Non-address phrases or character sets known to occur in raw addresses
should be added to address_constants.KNOWN_ODDITIES.
Some characters are left behind to potentially assist in second chance
processing of unparseable addresses and should be further cleaned
post_processing. (see post_clean_addr_str).
:param addr_str: raw address string
:type addr_str: str
:param state: optional string containing normalized state data.
:type state: str
:return: cleaned string
:rtype: str
"""
# replace any easily handled, undesirable sub-strings
if any(oddity in addr_str for oddity in KNOWN_ODDITIES.keys()):
for key, replacement in KNOWN_ODDITIES.items(): # pragma: no cover
addr_str = addr_str.replace(key, replacement)
# remove non-decimal point period chars.
if '.' in addr_str: # pragma: no cover
addr_str = clean_period_char(addr_str)
# remove special characters per USPS pub 28, except & which impacts
# intersection addresses, and - which impacts range addresses and zipcodes.
# ',', '(' and ')' are also left for potential use in additional line 2
# processing functions
addr_str = clean_upper(
addr_str, exclude=EXCLUDE_ALL, removal_cats=STRIP_CHAR_CATS
)
# to prevent any potential confusion between CT = COURT v CT = Connecticut,
# clean_ambiguous_street_types is not applied if state is CT.
if state and state != 'CT':
addr_str = clean_ambiguous_street_types(addr_str)
return addr_str
def clean_ambiguous_street_types(addr_str):
# type: (str) -> str
"""Clean street type abbreviations treated ambiguously by usaddress.
Some two char street type abbreviations (ie. CT) are treated as StateName
by usaddress when address lines are parsed in isolation. To correct this,
known problem abbreviations are converted to their whole word equivalent.
:param addr_str: string containing address street and occupancy data
without city and state.
:type addr_str: str | None
:return: original or cleaned addr_str
:rtype: str | None
"""
if addr_str:
split_addr = addr_str.split()
for key in PROBLEM_ST_TYPE_ABBRVS:
if key in split_addr:
split_addr[split_addr.index(key)] = PROBLEM_ST_TYPE_ABBRVS[key]
addr_str = ' '.join(split_addr)
break
return addr_str
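# For example (assuming PROBLEM_ST_TYPE_ABBRVS maps 'CT' to 'COURT'):
#   clean_ambiguous_street_types('100 MAIN CT') -> '100 MAIN COURT'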
def post_clean_addr_str(addr_str):
    # type: (Union[str, None]) -> str
    """Remove any special chars or extra white space remaining post-processing.
    :param addr_str: post-processing address string.
    :type addr_str: str | None
:return: str set to uppercase, extra white space and special chars removed.
:rtype: str
"""
if addr_str:
addr_str = clean_upper(
addr_str, exclude=ALLOWED_CHARS, removal_cats=STRIP_CHAR_CATS
)
return addr_str
def _parse_occupancy(addr_line_2):
occupancy = None
if addr_line_2:
parsed = None
# first try usaddress parsing labels
try:
parsed = usaddress.tag(addr_line_2)
except usaddress.RepeatedLabelError:
pass
if parsed:
occupancy = parsed[0].get('OccupancyIdentifier')
return occupancy
def strip_occupancy_type(addr_line_2):
# type: (str) -> str
"""Strip occupancy type (ie apt, unit, etc) from addr_line_2 string
:param addr_line_2: address line 2 string that may contain type
:type addr_line_2: str
:return:
:rtype: str
"""
occupancy = None
if addr_line_2:
addr_line_2 = addr_line_2.replace('#', '').strip().upper()
occupancy = _parse_occupancy(addr_line_2)
# if that doesn't work, clean abbrevs and try again
if not occupancy:
parts = str(addr_line_2).split()
for p in parts:
if p in OCCUPANCY_TYPE_ABBREVIATIONS:
addr_line_2 = addr_line_2.replace(
p, OCCUPANCY_TYPE_ABBREVIATIONS[p]
)
occupancy = _parse_occupancy(addr_line_2)
# if that doesn't work, dissect it manually
if not occupancy:
occupancy = addr_line_2
types = (
list(OCCUPANCY_TYPE_ABBREVIATIONS.keys())
+ list(OCCUPANCY_TYPE_ABBREVIATIONS.values())
)
if parts and len(parts) > 1:
ids = [p for p in parts if p not in types]
occupancy = ' '.join(ids)
return occupancy
def clean_upper(text, # type: Any
exclude=None, # type: Optional[Sequence[int]]
removal_cats=STRIP_CHAR_CATS, # type: Optional[Sequence[str]]
strip_spaces=False # type: Optional[bool]
):
    # type: (Any, Optional[Sequence[int]], Optional[Sequence[str]], Optional[bool]) -> str
"""
Return text as upper case unicode string and remove unwanted characters.
Defaults to STRIP_CHARS e.g all whitespace, punctuation etc
:param text: text to clean
:type text: str
:param exclude: sequence of char ordinals to exclude from text.translate
:type exclude: Sequence
:param removal_cats: sequence of strings identifying unicodedata categories
(or startswith) of characters to be removed from text
:type removal_cats: Sequence
:param strip_spaces: Bool to indicate whether to leave or remove all
spaces. Default is False (leaves single spaces)
:type strip_spaces: bool
:return: cleaned uppercase unicode string
:rtype: str
"""
exclude = exclude or []
# coerce ints etc to str
if not isinstance(text, str): # pragma: no cover
text = str(text)
# catch and convert fractions
text = unicodedata.normalize('NFKD', text)
text = text.translate({8260: '/'})
# evaluate string without commas (,) or ampersand (&) to determine if
# further processing is necessary
alnum_text = text.translate({44: None, 38: None})
# remove unwanted non-alphanumeric characters and convert all dash type
# characters to hyphen
if not alnum_text.replace(' ', '').isalnum():
for char in text:
if (unicodedata.category(char).startswith(removal_cats)
and ord(char) not in exclude):
text = text.translate({ord(char): None})
elif unicodedata.category(char).startswith('Pd'):
text = text.translate({ord(char): '-'})
join_char = ' '
if strip_spaces:
join_char = ''
# remove extra spaces and convert to uppercase
return join_char.join(text.split()).upper()
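# Illustrative behavior under the defaults (a sketch, not from the source):
#   clean_upper('Äpt. #4-B')  ->  'APT 4-B'
# NFKD strips the diaeresis, punctuation in the removal categories is
# dropped, dash-type characters are normalized to hyphens, and the result
# is uppercased with collapsed whitespace.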
def clean_period_char(text):
"""Remove all period characters that are not decimal points.
:param text: string text to clean
:type text: str
:return: cleaned string
:rtype: str
"""
period_pattern = re.compile(r'\.(?!\d)')
return re.sub(period_pattern, '', text)
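# For example: clean_period_char('N. Main St. 1.5') -> 'N Main St 1.5';
# only the decimal point in '1.5' survives the negative lookahead.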
avg_line_length: 34.621514 | max_line_length: 79 | alphanum_fraction: 0.649942

hexsha: 951f1522d948f30ae6353f4b9ec88b76f6436330 | size: 2,825 | ext: py | lang: Python
path: examples/start_here/chatbot_example.py | repo: kkersten/NeMo | head_hexsha: 82f8d63c94820d3e6b12b58d9ced8fa6a5b44589 | licenses: ["Apache-2.0"]
stars: 1 (2019-09-17T03:42:14.000Z)
issues: null
forks: null
import os
import sys
import gzip
import shutil
import nemo
# Get Data
data_file = "movie_data.txt"
if not os.path.isfile(data_file):
with gzip.open("../../tests/data/movie_lines.txt.gz", 'rb') as f_in:
with open(data_file, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
# Configuration
config = {
"corpus_name": "cornell",
"datafile": data_file,
"attn_model": 'dot',
"hidden_size": 512,
"encoder_n_layers": 2,
"decoder_n_layers": 2,
"dropout": 0.1,
"voc_size": 6104 + 3,
"batch_size": 128,
"num_epochs": 15,
"optimizer_kind": "adam",
"learning_rate": 0.0003,
"tb_log_dir": "ChatBot",
}
# instantiate neural factory
nf = nemo.core.NeuralModuleFactory()
# instantiate neural modules
dl = nemo.tutorials.DialogDataLayer(**config)
encoder = nemo.tutorials.EncoderRNN(**config)
decoder = nemo.tutorials.LuongAttnDecoderRNN(**config)
L = nemo.tutorials.MaskedXEntropyLoss()
decoderInfer = nemo.tutorials.GreedyLuongAttnDecoderRNN(**config)
# PARAMETER SHARING: between training and auto-regressive inference decoders
decoderInfer.tie_weights_with(decoder, list(decoder.get_weights().keys()))
# express activations flow
src, src_lengths, tgt, mask, max_tgt_length = dl()
encoder_outputs, encoder_hidden = encoder(input_seq=src,
input_lengths=src_lengths)
outputs, hidden = decoder(targets=tgt, encoder_outputs=encoder_outputs,
max_target_len=max_tgt_length)
loss = L(predictions=outputs, target=tgt, mask=mask)
# run inference decoder to generate predictions
outputs_inf, _ = decoderInfer(encoder_outputs=encoder_outputs)
# define callback function which prints intermediate results to console
def outputs2words(tensors, vocab):
source_ids = tensors[1][:, 0].cpu().numpy().tolist()
response_ids = tensors[2][:, 0].cpu().numpy().tolist()
tgt_ids = tensors[3][:, 0].cpu().numpy().tolist()
source = list(map(lambda x: vocab[x], source_ids))
response = list(map(lambda x: vocab[x], response_ids))
target = list(map(lambda x: vocab[x], tgt_ids))
source = ' '.join([s for s in source if s != 'EOS' and s != 'PAD'])
response = ' '.join([s for s in response if s != 'EOS' and s != 'PAD'])
target = ' '.join([s for s in target if s != 'EOS' and s != 'PAD'])
print(f"Train Loss:{str(tensors[0].item())}")
print(f"SOURCE: {source} <---> PREDICTED RESPONSE: {response} "
f"<---> TARGET: {target}")
callback = nemo.core.SimpleLossLoggerCallback(
tensors=[loss, src, outputs_inf, tgt],
print_func=lambda x: outputs2words(x, dl.voc.index2word)
)
# start training
nf.train(
tensors_to_optimize=[loss],
callbacks=[callback],
optimizer="adam",
optimization_params={"num_epochs": config["num_epochs"], "lr": 0.001})
| 34.036145
| 76
| 0.675752
|
6dd8c6600411e271456d78f094780b6070adc7be
| 9,993
|
py
|
Python
|
app/tornado_handlers/browse.py
|
Firefly-Drone-Shows/flight_review
|
03f5f10df6b87348f6d8adccb2883f7e0576f129
|
[
"BSD-3-Clause"
] | null | null | null |
app/tornado_handlers/browse.py
|
Firefly-Drone-Shows/flight_review
|
03f5f10df6b87348f6d8adccb2883f7e0576f129
|
[
"BSD-3-Clause"
] | null | null | null |
app/tornado_handlers/browse.py
|
Firefly-Drone-Shows/flight_review
|
03f5f10df6b87348f6d8adccb2883f7e0576f129
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Tornado handler for the browse page
"""
from __future__ import print_function
import collections
import sys
import os
from datetime import datetime
import json
import sqlite3
import tornado.web
# this is needed for the following imports
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../plot_app'))
from config import get_db_filename, get_overview_img_filepath
from db_entry import DBData, DBDataGenerated
from helper import flight_modes_table, get_airframe_data, html_long_word_force_break
#pylint: disable=relative-beyond-top-level,too-many-statements
from .common import get_jinja_env, get_generated_db_data_from_log
BROWSE_TEMPLATE = 'browse.html'
#pylint: disable=abstract-method
class BrowseDataRetrievalHandler(tornado.web.RequestHandler):
""" Ajax data retrieval handler """
def get(self, *args, **kwargs):
""" GET request """
search_str = self.get_argument('search[value]', '').lower()
order_ind = int(self.get_argument('order[0][column]'))
order_dir = self.get_argument('order[0][dir]', '').lower()
data_start = int(self.get_argument('start'))
data_length = int(self.get_argument('length'))
draw_counter = int(self.get_argument('draw'))
json_output = dict()
json_output['draw'] = draw_counter
# get the logs (but only the public ones)
con = sqlite3.connect(get_db_filename(), detect_types=sqlite3.PARSE_DECLTYPES)
cur = con.cursor()
sql_order = ' ORDER BY Date DESC'
ordering_col = ['',#table row number
'Logs.Date',
'',#Overview - img
'Logs.Description',
'LogsGenerated.MavType',
'',#Airframe - not from DB
'LogsGenerated.Hardware',
'LogsGenerated.Software',
'LogsGenerated.Duration',
'LogsGenerated.StartTime',
'',#Rating
'LogsGenerated.NumLoggedErrors',
'', #FlightModes,
'LogsGenerated.UUID'
]
if ordering_col[order_ind] != '':
sql_order = ' ORDER BY ' + ordering_col[order_ind]
if order_dir == 'desc':
sql_order += ' DESC'
cur.execute('SELECT Logs.Id, Logs.Date, '
' Logs.Description, Logs.WindSpeed, '
' Logs.Rating, Logs.VideoUrl, '
' LogsGenerated.* '
'FROM Logs '
' LEFT JOIN LogsGenerated on Logs.Id=LogsGenerated.Id '
                    "WHERE Logs.Public = 1 AND NOT Logs.Source = 'CI' "
+sql_order)
# pylint: disable=invalid-name
Columns = collections.namedtuple("Columns", "columns search_only_columns")
def get_columns_from_tuple(db_tuple, counter):
""" load the columns (list of strings) from a db_tuple
"""
db_data = DBDataJoin()
log_id = db_tuple[0]
log_date = db_tuple[1].strftime('%Y-%m-%d')
db_data.description = db_tuple[2]
db_data.feedback = ''
db_data.type = ''
db_data.wind_speed = db_tuple[3]
db_data.rating = db_tuple[4]
db_data.video_url = db_tuple[5]
generateddata_log_id = db_tuple[6]
if log_id != generateddata_log_id:
print('Join failed, loading and updating data')
db_data_gen = get_generated_db_data_from_log(log_id, con, cur)
if db_data_gen is None:
return None
db_data.add_generated_db_data_from_log(db_data_gen)
else:
db_data.duration_s = db_tuple[7]
db_data.mav_type = db_tuple[8]
db_data.estimator = db_tuple[9]
db_data.sys_autostart_id = db_tuple[10]
db_data.sys_hw = db_tuple[11]
db_data.ver_sw = db_tuple[12]
db_data.num_logged_errors = db_tuple[13]
db_data.num_logged_warnings = db_tuple[14]
db_data.flight_modes = \
{int(x) for x in db_tuple[15].split(',') if len(x) > 0}
db_data.ver_sw_release = db_tuple[16]
db_data.vehicle_uuid = db_tuple[17]
db_data.flight_mode_durations = \
[tuple(map(int, x.split(':'))) for x in db_tuple[18].split(',') if len(x) > 0]
db_data.start_time_utc = db_tuple[19]
# bring it into displayable form
ver_sw = db_data.ver_sw
if len(ver_sw) > 10:
ver_sw = ver_sw[:6]
if len(db_data.ver_sw_release) > 0:
try:
release_split = db_data.ver_sw_release.split()
release_type = int(release_split[1])
if release_type == 255: # it's a release
ver_sw = release_split[0]
                except (IndexError, ValueError):
pass
airframe_data = get_airframe_data(db_data.sys_autostart_id)
if airframe_data is None:
airframe = db_data.sys_autostart_id
else:
airframe = airframe_data['name']
flight_modes = ', '.join([flight_modes_table[x][0]
for x in db_data.flight_modes if x in
flight_modes_table])
m, s = divmod(db_data.duration_s, 60)
h, m = divmod(m, 60)
duration_str = '{:d}:{:02d}:{:02d}'.format(h, m, s)
start_time_str = 'N/A'
if db_data.start_time_utc != 0:
try:
start_datetime = datetime.fromtimestamp(db_data.start_time_utc)
start_time_str = start_datetime.strftime("%Y-%m-%d %H:%M")
except ValueError as value_error:
# bogus date
print(value_error)
# make sure to break long descriptions w/o spaces (otherwise they
# mess up the layout)
description = html_long_word_force_break(db_data.description)
search_only_columns = []
if db_data.ver_sw is not None:
search_only_columns.append(db_data.ver_sw)
if db_data.ver_sw_release is not None:
search_only_columns.append(db_data.ver_sw_release)
# if db_data.vehicle_uuid is not None:
# search_only_columns.append(db_data.vehicle_uuid)
image_col = '<div class="no_map_overview"> Not rendered / No GPS </div>'
image_filename = os.path.join(get_overview_img_filepath(), log_id+'.png')
if os.path.exists(image_filename):
image_col = '<img class="map_overview" src="/overview_img/'
image_col += log_id+'.png" alt="Overview Image Load Failed" height=50/>'
return Columns([
counter,
'<a href="plot_app?log='+log_id+'">'+log_date+'</a>',
image_col,
description,
db_data.mav_type,
airframe,
db_data.sys_hw,
ver_sw,
duration_str,
start_time_str,
db_data.rating_str(),
db_data.num_logged_errors,
flight_modes,
db_data.vehicle_uuid
], search_only_columns)
# need to fetch all here, because we will do more SQL calls while
        # iterating (having multiple cursors does not seem to work)
db_tuples = cur.fetchall()
json_output['recordsTotal'] = len(db_tuples)
json_output['data'] = []
if data_length == -1:
data_length = len(db_tuples)
filtered_counter = 0
if search_str == '':
# speed-up the request by iterating only over the requested items
counter = data_start
for i in range(data_start, min(data_start + data_length, len(db_tuples))):
counter += 1
columns = get_columns_from_tuple(db_tuples[i], counter)
if columns is None:
continue
json_output['data'].append(columns.columns)
filtered_counter = len(db_tuples)
else:
            counter = 0  # incremented before first use, matching the branch above
for db_tuple in db_tuples:
counter += 1
columns = get_columns_from_tuple(db_tuple, counter)
if columns is None:
continue
if any(search_str in str(column).lower() for column in \
(columns.columns, columns.search_only_columns)):
if data_start <= filtered_counter < data_start + data_length:
json_output['data'].append(columns.columns)
filtered_counter += 1
cur.close()
con.close()
json_output['recordsFiltered'] = filtered_counter
self.set_header('Content-Type', 'application/json')
self.write(json.dumps(json_output))
class DBDataJoin(DBData, DBDataGenerated):
"""Class for joined Data"""
def add_generated_db_data_from_log(self, source):
"""Update joined data by parent data"""
self.__dict__.update(source.__dict__)
class BrowseHandler(tornado.web.RequestHandler):
""" Browse public log file Tornado request handler """
def get(self, *args, **kwargs):
""" GET request """
template = get_jinja_env().get_template(BROWSE_TEMPLATE)
template_args = {}
search_str = self.get_argument('search', '').lower()
if len(search_str) > 0:
template_args['initial_search'] = json.dumps(search_str)
self.write(template.render(template_args))
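# For reference: the Ajax handler above follows the jQuery DataTables
# server-side processing protocol. A minimal response body is sketched below
# with hypothetical values; it is not used by the handlers themselves.
_EXAMPLE_DATATABLES_RESPONSE = {
    'draw': 3,              # echo of the client's request counter
    'recordsTotal': 5000,   # number of rows before filtering
    'recordsFiltered': 42,  # number of rows after the search filter
    'data': [               # one list of rendered column values per row
        [1, '<a href="plot_app?log=abc">2020-01-01</a>', '...'],
    ],
}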
| 38.883268
| 97
| 0.555589
|
c2d6556dc9fe90e6b5693517e1ad9e5d72cc743e
| 25,576
|
py
|
Python
|
pyscf/gto/moleintor.py
|
KMCzajkowski/pyscf
|
e8af41d910cc0d3963655120c0b689590ad978e7
|
[
"BSD-2-Clause"
] | null | null | null |
pyscf/gto/moleintor.py
|
KMCzajkowski/pyscf
|
e8af41d910cc0d3963655120c0b689590ad978e7
|
[
"BSD-2-Clause"
] | null | null | null |
pyscf/gto/moleintor.py
|
KMCzajkowski/pyscf
|
e8af41d910cc0d3963655120c0b689590ad978e7
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import ctypes
import numpy
from pyscf import lib
libcgto = lib.load_library('libcgto')
ANG_OF = 1
NPRIM_OF = 2
NCTR_OF = 3
KAPPA_OF = 4
PTR_EXP = 5
PTR_COEFF = 6
BAS_SLOTS = 8
def getints(intor_name, atm, bas, env, shls_slice=None, comp=1, hermi=0,
aosym='s1', ao_loc=None, cintopt=None, out=None):
r'''1e and 2e integral generator.
Args:
intor_name : str
================================ =============
Function Expression
================================ =============
"int1e_ovlp_sph" ( \| \)
"int1e_nuc_sph" ( \| nuc \| \)
"int1e_kin_sph" (.5 \| p dot p\)
"int1e_ia01p_sph" (#C(0 1) \| nabla-rinv \| cross p\)
"int1e_giao_irjxp_sph" (#C(0 1) \| r cross p\)
"int1e_cg_irxp_sph" (#C(0 1) \| rc cross p\)
"int1e_giao_a11part_sph" (-.5 \| nabla-rinv \| r\)
"int1e_cg_a11part_sph" (-.5 \| nabla-rinv \| rc\)
"int1e_a01gp_sph" (g \| nabla-rinv cross p \|\)
"int1e_igkin_sph" (#C(0 .5) g \| p dot p\)
"int1e_igovlp_sph" (#C(0 1) g \|\)
"int1e_ignuc_sph" (#C(0 1) g \| nuc \|\)
"int1e_z_sph" ( \| zc \| \)
"int1e_zz_sph" ( \| zc zc \| \)
"int1e_r_sph" ( \| rc \| \)
"int1e_r2_sph" ( \| rc dot rc \| \)
"int1e_rr_sph" ( \| rc rc \| \)
"int1e_pnucp_sph" (p* \| nuc dot p \| \)
"int1e_prinvxp_sph" (p* \| rinv cross p \| \)
"int1e_ovlp_spinor" ( \| \)
"int1e_nuc_spinor" ( \| nuc \|\)
"int1e_srsr_spinor" (sigma dot r \| sigma dot r\)
"int1e_sr_spinor" (sigma dot r \|\)
"int1e_srsp_spinor" (sigma dot r \| sigma dot p\)
"int1e_spsp_spinor" (sigma dot p \| sigma dot p\)
"int1e_sp_spinor" (sigma dot p \|\)
"int1e_spnucsp_spinor" (sigma dot p \| nuc \| sigma dot p\)
"int1e_srnucsr_spinor" (sigma dot r \| nuc \| sigma dot r\)
"int1e_govlp_spinor" (g \|\)
"int1e_gnuc_spinor" (g \| nuc \|\)
"int1e_cg_sa10sa01_spinor" (.5 sigma cross rc \| sigma cross nabla-rinv \|\)
"int1e_cg_sa10sp_spinor" (.5 rc cross sigma \| sigma dot p\)
"int1e_cg_sa10nucsp_spinor" (.5 rc cross sigma \| nuc \| sigma dot p\)
"int1e_giao_sa10sa01_spinor" (.5 sigma cross r \| sigma cross nabla-rinv \|\)
"int1e_giao_sa10sp_spinor" (.5 r cross sigma \| sigma dot p\)
"int1e_giao_sa10nucsp_spinor" (.5 r cross sigma \| nuc \| sigma dot p\)
"int1e_sa01sp_spinor" (\| nabla-rinv cross sigma \| sigma dot p\)
"int1e_spgsp_spinor" (g sigma dot p \| sigma dot p\)
"int1e_spgnucsp_spinor" (g sigma dot p \| nuc \| sigma dot p\)
"int1e_spgsa01_spinor" (g sigma dot p \| nabla-rinv cross sigma \|\)
"int1e_spspsp_spinor" (sigma dot p \| sigma dot p sigma dot p\)
"int1e_spnuc_spinor" (sigma dot p \| nuc \|\)
"int1e_ovlp_cart" ( \| \)
"int1e_nuc_cart" ( \| nuc \| \)
"int1e_kin_cart" (.5 \| p dot p\)
"int1e_ia01p_cart" (#C(0 1) \| nabla-rinv \| cross p\)
"int1e_giao_irjxp_cart" (#C(0 1) \| r cross p\)
"int1e_cg_irxp_cart" (#C(0 1) \| rc cross p\)
"int1e_giao_a11part_cart" (-.5 \| nabla-rinv \| r\)
"int1e_cg_a11part_cart" (-.5 \| nabla-rinv \| rc\)
"int1e_a01gp_cart" (g \| nabla-rinv cross p \|\)
"int1e_igkin_cart" (#C(0 .5) g \| p dot p\)
"int1e_igovlp_cart" (#C(0 1) g \|\)
"int1e_ignuc_cart" (#C(0 1) g \| nuc \|\)
"int1e_ipovlp_sph" (nabla \|\)
"int1e_ipkin_sph" (.5 nabla \| p dot p\)
"int1e_ipnuc_sph" (nabla \| nuc \|\)
"int1e_iprinv_sph" (nabla \| rinv \|\)
"int1e_rinv_sph" (\| rinv \|\)
"int1e_ipovlp_spinor" (nabla \|\)
"int1e_ipkin_spinor" (.5 nabla \| p dot p\)
"int1e_ipnuc_spinor" (nabla \| nuc \|\)
"int1e_iprinv_spinor" (nabla \| rinv \|\)
"int1e_ipspnucsp_spinor" (nabla sigma dot p \| nuc \| sigma dot p\)
"int1e_ipsprinvsp_spinor" (nabla sigma dot p \| rinv \| sigma dot p\)
"int1e_ipovlp_cart" (nabla \|\)
"int1e_ipkin_cart" (.5 nabla \| p dot p\)
"int1e_ipnuc_cart" (nabla \| nuc \|\)
"int1e_iprinv_cart" (nabla \| rinv \|\)
"int1e_rinv_cart" (\| rinv \|\)
"int2e_p1vxp1_sph" ( p* \, cross p \| \, \) ; SSO
"int2e_sph" ( \, \| \, \)
"int2e_ig1_sph" (#C(0 1) g \, \| \, \)
"int2e_spinor" (, \| \, \)
"int2e_spsp1_spinor" (sigma dot p \, sigma dot p \| \, \)
"int2e_spsp1spsp2_spinor" (sigma dot p \, sigma dot p \| sigma dot p \, sigma dot p \)
"int2e_srsr1_spinor" (sigma dot r \, sigma dot r \| \,\)
"int2e_srsr1srsr2_spinor" (sigma dot r \, sigma dot r \| sigma dot r \, sigma dot r\)
"int2e_cg_sa10sp1_spinor" (.5 rc cross sigma \, sigma dot p \| \,\)
"int2e_cg_sa10sp1spsp2_spinor" (.5 rc cross sigma \, sigma dot p \| sigma dot p \, sigma dot p \)
"int2e_giao_sa10sp1_spinor" (.5 r cross sigma \, sigma dot p \| \,\)
"int2e_giao_sa10sp1spsp2_spinor" (.5 r cross sigma \, sigma dot p \| sigma dot p \, sigma dot p \)
"int2e_g1_spinor" (g \, \| \,\)
"int2e_spgsp1_spinor" (g sigma dot p \, sigma dot p \| \,\)
"int2e_g1spsp2_spinor" (g \, \| sigma dot p \, sigma dot p\)
"int2e_spgsp1spsp2_spinor" (g sigma dot p \, sigma dot p \| sigma dot p \, sigma dot p\)
"int2e_spv1_spinor" (sigma dot p \, \| \,\)
"int2e_vsp1_spinor" (\, sigma dot p \| \,\)
"int2e_spsp2_spinor" (\, \| sigma dot p \, sigma dot p\)
"int2e_spv1spv2_spinor" (sigma dot p \, \| sigma dot p \,\)
"int2e_vsp1spv2_spinor" (\, sigma dot p \| sigma dot p \,\)
"int2e_spv1vsp2_spinor" (sigma dot p \, \| \, sigma dot p\)
"int2e_vsp1vsp2_spinor" (\, sigma dot p \| \, sigma dot p\)
"int2e_spv1spsp2_spinor" (sigma dot p \, \| sigma dot p \, sigma dot p\)
"int2e_vsp1spsp2_spinor" (\, sigma dot p \| sigma dot p \, sigma dot p\)
"int2e_ig1_cart" (#C(0 1) g \, \| \, \)
"int2e_ip1_sph" (nabla \, \| \,\)
"int2e_ip1_spinor" (nabla \, \| \,\)
"int2e_ipspsp1_spinor" (nabla sigma dot p \, sigma dot p \| \,\)
"int2e_ip1spsp2_spinor" (nabla \, \| sigma dot p \, sigma dot p\)
"int2e_ipspsp1spsp2_spinor" (nabla sigma dot p \, sigma dot p \| sigma dot p \, sigma dot p\)
"int2e_ipsrsr1_spinor" (nabla sigma dot r \, sigma dot r \| \,\)
"int2e_ip1srsr2_spinor" (nabla \, \| sigma dot r \, sigma dot r\)
"int2e_ipsrsr1srsr2_spinor" (nabla sigma dot r \, sigma dot r \| sigma dot r \, sigma dot r\)
"int2e_ip1_cart" (nabla \, \| \,\)
"int2e_ssp1ssp2_spinor" ( \, sigma dot p \| gaunt \| \, sigma dot p\)
"int2e_cg_ssa10ssp2_spinor" (rc cross sigma \, \| gaunt \| \, sigma dot p\)
"int2e_giao_ssa10ssp2_spinor" (r cross sigma \, \| gaunt \| \, sigma dot p\)
"int2e_gssp1ssp2_spinor" (g \, sigma dot p \| gaunt \| \, sigma dot p\)
"int2e_ipip1_sph" ( nabla nabla \, \| \, \)
"int2e_ipvip1_sph" ( nabla \, nabla \| \, \)
"int2e_ip1ip2_sph" ( nabla \, \| nabla \, \)
"int3c2e_ip1_sph" (nabla \, \| \)
"int3c2e_ip2_sph" ( \, \| nabla\)
"int2c2e_ip1_sph" (nabla \| r12 \| \)
"int3c2e_spinor" (nabla \, \| \)
"int3c2e_spsp1_spinor" (nabla \, \| \)
"int3c2e_ip1_spinor" (nabla \, \| \)
"int3c2e_ip2_spinor" ( \, \| nabla\)
"int3c2e_ipspsp1_spinor" (nabla sigma dot p \, sigma dot p \| \)
"int3c2e_spsp1ip2_spinor" (sigma dot p \, sigma dot p \| nabla \)
================================ =============
atm : int32 ndarray
libcint integral function argument
bas : int32 ndarray
libcint integral function argument
env : float64 ndarray
libcint integral function argument
Kwargs:
shls_slice : 8-element list
(ish_start, ish_end, jsh_start, jsh_end, ksh_start, ksh_end, lsh_start, lsh_end)
comp : int
Components of the integrals, e.g. int1e_ipovlp has 3 components.
hermi : int (1e integral only)
Symmetry of the 1e integrals
| 0 : no symmetry assumed (default)
| 1 : hermitian
| 2 : anti-hermitian
aosym : str (2e integral only)
Symmetry of the 2e integrals
| 4 or '4' or 's4': 4-fold symmetry (default)
| '2ij' or 's2ij' : symmetry between i, j in (ij|kl)
| '2kl' or 's2kl' : symmetry between k, l in (ij|kl)
| 1 or '1' or 's1': no symmetry
out : ndarray (2e integral only)
array to store the 2e AO integrals
Returns:
ndarray of 1-electron integrals, can be either 2-dim or 3-dim, depending on comp
Examples:
>>> mol.build(atom='H 0 0 0; H 0 0 1.1', basis='sto-3g')
>>> gto.getints('int1e_ipnuc_sph', mol._atm, mol._bas, mol._env, comp=3) # <nabla i | V_nuc | j>
[[[ 0. 0. ]
[ 0. 0. ]]
[[ 0. 0. ]
[ 0. 0. ]]
[[ 0.10289944 0.48176097]
[-0.48176097 -0.10289944]]]
'''
intor_name = ascint3(intor_name)
if (intor_name.startswith('int1e') or
intor_name.startswith('ECP') or
intor_name.startswith('int2c2e')):
return getints2c(intor_name, atm, bas, env, shls_slice, comp,
hermi, ao_loc, cintopt, out)
elif intor_name.startswith('int2e') or intor_name.startswith('int4c1e'):
return getints4c(intor_name, atm, bas, env, shls_slice, comp,
aosym, ao_loc, cintopt, out)
elif intor_name.startswith('int3c'):
return getints3c(intor_name, atm, bas, env, shls_slice, comp,
aosym, ao_loc, cintopt, out)
else:
raise RuntimeError('Unknown intor %s' % intor_name)
def getints2c(intor_name, atm, bas, env, shls_slice=None, comp=1, hermi=0,
ao_loc=None, cintopt=None, out=None):
atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
env = numpy.asarray(env, dtype=numpy.double, order='C')
natm = atm.shape[0]
nbas = bas.shape[0]
if shls_slice is None:
shls_slice = (0, nbas, 0, nbas)
else:
assert(shls_slice[1] <= nbas and shls_slice[3] <= nbas)
if ao_loc is None:
ao_loc = make_loc(bas, intor_name)
i0, i1, j0, j1 = shls_slice[:4]
naoi = ao_loc[i1] - ao_loc[i0]
naoj = ao_loc[j1] - ao_loc[j0]
if intor_name.endswith('_cart') or intor_name.endswith('_sph'):
mat = numpy.ndarray((naoi,naoj,comp), numpy.double, out, order='F')
drv_name = 'GTOint2c'
else:
        mat = numpy.ndarray((naoi,naoj,comp), numpy.complex128, out, order='F')
if '2c2e' in intor_name:
assert(hermi != lib.HERMITIAN and
hermi != lib.ANTIHERMI)
drv_name = 'GTOint2c_spinor'
if cintopt is None:
cintopt = make_cintopt(atm, bas, env, intor_name)
# cintopt = lib.c_null_ptr()
fn = getattr(libcgto, drv_name)
fn(getattr(libcgto, intor_name), mat.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(comp), ctypes.c_int(hermi),
(ctypes.c_int*4)(*(shls_slice[:4])),
ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt,
atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(natm),
bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(nbas),
env.ctypes.data_as(ctypes.c_void_p))
mat = mat.transpose(2,0,1)
if comp == 1:
mat = mat[0]
return mat
def getints3c(intor_name, atm, bas, env, shls_slice=None, comp=1,
aosym='s1', ao_loc=None, cintopt=None, out=None):
atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
env = numpy.asarray(env, dtype=numpy.double, order='C')
natm = atm.shape[0]
nbas = bas.shape[0]
if shls_slice is None:
shls_slice = (0, nbas, 0, nbas, 0, nbas)
else:
assert(shls_slice[1] <= nbas and
shls_slice[3] <= nbas and
shls_slice[5] <= nbas)
i0, i1, j0, j1, k0, k1 = shls_slice[:6]
if ao_loc is None:
ao_loc = make_loc(bas, intor_name)
if k0 > j1 and k0 > i1:
if 'ssc' in intor_name:
ao_loc[k0-1:] = ao_loc[k0] + make_loc(bas[k0:], 'cart')
elif 'spinor' in intor_name:
ao_loc[k0-1:] = ao_loc[k0] + make_loc(bas[k0:], intor_name)
naok = ao_loc[k1] - ao_loc[k0]
if aosym in ('s1',):
naoi = ao_loc[i1] - ao_loc[i0]
naoj = ao_loc[j1] - ao_loc[j0]
shape = (naoi, naoj, naok, comp)
else:
aosym = 's2ij'
nij = ao_loc[i1]*(ao_loc[i1]+1)//2 - ao_loc[i0]*(ao_loc[i0]+1)//2
shape = (nij, naok, comp)
if 'spinor' in intor_name:
        mat = numpy.ndarray(shape, numpy.complex128, out, order='F')
drv = libcgto.GTOr3c_drv
fill = getattr(libcgto, 'GTOr3c_fill_'+aosym)
else:
mat = numpy.ndarray(shape, numpy.double, out, order='F')
drv = libcgto.GTOnr3c_drv
fill = getattr(libcgto, 'GTOnr3c_fill_'+aosym)
if cintopt is None:
cintopt = make_cintopt(atm, bas, env, intor_name)
drv(getattr(libcgto, intor_name), fill,
mat.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(comp),
(ctypes.c_int*6)(*(shls_slice[:6])),
ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt,
atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(natm),
bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(nbas),
env.ctypes.data_as(ctypes.c_void_p))
mat = numpy.rollaxis(mat, -1, 0)
if comp == 1:
mat = mat[0]
return mat
def getints4c(intor_name, atm, bas, env, shls_slice=None, comp=1,
aosym='s1', ao_loc=None, cintopt=None, out=None):
aosym = _stand_sym_code(aosym)
atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
env = numpy.asarray(env, dtype=numpy.double, order='C')
c_atm = atm.ctypes.data_as(ctypes.c_void_p)
c_bas = bas.ctypes.data_as(ctypes.c_void_p)
c_env = env.ctypes.data_as(ctypes.c_void_p)
natm = atm.shape[0]
nbas = bas.shape[0]
ao_loc = make_loc(bas, intor_name)
if cintopt is None:
cintopt = make_cintopt(atm, bas, env, intor_name)
if aosym == 's8':
#assert(intor_name in ('int2e_sph', 'int2e_cart'))
assert(shls_slice is None)
libcvhf = lib.load_library('libcvhf')
nao = ao_loc[-1]
nao_pair = nao*(nao+1)//2
out = numpy.ndarray((nao_pair*(nao_pair+1)//2), buffer=out)
drv = libcvhf.GTO2e_cart_or_sph
drv(getattr(libcgto, intor_name),
out.ctypes.data_as(ctypes.c_void_p),
ao_loc.ctypes.data_as(ctypes.c_void_p),
c_atm, ctypes.c_int(natm), c_bas, ctypes.c_int(nbas), c_env)
return out
else:
if shls_slice is None:
shls_slice = (0, nbas, 0, nbas, 0, nbas, 0, nbas)
elif len(shls_slice) == 4:
shls_slice = shls_slice + (0, nbas, 0, nbas)
else:
assert(shls_slice[1] <= nbas and shls_slice[3] <= nbas and
shls_slice[5] <= nbas and shls_slice[7] <= nbas)
i0, i1, j0, j1, k0, k1, l0, l1 = shls_slice
naoi = ao_loc[i1] - ao_loc[i0]
naoj = ao_loc[j1] - ao_loc[j0]
naok = ao_loc[k1] - ao_loc[k0]
naol = ao_loc[l1] - ao_loc[l0]
if aosym in ('s4', 's2ij'):
nij = naoi * (naoi + 1) // 2
assert(numpy.all(ao_loc[i0:i1]-ao_loc[i0] == ao_loc[j0:j1]-ao_loc[j0]))
else:
nij = naoi * naoj
if aosym in ('s4', 's2kl'):
nkl = naok * (naok + 1) // 2
assert(numpy.all(ao_loc[k0:k1]-ao_loc[k0] == ao_loc[l0:l1]-ao_loc[l0]))
else:
nkl = naok * naol
if comp == 1:
out = numpy.ndarray((nij,nkl), buffer=out)
else:
out = numpy.ndarray((comp,nij,nkl), buffer=out)
prescreen = lib.c_null_ptr()
drv = libcgto.GTOnr2e_fill_drv
drv(getattr(libcgto, intor_name),
getattr(libcgto, 'GTOnr2e_fill_'+aosym), prescreen,
out.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(comp),
(ctypes.c_int*8)(*shls_slice),
ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt,
c_atm, ctypes.c_int(natm), c_bas, ctypes.c_int(nbas), c_env)
return out
def getints_by_shell(intor_name, shls, atm, bas, env, comp=1):
r'''For given 2, 3 or 4 shells, interface for libcint to get 1e, 2e,
2-center-2e or 3-center-2e integrals
Args:
intor_name : str
See also :func:`getints` for the supported intor_name
shls : list of int
The AO shell-ids of the integrals
atm : int32 ndarray
libcint integral function argument
bas : int32 ndarray
libcint integral function argument
env : float64 ndarray
libcint integral function argument
Kwargs:
comp : int
Components of the integrals, e.g. int1e_ipovlp has 3 components.
Returns:
ndarray of 2-dim to 5-dim, depending on the integral type (1e,
2e, 3c-2e, 2c2e) and the value of comp
Examples:
The gradients of the spherical 2e integrals
>>> mol.build(atom='H 0 0 0; H 0 0 1.1', basis='sto-3g')
>>> gto.getints_by_shell('int2e_ip1_sph', (0,1,0,1), mol._atm, mol._bas, mol._env, comp=3)
[[[[[-0. ]]]]
[[[[-0. ]]]]
[[[[-0.08760462]]]]]
'''
intor_name = ascint3(intor_name)
atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
env = numpy.asarray(env, dtype=numpy.double, order='C')
natm = ctypes.c_int(atm.shape[0])
nbas = ctypes.c_int(bas.shape[0])
if intor_name.endswith('_cart'):
dtype = numpy.double
def num_cgto_of(basid):
l = bas[basid,ANG_OF]
return (l+1)*(l+2)//2 * bas[basid,NCTR_OF]
elif intor_name.endswith('_sph'):
dtype = numpy.double
def num_cgto_of(basid):
l = bas[basid,ANG_OF]
return (l*2+1) * bas[basid,NCTR_OF]
else:
from pyscf.gto.mole import len_spinor
        dtype = numpy.complex128
def num_cgto_of(basid):
l = bas[basid,ANG_OF]
k = bas[basid,KAPPA_OF]
return len_spinor(l,k) * bas[basid,NCTR_OF]
null = lib.c_null_ptr()
if intor_name.startswith('int3c'):
assert(len(shls) == 3)
di = num_cgto_of(shls[0])
dj = num_cgto_of(shls[1])
l = bas[shls[2],ANG_OF]
if intor_name.endswith('_ssc'): # mixed spherical-cartesian
dk = (l+1)*(l+2)//2 * bas[shls[2],NCTR_OF]
else:
dk = (l*2+1) * bas[shls[2],NCTR_OF]
buf = numpy.empty((di,dj,dk,comp), dtype, order='F')
fintor = getattr(libcgto, intor_name)
fintor(buf.ctypes.data_as(ctypes.c_void_p),
null, (ctypes.c_int*3)(*shls),
atm.ctypes.data_as(ctypes.c_void_p), natm,
bas.ctypes.data_as(ctypes.c_void_p), nbas,
env.ctypes.data_as(ctypes.c_void_p), null, null)
if comp == 1:
return buf.reshape(di,dj,dk)
else:
return buf.transpose(3,0,1,2)
elif intor_name.startswith('int2e') or intor_name.startswith('int4c'):
assert(len(shls) == 4)
di, dj, dk, dl = [num_cgto_of(x) for x in shls]
buf = numpy.empty((di,dj,dk,dl,comp), dtype, order='F')
fintor = getattr(libcgto, intor_name)
fintor(buf.ctypes.data_as(ctypes.c_void_p),
null, (ctypes.c_int*4)(*shls),
atm.ctypes.data_as(ctypes.c_void_p), natm,
bas.ctypes.data_as(ctypes.c_void_p), nbas,
env.ctypes.data_as(ctypes.c_void_p), null, null)
if comp == 1:
return buf.reshape(di,dj,dk,dl)
else:
return buf.transpose(4,0,1,2,3)
elif (intor_name.startswith('int2c') or '1e' in intor_name or
'ECP' in intor_name):
assert(len(shls) == 2)
di = num_cgto_of(shls[0])
dj = num_cgto_of(shls[1])
buf = numpy.empty((di,dj,comp), dtype, order='F')
fintor = getattr(libcgto, intor_name)
fintor(buf.ctypes.data_as(ctypes.c_void_p),
null, (ctypes.c_int*2)(*shls),
atm.ctypes.data_as(ctypes.c_void_p), natm,
bas.ctypes.data_as(ctypes.c_void_p), nbas,
env.ctypes.data_as(ctypes.c_void_p), null, null)
if comp == 1:
return buf.reshape(di,dj)
else:
return buf.transpose(2,0,1)
else:
raise RuntimeError('Unknown intor %s' % intor_name)
def make_loc(bas, key):
if 'cart' in key:
l = bas[:,ANG_OF]
dims = (l+1)*(l+2)//2 * bas[:,NCTR_OF]
elif 'sph' in key:
dims = (bas[:,ANG_OF]*2+1) * bas[:,NCTR_OF]
else: # spinor
l = bas[:,ANG_OF]
k = bas[:,KAPPA_OF]
dims = (l*4+2) * bas[:,NCTR_OF]
dims[k<0] = (l[k<0] * 2 + 2) * bas[k<0,NCTR_OF]
dims[k>0] = (l[k>0] * 2 ) * bas[k>0,NCTR_OF]
ao_loc = numpy.empty(len(dims)+1, dtype=numpy.int32)
ao_loc[0] = 0
dims.cumsum(dtype=numpy.int32, out=ao_loc[1:])
return ao_loc
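# A minimal sketch of make_loc on a toy basis (values assumed): an s shell
# (l=0) and a p shell (l=1), one contraction each, in spherical GTOs give
# dims = (2l+1)*nctr = [1, 3], hence ao_loc = [0, 1, 4].
def _make_loc_example():
    bas = numpy.zeros((2, BAS_SLOTS), dtype=numpy.int32)
    bas[:, ANG_OF] = [0, 1]  # angular momentum of each shell
    bas[:, NCTR_OF] = 1      # one contracted function per shell
    return make_loc(bas, 'sph')  # -> array([0, 1, 4], dtype=int32)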
def make_cintopt(atm, bas, env, intor):
intor = intor.replace('_sph','').replace('_cart','').replace('_spinor','')
c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
c_env = numpy.asarray(env, dtype=numpy.double, order='C')
natm = c_atm.shape[0]
nbas = c_bas.shape[0]
cintopt = lib.c_null_ptr()
foptinit = getattr(libcgto, intor+'_optimizer')
foptinit(ctypes.byref(cintopt),
c_atm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(natm),
c_bas.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(nbas),
c_env.ctypes.data_as(ctypes.c_void_p))
return ctypes.cast(cintopt, _cintoptHandler)
class _cintoptHandler(ctypes.c_void_p):
def __del__(self):
libcgto.CINTdel_optimizer(ctypes.byref(self))
def _stand_sym_code(sym):
if isinstance(sym, int):
return 's%d' % sym
elif sym[0] in 'sS':
return sym.lower()
else:
return 's' + sym.lower()
def ascint3(intor_name):
'''convert cint2 function name to cint3 function name'''
if intor_name.startswith('cint'):
intor_name = intor_name[1:]
if not (intor_name.endswith('_cart') or
intor_name.endswith('_sph') or
intor_name.endswith('_spinor')):
intor_name = intor_name + '_spinor'
return intor_name
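# Conversion sketch for ascint3 (names illustrative):
#   ascint3('cint1e_ovlp_sph') -> 'int1e_ovlp_sph'
#   ascint3('cint2e')          -> 'int2e_spinor'  (bare names get '_spinor')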
if __name__ == '__main__':
from pyscf import gto
mol = gto.Mole()
mol.verbose = 0
mol.output = None
mol.atom.extend([
["H", (0, 0, 0 )],
["H", (0, 0, 1 )],
])
mol.basis = {"H": 'cc-pvdz'}
mol.build()
mol.set_rinv_origin(mol.atom_coord(0))
for i in range(mol.nbas):
for j in range(mol.nbas):
print(i, j, getints_by_shell('int1e_prinvxp_sph', (i,j),
mol._atm, mol._bas, mol._env, 3))
| 44.713287
| 112
| 0.52127
|
6e1d7eb832981d248891e4afcb77258a1ba66811
| 2,141
|
py
|
Python
|
utils.py
|
AdiPat/mongotools
|
c3650800c6a925aab267bf2acaf073d7bd65a4cf
|
[
"MIT"
] | null | null | null |
utils.py
|
AdiPat/mongotools
|
c3650800c6a925aab267bf2acaf073d7bd65a4cf
|
[
"MIT"
] | null | null | null |
utils.py
|
AdiPat/mongotools
|
c3650800c6a925aab267bf2acaf073d7bd65a4cf
|
[
"MIT"
] | null | null | null |
#
# utils.py: Utility functions
#
import os
import logging
import traceback
from bson import json_util
import pymongo
from config import CONFIG
# File Processing
def get_json_filenames():
"""Returns list of file names with json extension from data/import folder"""
filenames = os.listdir(CONFIG['DATA_PATH'] + "/import")
json_filenames = []
for f in filenames:
        # os.path.splitext handles dotted names like 'a.b.json'
        ext = os.path.splitext(f)[1]
        if ext == '.json':
json_filenames.append(f)
return json_filenames
def read_json(fname):
"""Reads json file and returns data as python dictionary"""
data = None
with open(fname) as f:
try:
            data = json_util.loads(f.read())
        except Exception:
logging.error("read_json(): Failed to read file " + fname)
traceback.print_exc()
return data
def write_json(fname, data):
"""Writes data to DATA_PATH/export/fname.json"""
path = CONFIG['DATA_PATH'] + "/export"
filepath = path + "/fname" + ".json"
status = False
try:
with open(filepath, 'w') as f:
rawData = json_util.dumps(data)
f.write(rawData)
status = True
    except Exception:
logging.error("write_json(): Failed to write data to file " + filepath)
traceback.print_exc()
return status
# Database Utilities
def get_db():
"""Returns database object after connecting to mongo server"""
db = None
try:
client = pymongo.MongoClient(CONFIG['MONGO_SERVER'])
db = client[CONFIG['DB_NAME']]
    except Exception:
        logging.error("get_db(): Failed to connect to database")
        logging.error("get_db(): Check MONGO_SERVER and DB_NAME in config.py")
traceback.print_exc()
return db
def get_collections(db):
"""Returns list of collections present in database"""
res = None
if db:
res = db.list_collection_names()
return res
def collection_exists(collectionName, collections):
"""Checks if a collection exists in the database"""
res = False
if collections and collectionName:
res = collectionName in collections
return res
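# A minimal usage sketch (assumes a reachable MongoDB and valid CONFIG
# values; the 'users' collection name is hypothetical):
if __name__ == '__main__':
    db = get_db()
    collections = get_collections(db)
    print(collection_exists('users', collections))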
| 25.488095
| 80
| 0.633349
|
a5aeb62781c71b8f35f039a8f344357f166026bb
| 1,052
|
py
|
Python
|
1.1/chromeOptions.py
|
DrStarkXavier/NoonPyMe
|
4086e7b8b9d034289bf58f3ac916a1ad5d2ed87d
|
[
"Apache-2.0"
] | null | null | null |
1.1/chromeOptions.py
|
DrStarkXavier/NoonPyMe
|
4086e7b8b9d034289bf58f3ac916a1ad5d2ed87d
|
[
"Apache-2.0"
] | null | null | null |
1.1/chromeOptions.py
|
DrStarkXavier/NoonPyMe
|
4086e7b8b9d034289bf58f3ac916a1ad5d2ed87d
|
[
"Apache-2.0"
] | null | null | null |
from selenium.webdriver.chrome.options import Options
def set_chrome_options() -> Options:
    """Builds and returns Chrome options for Selenium.
    Chrome options for a headless browser are enabled.
    1. Explicitly saying that this is a headless application with --headless
    2. Explicitly bypassing the security level in Docker with --no-sandbox . Apparently as the Docker daemon always runs as a root user, Chrome crashes.
    3. Explicitly disabling the usage of /dev/shm/ . The /dev/shm partition is too small in certain VM environments, causing Chrome to fail or crash.
    4. Disabling the images with chrome_prefs["profile.default_content_settings"] = {"images": 2} .
    """
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_prefs = {}
chrome_options.experimental_options["prefs"] = chrome_prefs
chrome_prefs["profile.default_content_settings"] = {"images": 2}
return chrome_options
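# A minimal usage sketch (assumes chromedriver is installed and on PATH):
#   from selenium import webdriver
#   driver = webdriver.Chrome(options=set_chrome_options())
#   driver.get('https://example.com')
#   driver.quit()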
| 52.6
| 153
| 0.729087
|
9d3f89912bd063b3447cc140b41e0af02b48d4e7
| 2,707
|
py
|
Python
|
mesh_reconstruction/misc/prepare_blender_data.py
|
mrguz170/3D-sneaker
|
3fbcde4cc2d647faaee9139eca6276c870121f8a
|
[
"MIT"
] | 1,075
|
2017-11-29T09:29:57.000Z
|
2022-03-22T01:04:09.000Z
|
misc/prepare_blender_data.py
|
shunsukesaito/neural_renderer
|
1de1ebcac98b08e3bc19ec1817572d1b04ac5c54
|
[
"MIT"
] | 40
|
2017-12-12T09:09:00.000Z
|
2022-03-29T02:00:17.000Z
|
misc/prepare_blender_data.py
|
shunsukesaito/neural_renderer
|
1de1ebcac98b08e3bc19ec1817572d1b04ac5c54
|
[
"MIT"
] | 177
|
2017-12-01T13:22:10.000Z
|
2022-02-24T09:53:26.000Z
|
import math
import bpy
import mathutils
def clear():
bpy.ops.wm.open_mainfile(filepath='./tests/data/clean.blend')
def setup(image_size):
context = bpy.context
context.scene.render.resolution_x = image_size
context.scene.render.resolution_y = image_size
context.scene.render.resolution_percentage = 100
context.scene.render.use_antialiasing = False
context.scene.render.alpha_mode = 'SKY'
bpy.context.scene.render.image_settings.color_mode = 'RGB'
context.scene.world.horizon_color = (255, 255, 255)
# camera
camera = bpy.data.cameras.values()[0]
camera.sensor_width = 2
camera.sensor_height = 2
camera.lens = 1.732
# lighting
light = bpy.data.objects['Lamp']
light.data.energy = 1
context.scene.world.light_settings.use_environment_light = True
context.scene.world.light_settings.environment_energy = 0.5
context.scene.world.light_settings.environment_color = 'PLAIN'
def load_obj(filename):
bpy.ops.import_scene.obj(
filepath=filename, use_smooth_groups=False, use_split_objects=False, use_split_groups=False)
object_id = len(bpy.data.objects) - 1
obj = bpy.data.objects[object_id]
bpy.context.scene.objects.active = obj
# normalization
v_min = []
v_max = []
for i in range(3):
v_min.append(min([vertex.co[i] for vertex in obj.data.vertices]))
v_max.append(max([vertex.co[i] for vertex in obj.data.vertices]))
v_min = mathutils.Vector(v_min)
v_max = mathutils.Vector(v_max)
scale = max(v_max - v_min)
v_shift = (v_max - v_min) / 2 / scale
for v in obj.data.vertices:
v.co -= v_min
v.co /= scale
v.co -= v_shift
v.co *= 2
def set_camera_location(elevation, azimuth, distance):
# from https://blender.stackexchange.com/questions/18530/
x = 1 * math.cos(math.radians(-azimuth)) * math.cos(math.radians(elevation)) * distance
y = 1 * math.sin(math.radians(-azimuth)) * math.cos(math.radians(elevation)) * distance
z = 1 * math.sin(math.radians(elevation)) * distance
camera = bpy.data.objects["Camera"]
camera.location = x, y, z
direction = - camera.location
rot_quat = direction.to_track_quat('-Z', 'Y')
camera.rotation_euler = rot_quat.to_euler()
def render(filename):
bpy.context.scene.render.filepath = filename
bpy.ops.render.render(write_still=True)
def run():
image_size = 256
distance = 2.732
azimuth = 90
elevation = 0
clear()
setup(image_size)
load_obj('./tests/data/teapot.obj')
set_camera_location(elevation, azimuth, distance)
render('./tests/data/teapot_blender.png')
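# This script targets Blender's bundled Python (bpy); a typical invocation
# (paths assumed) is: blender --background --python prepare_blender_data.py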
if __name__ == '__main__':
run()
| 28.797872
| 100
| 0.680827
|
15cd62145d62bd646f24b5de42bac02ed5c5a888
| 463
|
py
|
Python
|
data/scripts/templates/object/static/terrain/tatooine/shared_rock_spire_smooth_tatooine.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/static/terrain/tatooine/shared_rock_spire_smooth_tatooine.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/static/terrain/tatooine/shared_rock_spire_smooth_tatooine.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/terrain/tatooine/shared_rock_spire_smooth_tatooine.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 27.235294
| 89
| 0.734341
|
3c73be326c5b7916c3e079fad8ad1066a3078e5d
| 2,297
|
py
|
Python
|
IRIS_data_download/IRIS_download_support/obspy/clients/seishub/__init__.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 2
|
2020-03-05T01:03:01.000Z
|
2020-12-17T05:04:07.000Z
|
IRIS_data_download/IRIS_download_support/obspy/clients/seishub/__init__.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 4
|
2021-03-31T19:25:55.000Z
|
2021-12-13T20:32:46.000Z
|
IRIS_data_download/IRIS_download_support/obspy/clients/seishub/__init__.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 2
|
2020-09-08T19:33:40.000Z
|
2021-04-05T09:47:50.000Z
|
# -*- coding: utf-8 -*-
"""
obspy.clients.seishub - SeisHub database client for ObsPy
=========================================================
The obspy.clients.seishub package contains a client for the seismological
database SeisHub (http://www.seishub.org).
:copyright:
The ObsPy Development Team (devs@obspy.org)
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
Basic Example
-------------
>>> from obspy.clients.seishub import Client
>>> from obspy import UTCDateTime
>>> client = Client(timeout=20)
>>> t = UTCDateTime('2010-01-01T10:00:00')
>>> st = client.waveform.get_waveforms(
... "BW", "MANZ", "", "EH*", t, t+20) # doctest: +SKIP
>>> st.sort() # doctest: +ELLIPSIS +SKIP
<obspy.core.stream.Stream object at ...>
>>> print(st) # doctest: +ELLIPSIS +SKIP
3 Trace(s) in Stream:
BW.MANZ..EHE | 2010-01-01T10:00:00.000000Z - ... | 200.0 Hz, 4001 samples
BW.MANZ..EHN | 2010-01-01T10:00:00.000000Z - ... | 200.0 Hz, 4001 samples
BW.MANZ..EHZ | 2010-01-01T10:00:00.000000Z - ... | 200.0 Hz, 4001 samples
Advanced Examples
-----------------
>>> client.waveform.get_network_ids() #doctest: +SKIP
['KT', 'BW', 'NZ', 'GR', ...]
>>> sta_ids = client.waveform.get_station_ids(network='BW') # doctest: +SKIP
>>> sorted(sta_ids) # doctest: +SKIP
['ALTM', 'BGLD', 'BW01',..., 'WETR', 'ZUGS']
>>> cha_ids = client.waveform.get_channel_ids(
... network='BW', station='MANZ') # doctest: +SKIP
>>> sorted(cha_ids) # doctest: +NORMALIZE_WHITESPACE +SKIP
['AEX', 'AEY', 'BHE', 'BHN', 'BHZ', 'E', 'EHE', 'EHN', 'EHZ', 'HHE', 'HHN',
'HHZ', 'LOG', 'N', 'SHE', 'SHN', 'SHZ', 'Z']
>>> paz = client.station.get_paz(
... 'BW.MANZ..EHZ', UTCDateTime('20090808')) # doctest: +SKIP
>>> paz = paz.items() # doctest: +SKIP
>>> sorted(paz) # doctest: +SKIP
[('gain', 60077000.0),
('poles', [(-0.037004+0.037016j), (-0.037004-0.037016j), (-251.33+0j),
(-131.04-467.29j), (-131.04+467.29j)]),
('sensitivity', 2516800000.0),
('zeros', [0j, 0j])]
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
from .client import Client
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
| 33.779412
| 77
| 0.611667
|
790d0f0d3713353db16a2853b94844f519067aee
| 3,458
|
py
|
Python
|
examples/mnist_elastic_docker/mnist_slp_estimator.py
|
Pandinosaurus/KungFu
|
80dfa463450330e920b413f65cc49d8e013b84a9
|
[
"Apache-2.0"
] | 291
|
2019-10-25T16:37:59.000Z
|
2022-03-17T21:47:09.000Z
|
examples/mnist_elastic_docker/mnist_slp_estimator.py
|
Pandinosaurus/KungFu
|
80dfa463450330e920b413f65cc49d8e013b84a9
|
[
"Apache-2.0"
] | 56
|
2019-10-26T08:25:33.000Z
|
2021-09-07T11:11:51.000Z
|
examples/mnist_elastic_docker/mnist_slp_estimator.py
|
Pandinosaurus/KungFu
|
80dfa463450330e920b413f65cc49d8e013b84a9
|
[
"Apache-2.0"
] | 53
|
2019-10-25T17:45:40.000Z
|
2022-02-08T13:09:39.000Z
|
import argparse
import functools
import operator
import os
import numpy as np
import tensorflow as tf
from kungfu.tensorflow.v1.helpers.mnist import load_datasets
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
def parse_args():
p = argparse.ArgumentParser(description='Example.')
p.add_argument('--data-dir', type=str, default='.', help='')
p.add_argument('--model-dir', type=str, default='.', help='')
p.add_argument('--kf-optimizer', type=str, default='sync_sgd', help='')
p.add_argument('--batch-size', type=int, default=100, help='')
p.add_argument('--num-epochs', type=int, default=1, help='')
p.add_argument('--learning-rate', type=float, default=0.01, help='')
return p.parse_args()
def slp(x, logits):
n = functools.reduce(operator.mul, [int(d) for d in x.shape[1:]], 1)
output = tf.layers.dense(inputs=tf.reshape(x, [-1, n]), units=logits)
return output, tf.argmax(output, axis=1)
def model_fn(features, labels, mode):
output, predictions = slp(features['x'], 10)
loss = tf.losses.sparse_softmax_cross_entropy(tf.cast(labels, tf.int32),
output)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions)
}
optimizer = tf.train.GradientDescentOptimizer(0.1)
from kungfu.tensorflow.optimizers import SynchronousSGDOptimizer
optimizer = SynchronousSGDOptimizer(optimizer)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops)
def input_fn(ds, batch_size, epochs=1, shuffle=True):
features = {'x': ds.images}
return tf.estimator.inputs.numpy_input_fn(x=features,
y=ds.labels,
batch_size=batch_size,
num_epochs=epochs,
shuffle=shuffle)
def get_model_dir(args):
from kungfu.python import uid
x = uid()
port = (x >> 16) & 0xffff
version = x & 0xffff
suffix = '%d.%d' % (port, version)
return os.path.join(args.model_dir, suffix)
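# Sketch of the uid unpacking above (hypothetical value): x = 0x1F900003
# gives port 0x1F90 (8080) and version 3, i.e. a model_dir suffix '8080.3'.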
MNIST_DATA_SIZE = 60000
def main(do_eval=True):
args = parse_args()
model_dir = get_model_dir(args)
data = load_datasets(args.data_dir, normalize=True)
classifier = tf.estimator.Estimator(model_fn, model_dir=model_dir)
from kungfu.tensorflow.experimental.hook import ElasticHook
hooks = [ElasticHook(args.batch_size, args.num_epochs, MNIST_DATA_SIZE)]
classifier.train(input_fn(data.train,
args.batch_size,
epochs=args.num_epochs),
hooks=hooks)
if not do_eval:
import time
time.sleep(1)
return
results = classifier.evaluate(input_fn(data.test,
args.batch_size,
shuffle=False),
hooks=[],
steps=1)
print('results: %s' % (results, ))
if __name__ == '__main__':
print('main started')
main(False)
print('main finished')
| 34.58
| 79
| 0.589358
|
37ecbb078f0a073a962981fcfb83ccb2af3c86cd
| 428
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/scattersmith/marker/_opacitysrc.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 7
|
2022-01-16T12:28:16.000Z
|
2022-03-04T15:31:45.000Z
|
packages/python/plotly/plotly/validators/scattersmith/marker/_opacitysrc.py
|
jiangrongbo/plotly.py
|
df19fc702b309586cc24e25373b87e8bdbb3ff60
|
[
"MIT"
] | 14
|
2021-10-20T23:33:47.000Z
|
2021-12-21T04:50:37.000Z
|
packages/python/plotly/plotly/validators/scattersmith/marker/_opacitysrc.py
|
jiangrongbo/plotly.py
|
df19fc702b309586cc24e25373b87e8bdbb3ff60
|
[
"MIT"
] | 1
|
2021-11-29T22:55:05.000Z
|
2021-11-29T22:55:05.000Z
|
import _plotly_utils.basevalidators
class OpacitysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="opacitysrc", parent_name="scattersmith.marker", **kwargs
):
super(OpacitysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| 30.571429
| 83
| 0.663551
|
dc75972abf8347c04c8f629f3cc04fac3df80cd8
| 70,230
|
py
|
Python
|
haros/extractor.py
|
esol-community/haros
|
d6b7d829d306ef5eba309e16b0183f24b50dbc44
|
[
"MIT"
] | null | null | null |
haros/extractor.py
|
esol-community/haros
|
d6b7d829d306ef5eba309e16b0183f24b50dbc44
|
[
"MIT"
] | null | null | null |
haros/extractor.py
|
esol-community/haros
|
d6b7d829d306ef5eba309e16b0183f24b50dbc44
|
[
"MIT"
] | null | null | null |
#Copyright (c) 2017 Andre Santos
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
###############################################################################
# Imports
###############################################################################
import itertools
import logging
from operator import attrgetter
import os
import re
import subprocess
from urllib2 import urlopen, URLError
import xml.etree.ElementTree as ET
import yaml
from bonsai.model import (
CodeGlobalScope, CodeReference, CodeFunctionCall, pretty_str
)
from bonsai.cpp.model import (
CppFunctionCall, CppDefaultArgument, CppOperator, CppReference
)
from bonsai.analysis import (
CodeQuery, resolve_reference, resolve_expression, get_control_depth,
get_conditions, is_under_loop
)
try:
from bonsai.cpp.clang_parser import CppAstParser
except ImportError:
CppAstParser = None
from bonsai.py.py_parser import PyAstParser
from rospkg import RosPack, RosStack, ResourceNotFound
from xml.etree.cElementTree import ElementTree
from distutils.spawn import find_executable
from .cmake_parser import RosCMakeParser
from .launch_parser import LaunchParser, LaunchParserError
from .metamodel import (
Project, Repository, Package, SourceFile, Node, Person, SourceCondition,
Publication, Subscription, ServiceServerCall, ServiceClientCall, Location,
ReadParameterCall, WriteParameterCall
)
from .util import cwd
###############################################################################
# Utility
###############################################################################
class LoggingObject(object):
log = logging.getLogger(__name__)
def findRosPackages(paths = None, as_stack = False):
"""
Find ROS packages inside folders.
:param paths: [list] of [str] File system path to search, [None] to use the ROS default search paths.
:param as_stack: [bool] Whether the paths point to stacks.
:returns: [dict] Dictionary of [str]package_name -> [str]package_path.
"""
ros_version = os.environ.get("ROS_VERSION")
if ros_version != "1":
        # Try ROS2 crawling with colcon if possible.
        # (In ambiguous cases, we prefer the ROS2 method first, because
        # ROS1 rospkg only produces misleading/incorrect information when
        # used in ROS2/mixed workspaces.)
colcon = find_executable('colcon')
        if colcon is not None:
cmd = [colcon, 'list']
            if paths is not None:
                cmd.append('--base-paths')
                cmd.extend(paths)
try:
pkglist = subprocess.check_output(cmd)
# format is <pkg_name>\t<pkg_path>\t<build_system>\n
pkglist = pkglist.split('\n')
pkgs = {}
for pkginfo in pkglist:
pkginfo_parts = pkginfo.split('\t')
if len(pkginfo_parts) < 2:
continue
if pkginfo_parts[0] in pkgs:
continue
pkgs[pkginfo_parts[0]] = pkginfo_parts[1]
return pkgs
            except (subprocess.CalledProcessError, OSError):
                pass
# ^ if colcon != None
# ^ if ros_version != "1"
# else: try the ROS1 way
ros = None
if as_stack:
ros = RosStack.get_instance(paths)
else:
ros = RosPack.get_instance(paths)
pkg_names = ros.list()
pkgs = {}
for pkg_name in pkg_names:
if pkg_name in pkgs:
continue
pkgs[pkg_name] = ros.get_path(pkg_name)
return pkgs
# ^ findRosPackages(paths)
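# A minimal usage sketch (paths are hypothetical):
#   findRosPackages()                       # default ROS search paths
#   findRosPackages(['/opt/ros/ws/src'])    # explicit workspace
#   # -> {'my_pkg': '/opt/ros/ws/src/my_pkg', ...}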
###############################################################################
# Source Extractor
###############################################################################
class ProjectExtractor(LoggingObject):
def __init__(self, index_file, env = None, pkg_cache = None,
repo_cache = None, repo_path = None, distro_url = None,
require_repos = False, parse_nodes = False, node_cache = None):
self.log.debug("ProjectExtractor(%s, %s, %s)",
index_file, repo_path, distro_url)
self.index_file = index_file
self.repo_path = repo_path
self.distribution = distro_url
self.require_repos = require_repos
self.parse_nodes = parse_nodes
        self.environment = env if env is not None else {}
        self.package_cache = pkg_cache if pkg_cache is not None else {}
        self.repo_cache = repo_cache if repo_cache is not None else {}
        self.node_cache = node_cache if node_cache is not None else {}
self.project = None
self.packages = None
self.missing = None
self.repositories = None
self.configurations = None
self.node_specs = None
self.rules = None
def index_source(self, settings=None):
self.log.debug("ProjectExtractor.index_source()")
self._setup()
self._load_user_repositories()
self._find_local_packages()
if self.missing and self.distribution:
self._load_distro_repositories()
self._find_local_packages()
self._topological_sort()
for name in self.missing:
self.log.warning("Could not find package " + name)
self._populate_packages_and_dependencies(settings=settings)
self._update_node_cache()
self._find_nodes(settings)
def _setup(self):
try:
with open(self.index_file, "r") as handle:
data = yaml.safe_load(handle)
        except IOError:
data = {}
self.project = Project(data.get("project", "default"))
self.repositories = data.get("repositories", {})
self.packages = set(data.get("packages")
or list(findRosPackages(["."])))
self.missing = set(self.packages)
self.configurations = data.get("configurations", {})
self.node_specs = data.get("nodes", {})
self.rules = data.get("rules", {})
def _load_user_repositories(self):
self.log.info("Looking up user provided repositories.")
extractor = RepositoryExtractor()
for name, data in self.repositories.iteritems():
repo = self.repo_cache.get(name)
if repo:
self.project.repositories.append(repo)
else:
extractor.load_from_user(name, data, project = self.project)
if self.repo_path:
try:
extractor.download(self.repo_path)
            except RepositoryCloneError:
                if self.require_repos:
                    raise
else:
self.log.warning("Could not download all repositories.")
def _find_local_packages(self):
self.log.info("Looking for packages locally.")
cdir = os.path.abspath(".")
alt_paths = [self.repo_path, cdir] if self.repo_path else [cdir]
extractor = PackageExtractor(alt_paths = alt_paths)
extractor.refresh_package_cache()
found = []
for name in self.missing:
pkg = self.package_cache.get(name)
if pkg:
self.project.packages.append(pkg)
found.append(name)
elif extractor.find_package(name, project = self.project):
found.append(name)
self.missing.difference_update(found)
def _load_distro_repositories(self):
self.log.info("Looking up repositories from official distribution.")
try:
data = yaml.safe_load(urlopen(self.distribution).read())["repositories"]
except URLError as e:
self.log.warning("Could not download distribution data.")
return
extractor = RepositoryExtractor()
extractor.load_needed_from_distro(data, self.missing, self.project)
if self.repo_path:
try:
extractor.download(self.repo_path)
            except RepositoryCloneError:
                if self.require_repos:
                    raise
else:
self.log.warning("Could not download all repositories.")
def _topological_sort(self):
dependencies = {}
pending = list(self.project.packages)
for pkg in self.project.packages:
pkg.topological_tier = -1
dependencies[pkg.id] = set(p for p in pkg.dependencies.packages
if p in self.packages)
tier = 1
emitted = []
while pending:
next_pending = []
next_emitted = []
for pkg in pending:
deps = dependencies[pkg.id]
deps.difference_update(emitted)
if deps:
next_pending.append(pkg)
else:
pkg.topological_tier = tier
next_emitted.append(pkg.name)
if not next_emitted:
# cyclic dependencies detected
self.log.warning("Cyclic dependencies: %s", next_pending)
for pkg in next_pending:
pkg.topological_tier = tier
next_pending = None
pending = next_pending
emitted = next_emitted
tier += 1
self.project.packages.sort(key = attrgetter("topological_tier", "id"))
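    # Tier assignment sketch for the loop above (hypothetical graph): with
    # dependencies {a: {}, b: {a}, c: {a}, d: {b, c}}, the loop emits
    # tier 1 = {a}, tier 2 = {b, c}, tier 3 = {d}; on a cycle, every still
    # pending package is pinned to the tier at which no progress was made.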
def _populate_packages_and_dependencies(self, settings=None):
found = set()
extractor = PackageExtractor()
extractor.packages = self.project.packages
for pkg in self.project.packages:
found.add(pkg.name)
analysis_ignore = extractor._populate_package(pkg)
if settings is not None:
settings.ignored_lines.update(analysis_ignore)
deps = extractor._extra
extractor._extra = []
while deps:
pkg = deps.pop()
assert pkg.name not in found
pkg._analyse = False
found.add(pkg.name)
self.project.packages.append(pkg)
analysis_ignore = extractor._populate_package(pkg)
if settings is not None:
settings.ignored_lines.update(analysis_ignore)
deps.extend(extractor._extra)
extractor._extra = []
def _find_nodes(self, settings):
pkgs = {pkg.name: pkg for pkg in self.project.packages if pkg._analyse}
ws = settings.workspace
if not ws:
ws = settings.find_ros_workspace()
if CppAstParser is None:
self.log.warning("C++ AST parser not found.")
extractor = NodeExtractor(pkgs, self.environment, ws = ws,
node_cache = self.node_cache,
parse_nodes = self.parse_nodes)
if self.parse_nodes and CppAstParser is not None:
if settings is None:
CppAstParser.set_library_path()
db_dir = os.path.join(extractor.workspace, "build")
if os.path.isfile(
os.path.join(db_dir, "compile_commands.json")):
CppAstParser.set_database(db_dir)
else:
                # library file if given explicitly, otherwise path
if settings.cpp_parser_lib_file:
CppAstParser.set_library_file(settings.cpp_parser_lib_file)
else:
CppAstParser.set_library_path(settings.cpp_parser_lib)
CppAstParser.set_standard_includes(settings.cpp_includes)
db_dir = settings.cpp_compile_db
if db_dir and os.path.isfile(
os.path.join(db_dir, "compile_commands.json")):
CppAstParser.set_database(settings.cpp_compile_db)
for pkg in self.project.packages:
if pkg._analyse and pkg.name not in self.package_cache:
extractor.find_nodes(pkg)
def _update_node_cache(self):
self.log.debug("Importing cached Nodes.")
        data = list(self.node_cache.itervalues())
self.node_cache = {}
for datum in data:
try:
pkg = self._get_package(datum["package"])
source_files = self._get_files(pkg, datum["files"])
except ValueError as e:
# either a package or a file is no longer part of the analysis
self.log.debug("Cached node %s: %s", datum["name"], e)
continue
mtime = datum["timestamp"]
            if any(sf.timestamp > mtime for sf in source_files):
                # a file was modified, the node needs to be parsed again
                continue
node = Node(datum["name"], pkg, rosname = datum["rosname"],
nodelet = datum["nodelet"])
node.source_files = source_files
for p in datum["advertise"]:
node.advertise.append(self._pub_from_JSON(p))
for p in datum["subscribe"]:
node.subscribe.append(self._sub_from_JSON(p))
for p in datum["service"]:
node.service.append(self._srv_from_JSON(p))
for p in datum["client"]:
node.client.append(self._client_from_JSON(p))
for p in datum["readParam"]:
node.read_param.append(self._read_from_JSON(p))
for p in datum["writeParam"]:
node.write_param.append(self._write_from_JSON(p))
self.node_cache[node.node_name] = node
def _get_package(self, name):
for pkg in self.project.packages:
if pkg.name == name:
return pkg
raise ValueError("cannot find package: " + name)
def _get_files(self, pkg, filenames):
files = []
for filename in filenames:
found = False
for sf in pkg.source_files:
if sf.full_name == filename:
found = True
files.append(sf)
break
if not found:
raise ValueError("cannot find file: " + filename)
return files
def _pub_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location = l(c["location"]))
for c in datum["conditions"]]
return Publication(datum["name"], datum["namespace"], datum["type"],
datum["queue"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs, location = l(datum["location"]))
def _sub_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location = l(c["location"]))
for c in datum["conditions"]]
return Subscription(datum["name"], datum["namespace"], datum["type"],
datum["queue"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs, location = l(datum["location"]))
def _srv_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location = l(c["location"]))
for c in datum["conditions"]]
return ServiceServerCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs,
location = l(datum["location"]))
def _client_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location = l(c["location"]))
for c in datum["conditions"]]
return ServiceClientCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs,
location = l(datum["location"]))
def _read_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location = l(c["location"]))
for c in datum["conditions"]]
return ReadParameterCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs,
location = l(datum["location"]))
def _write_from_JSON(self, datum):
l = self._location_from_JSON
cs = [SourceCondition(c["condition"], location = l(c["location"]))
for c in datum["conditions"]]
return WriteParameterCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = cs,
location = l(datum["location"]))
def _location_from_JSON(self, datum):
try:
pkg = self._get_package(datum["package"])
sf = None
filename = datum["file"]
if filename:
sf = self._get_files(pkg, [filename])[0]
except ValueError:
return None
return Location(pkg, file = sf, line = datum["line"],
fun = datum["function"], cls = datum["class"])
###############################################################################
# Repository Extractor
###############################################################################
class RepositoryCloneError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class RepositoryExtractor(LoggingObject):
def __init__(self):
self.repositories = []
self.declared_packages = set()
def load_from_user(self, name, data, project = None):
self.log.debug("RepositoryExtractor.from_user(%s, %s)", name, data)
repo = Repository(name, proj = project)
repo.status = "private"
repo.vcs = data["type"]
repo.url = data["url"]
repo.version = data["version"]
repo.declared_packages = data["packages"]
self.repositories.append(repo)
self.declared_packages.update(repo.declared_packages)
if project:
project.repositories.append(repo)
return repo
def load_from_distro(self, name, data, project = None):
self.log.debug("RepositoryExtractor.from_distro(%s, %s)", name, data)
if not "source" in data:
self.log.debug("There is no source in provided data.")
return
repo = Repository(name, proj = project)
repo.status = data.get("status")
src = data["source"]
repo.vcs = src["type"]
repo.url = src["url"]
repo.version = src["version"]
if "release" in data:
repo.declared_packages = data["release"].get("packages", [name])
self.repositories.append(repo)
self.declared_packages.update(repo.declared_packages)
if project:
project.repositories.append(repo)
return repo
def load_needed_from_distro(self, data, pkgs, project = None):
if not pkgs:
return True
remaining = set(pkgs)
for name, info in data.iteritems():
if not "release" in info:
continue
for pkg in info["release"].get("packages", [name]):
try:
remaining.remove(pkg)
self.load_from_distro(name, info, project = project)
                except KeyError:
                    pass
if not remaining:
break
return not remaining
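    # A distribution entry consumed by the loaders above is expected to follow
    # the rosdistro layout, roughly (illustrative example):
    #   {"status": "maintained",
    #    "source": {"type": "git", "url": "https://...", "version": "melodic-devel"},
    #    "release": {"packages": ["pkg_a", "pkg_b"]}}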
def download(self, repo_path):
self.log.debug("RepositoryExtractor.download(%s)", repo_path)
for repo in self.repositories:
if not repo.url:
self.log.debug("%s has no URL to download from.", repo.id)
continue
path = os.path.join(repo_path, repo.name)
clone = False
if not os.path.exists(path):
os.makedirs(path)
clone = True
with cwd(path):
if repo.vcs == "git":
self._download_git(repo, path, clone)
elif repo.vcs == "hg":
self._download_hg(repo, path, clone)
elif repo.vcs == "svn":
self._download_svn(repo, path, clone)
return True
GIT_INIT = ("git", "init")
GIT_PULL = ("git", "pull")
GIT_COUNT = ("git", "rev-list", "HEAD", "--count")
def _download_git(self, repo, path, clone = False):
self.log.debug("RepositoryExtractor._download_git(%s)", path)
try:
if clone:
subprocess.check_call(self.GIT_INIT)
subprocess.check_call(["git", "remote",
"add", "-t", repo.version,
"-f", "origin", repo.url])
subprocess.check_call(["git", "checkout", repo.version])
else:
subprocess.check_call(self.GIT_PULL)
repo.path = path
repo.commits = int(subprocess.check_output(self.GIT_COUNT).rstrip())
except subprocess.CalledProcessError as e:
raise RepositoryCloneError("git error: " + str(e))
HG_PULL = ("hg", "pull")
HG_COUNT = ("hg", "id", "--num", "--rev", "tip")
def _download_hg(self, repo, path, clone = False):
self.log.debug("RepositoryExtractor._download_hg(%s)", path)
try:
if clone:
subprocess.check_call(["hg", "clone", repo.url,
"-r", repo.version])
else:
subprocess.check_call(self.HG_PULL)
repo.path = path
repo.commits = int(subprocess.check_output(self.HG_COUNT).rstrip())
except subprocess.CalledProcessError as e:
raise RepositoryCloneError("hg error: " + str(e))
SVN_FETCH = ("git", "svn", "fetch")
def _download_svn(self, repo, path, clone = False):
self.log.debug("RepositoryExtractor._download_svn(%s)", path)
try:
if clone:
if repo.version == "trunk":
version = repo.version
else:
version = "branches/" + repo.version
subprocess.check_call(["git", "svn", "clone",
"-T", version, repo.url])
else:
subprocess.check_call(self.SVN_FETCH)
            repo.path = path
            repo.commits = int(subprocess.check_output(self.GIT_COUNT).rstrip())
except subprocess.CalledProcessError as e:
raise RepositoryCloneError("git-svn error: " + str(e))
###############################################################################
# Package Extractor
###############################################################################
class PackageExtractor(LoggingObject):
def __init__(self, alt_paths = None):
self.packages = []
self.rospack_pkgs = None
self.rosstack_pkgs = None
self.alt_paths = alt_paths
self.altpack_pkgs = None
self.altstack_pkgs = None
self._pkg_cache = {}
self._extra = []
def refresh_package_cache(self):
self.rospack_pkgs = None
self.rosstack_pkgs = None
self.altpack_pkgs = None
self.altstack_pkgs = None
# To use with LaunchParser.
def get(self, pkg_id):
if pkg_id in self._pkg_cache:
return self._pkg_cache[pkg_id]
for pkg in self.packages:
if pkg.id == pkg_id:
self._pkg_cache[pkg_id] = pkg
return pkg
try:
assert pkg_id.startswith("package:")
pkg = self._find(pkg_id[8:], None)
self._pkg_cache[pkg_id] = pkg
self._extra.append(pkg)
except (IOError, ET.ParseError, ResourceNotFound):
return None
return pkg
def find_package(self, name, project=None, analyse=True):
try:
pkg = self._find(name, project)
pkg._analyse = analyse
self.packages.append(pkg)
if project:
project.packages.append(pkg)
for repo in project.repositories:
if name in repo.declared_packages:
pkg.repository = repo
repo.packages.append(pkg)
break
# self._populate_package(pkg)
except (IOError, ET.ParseError, KeyError):
return None
return pkg
def _find(self, name, project):
        path = None
        if self.alt_paths:
            if self.altpack_pkgs is None:
                self.altpack_pkgs = findRosPackages(paths=self.alt_paths, as_stack=False)
            path = self.altpack_pkgs.get(name, None)
            if path is None:
                if self.altstack_pkgs is None:
                    self.altstack_pkgs = findRosPackages(paths=self.alt_paths, as_stack=True)
                path = self.altstack_pkgs.get(name, None)
        if path is None:
            if self.rospack_pkgs is None:
                self.rospack_pkgs = findRosPackages(as_stack=False)
            path = self.rospack_pkgs.get(name, None)
        if path is None:
            if self.rosstack_pkgs is None:
                self.rosstack_pkgs = findRosPackages(as_stack=True)
            path = self.rosstack_pkgs.get(name, None)
        if path is None:
            raise KeyError(name)
return PackageParser.parse(os.path.join(path, "package.xml"),
project = project)
EXCLUDED = (".git", "doc", "cmake", ".eggs", "__pycache__")
def _populate_package(self, pkg):
self.log.debug("PackageExtractor.populate(%s)", pkg)
if not pkg.path:
self.log.debug("Package %s has no path", pkg.name)
return
self.log.info("Indexing source files for package %s", pkg.name)
analysis_ignore = {}
#pkgs = {pkg.id: pkg for pkg in self.packages}
launch_parser = LaunchParser(pkgs=self)
prefix = len(pkg.path) + len(os.path.sep)
for root, subdirs, files in os.walk(pkg.path, topdown=True):
if 'COLCON_IGNORE' in files or 'AMENT_IGNORE' in files or 'CATKIN_IGNORE' in files:
del subdirs[:] # don't traverse into subdirectories
continue # skip
subdirs[:] = [d for d in subdirs if d not in self.EXCLUDED]
path = root[prefix:]
for filename in files:
self.log.debug("Found file %s at %s", filename, path)
source = SourceFile(filename, path, pkg)
ignore = source.set_file_stats()
if any(v for v in ignore.itervalues()):
analysis_ignore[source.id] = ignore
if pkg._analyse and source.language == "launch":
self.log.info("Parsing launch file: " + source.path)
try:
source.tree = launch_parser.parse(source.path)
except LaunchParserError as e:
self.log.warning("Parsing error in %s:\n%s",
source.path, str(e))
pkg.source_files.append(source)
pkg.size += source.size
pkg.lines += source.lines
pkg.sloc += source.sloc
return analysis_ignore
###############################################################################
# Package Parser
###############################################################################
class PackageParser(LoggingObject):
@staticmethod
def parse(pkg_file, project = None):
PackageParser.log.debug("PkgParser.parse(%s, %s)", pkg_file, project)
with open(pkg_file, "r") as handle:
root = ET.parse(handle).getroot()
name = root.find("name").text.strip()
package = Package(name, proj = project)
package.path = os.path.dirname(pkg_file)
PackageParser.log.info("Found package %s at %s", package, package.path)
PackageParser._parse_metadata(root, package)
PackageParser._parse_export(root, package)
PackageParser._parse_dependencies(root, package)
return package
@staticmethod
def _parse_metadata(xml, package):
package.description = xml.find("description").text.strip()
for el in xml.findall("maintainer"):
name = el.text.strip() or "?"
email = el.get("email") or "email@example.com"
package.maintainers.add(Person(name, email))
for el in xml.findall("author"):
name = el.text.strip() or "?"
email = el.get("email") or "email@example.com"
package.authors.add(Person(name, email))
for el in xml.findall("license"):
package.licenses.add(el.text.strip())
for el in xml.findall("url"):
value = el.get("type")
if value is None or value == "website":
if el.text:
package.website = el.text.strip()
elif value == "repository":
if el.text:
package.vcs_url = el.text.strip()
elif value == "bugtracker":
if el.text:
package.bug_url = el.text.strip()
el = xml.find("version")
if el is not None:
package.version = el.text.strip()
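    # The metadata above is read from a catkin package.xml; an abridged example
    # of the elements this parser consumes (per REP 127/140):
    #   <package format="2">
    #     <name>my_pkg</name>
    #     <version>0.1.0</version>
    #     <description>...</description>
    #     <maintainer email="dev@example.com">Dev Name</maintainer>
    #     <license>BSD</license>
    #     <url type="repository">https://github.com/...</url>
    #   </package>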
@staticmethod
def _parse_export(xml, package):
el = xml.find("export")
        if el is not None:
            package.is_metapackage = el.find("metapackage") is not None
            if el.find("nodelet") is not None:
nodelets = el.find("nodelet").get("plugin")
nodelets = nodelets.replace("${prefix}", package.path)
with open(nodelets, "r") as handle:
root = ET.parse(handle).getroot()
PackageParser.log.info("Found nodelets at %s", nodelets)
if root.tag == "library":
libs = (root,)
else:
libs = root.findall("library")
for el in libs:
libname = el.get("path").rsplit(os.sep)[-1]
for cl in el.findall("class"):
nodelet = cl.get("type").split("::")[-1]
node = Node(libname, package, nodelet = nodelet)
package.nodes.append(node)
@staticmethod
def _parse_dependencies(xml, package):
sources = ["build_depend"]
if xml.get("format") == "2":
sources.extend(("depend", "build_export_depend", "exec_depend"))
else:
sources.append("run_depend")
for src in sources:
for el in xml.findall(src):
name = el.text.strip()
if name:
package.dependencies.packages.add(name)
###############################################################################
# Hard-coded Node Parser
###############################################################################
class HardcodedNodeParser(LoggingObject):
model_dir = None
distro = None
_cache = {}
@classmethod
def get(cls, pkg, node_type):
cls.log.debug("Fetching hard-coded node: (%s, %s, %s)",
pkg, node_type, cls.distro)
node_id = "node:" + pkg + "/" + node_type
if node_id in cls._cache:
cls.log.debug("Node already in cache.")
return cls._cache[node_id]
filename = os.path.join(cls.model_dir, pkg + ".yaml")
try:
with open(filename) as handle:
data = yaml.safe_load(handle)
        except IOError:
cls.log.debug("YAML file not found: %s", filename)
return None
        if cls.distro not in data:
cls.log.debug("Package has no data for ROS %s.", cls.distro)
return None
        if node_type not in data[cls.distro]:
cls.log.debug("Node does not exist for ROS %s.", cls.distro)
return None
cls.log.debug("Building node from YAML data.")
node = cls._build_node(node_type, cls.distro, Package(pkg), data)
cls._cache[node_id] = node
return node
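    # The YAML model files are assumed to be keyed first by ROS distro, then by
    # node type, e.g. (illustrative sketch based on the reads in _build_node):
    #   melodic:
    #     my_node:
    #       rosname: my_node
    #       nodelet: false
    #       advertise: []
    #       subscribe: []
    #       base: null   # optional: inherit from another node entry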
@classmethod
def _build_node(cls, node_type, distro, pkg, data):
node_data = data[distro][node_type]
base = node_data.get("base")
if base:
node = cls._build_node(node_type, base, pkg, data)
else:
node = Node(node_type, pkg, rosname = node_data.get("rosname"),
nodelet = node_type if node_data["nodelet"] else None)
for datum in node_data.get("advertise", ()):
pub = Publication(datum["name"], datum["namespace"],
datum["type"], datum["queue"],
control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = [SourceCondition(c)
for c in datum["conditions"]])
node.advertise.append(pub)
for datum in node_data.get("subscribe", ()):
sub = Subscription(datum["name"], datum["namespace"],
datum["type"], datum["queue"],
control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = [SourceCondition(c)
for c in datum["conditions"]])
node.subscribe.append(sub)
for datum in node_data.get("service", ()):
srv = ServiceServerCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = [SourceCondition(c)
for c in datum["conditions"]])
node.service.append(srv)
for datum in node_data.get("client", ()):
cli = ServiceClientCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = [SourceCondition(c)
for c in datum["conditions"]])
node.client.append(cli)
for datum in node_data.get("readParam", ()):
par = ReadParameterCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = [SourceCondition(c)
for c in datum["conditions"]])
node.read_param.append(par)
for datum in node_data.get("writeParam", ()):
par = WriteParameterCall(datum["name"], datum["namespace"],
datum["type"], control_depth = datum["depth"],
repeats = datum["repeats"],
conditions = [SourceCondition(c)
for c in datum["conditions"]])
node.write_param.append(par)
return node
###############################################################################
# Node Extractor
###############################################################################
class NodeExtractor(LoggingObject):
def __init__(self, pkgs, env, ws=None, node_cache=None, parse_nodes=False):
self.package = None
self.packages = pkgs
self.environment = env
self.workspace = ws
self.node_cache = node_cache
self.parse_nodes = parse_nodes
self.nodes = []
self.roscpp_extractor = None
self.rospy_extractor = None
def find_nodes(self, pkg):
self.log.debug("NodeExtractor.find_nodes(%s)", pkg)
self.package = pkg
srcdir = self.package.path[len(self.workspace):]
srcdir = os.path.join(self.workspace, srcdir.split(os.sep, 1)[0])
bindir = os.path.join(self.workspace, "build")
cmake_path = os.path.join(self.package.path, "CMakeLists.txt")
if os.path.isfile(cmake_path):
parser = RosCMakeParser(srcdir, bindir, pkgs = self.packages,
env = self.environment,
vars = self._default_variables())
parser.parse(cmake_path)
self._update_nodelets(parser.libraries)
self._register_nodes(parser.executables)
        else:
            # Pure Python packages may legitimately lack a CMakeLists.txt;
            # in that case, search for Python files that define a main function.
            pattern = re.compile(r'^def\s+main\s*\(.*\)\s*:')
            for sf in pkg.source_files:
                if sf.language != 'python':
                    continue  # continue with next file
                entry_point_found = False
                with open(sf.path) as f:
                    for line in f:
                        match = pattern.match(line)
                        if match is not None:
                            entry_point_found = True
                            break
                if not entry_point_found:
                    continue  # continue with next file
                # this is a python file with a 'main' function,
                # so we consider it a node
                node = Node(sf.full_name, pkg)
                node.source_files.append(sf)
self.nodes.append(node)
self.package.nodes.append(node)
if self.parse_nodes:
self._extract_primitives()
def _default_variables(self):
# TODO: clean up these hardcoded values
v = {}
v["catkin_INCLUDE_DIRS"] = os.path.join(self.workspace,
"devel/include")
v["Boost_INCLUDE_DIRS"] = "/usr/include/"
v["Eigen_INCLUDE_DIRS"] = "/usr/include/eigen3"
v["ImageMagick_INCLUDE_DIRS"] = "/usr/include/ImageMagick"
v["PROJECT_SOURCE_DIR"] = self.package.path
return v
def _get_file(self, path):
for sf in self.package.source_files:
if sf.path == path:
return sf
return None
def _update_nodelets(self, libraries):
lib_files = {}
for target in libraries.itervalues():
files = []
for path in target.files:
sf = self._get_file(path)
if sf:
files.append(sf)
for link in target.links:
for path in link.files:
sf = self._get_file(path)
if sf:
files.append(sf)
lib_files[target.prefixed_name] = files
for nodelet in self.package.nodes:
if not nodelet.is_nodelet:
continue
if nodelet.name in lib_files:
nodelet.source_files = lib_files[nodelet.name]
def _register_nodes(self, executables):
for target in executables.itervalues():
node = Node(target.output_name, self.package)
for path in target.files:
sf = self._get_file(path)
if sf:
node.source_files.append(sf)
for link in target.links:
for path in link.files:
sf = self._get_file(path)
if sf:
node.source_files.append(sf)
self.nodes.append(node)
self.package.nodes.append(node)
def _extract_primitives(self, force_when_cached=False):
self.roscpp_extractor = RoscppExtractor(self.package, self.workspace)
self.rospy_extractor = RospyExtractor(self.package, self.workspace)
for i in xrange(len(self.package.nodes)):
node = self.package.nodes[i]
self.log.debug("Extracting primitives for node %s", node.id)
if node.source_tree is not None:
self.log.debug("Node already has a source tree. Skipped.")
continue
if (node.node_name in self.node_cache) and not force_when_cached:
self.log.debug("Using Node %s from cache.", node.node_name)
node = self.node_cache[node.node_name]
assert node.package is self.package
self.package.nodes[i] = node
continue
node.source_tree = CodeGlobalScope()
node.advertise = []
node.subscribe = []
node.service = []
node.client = []
node.read_param = []
node.write_param = []
if not node.source_files:
self.log.warning("no source files for node " + node.id)
if node.language == "cpp" and CppAstParser is not None:
self.roscpp_extractor.extract(node)
elif node.language == 'py':
self.rospy_extractor.extract(node)
else:
self.log.debug("Node written in %s.", node.language)
class RoscppExtractor(LoggingObject):
def __init__(self, package, workspace):
self.package = package
self.workspace = workspace
def extract(self, node):
self.log.debug("Parsing C++ files for node %s", node.id)
parser = CppAstParser(workspace=self.workspace, logger=__name__)
for sf in node.source_files:
self.log.debug("Parsing C++ file %s", sf.path)
if parser.parse(sf.path) is None:
self.log.warning("no compile commands for " + sf.path)
node.source_tree = parser.global_scope
# ----- queries after parsing, since global scope is reused -----------
self._query_comm_primitives(node, parser.global_scope)
self._query_nh_param_primitives(node, parser.global_scope)
self._query_param_primitives(node, parser.global_scope)
def _query_comm_primitives(self, node, gs):
for call in CodeQuery(gs).all_calls.where_name("advertise").get():
if call.canonical_type != "ros::Publisher":
continue
self._on_publication(node,
self._resolve_node_handle(call.method_of), call)
for call in CodeQuery(gs).all_calls.where_name("subscribe").get():
if call.canonical_type != "ros::Subscriber":
continue
self._on_subscription(node,
self._resolve_node_handle(call.method_of), call)
for call in CodeQuery(gs).all_calls.where_name("advertiseService").get():
if call.canonical_type != "ros::ServiceServer":
continue
self._on_service(node,
self._resolve_node_handle(call.method_of), call)
for call in CodeQuery(gs).all_calls.where_name("serviceClient").get():
if call.canonical_type != "ros::ServiceClient":
continue
self._on_client(node,
self._resolve_node_handle(call.method_of), call)
self.log.debug("Looking for image_transport::SubscriberFilter calls.")
for call in CodeQuery(gs).all_calls.where_name("SubscriberFilter").get():
self.log.debug("Found: %s", call.pretty_str())
self.log.debug("%s", type(call))
self.log.debug("%s", call.__dict__)
if isinstance(call.reference, str):
if not call.reference.startswith("c:@N@image_transport@S@SubscriberFilter"):
continue
if not "image_transport::SubscriberFilter" in call.canonical_type:
continue
n = call.arguments[0] if call.arguments else None
self._on_subscription(node, self._resolve_it_node_handle(n),
call, topic_pos = 1, queue_pos = 2,
msg_type = "sensor_msgs/Image")
self.log.debug("Looking for message_filters::Subscriber calls.")
for call in CodeQuery(gs).all_calls.where_name("Subscriber").get():
self.log.debug("Found: %s", call.pretty_str())
self.log.debug("%s", type(call))
self.log.debug("%s", call.__dict__)
if isinstance(call.reference, str):
if not call.reference.startswith("c:@N@message_filters@S@Subscriber"):
continue
if not "message_filters::Subscriber" in call.canonical_type:
continue
n = call.arguments[0] if call.arguments else None
self._on_subscription(node, self._resolve_node_handle(n),
call, topic_pos = 1, queue_pos = 2)
self.log.debug("Looking for image_transport::Subscriber calls.")
for call in CodeQuery(gs).all_calls.where_name("subscribe").get():
if call.canonical_type != "image_transport::Subscriber":
continue
self.log.debug("Found: %s", call.pretty_str())
self.log.debug("%s", type(call))
self.log.debug("%s", call.__dict__)
n = call.method_of if call.method_of else None
self._on_subscription(node, self._resolve_it_node_handle(n),
call, msg_type = "sensor_msgs/Image")
self.log.debug("Looking for image_transport::Publisher.")
for call in CodeQuery(gs).all_calls.where_name("advertise").get():
if call.canonical_type != "image_transport::Publisher":
continue
self.log.debug("Found: %s", call.pretty_str())
self.log.debug("%s", type(call))
self.log.debug("%s", call.__dict__)
n = call.method_of if call.method_of else None
self._on_publication(node, self._resolve_it_node_handle(n),
call, msg_type = "sensor_msgs/Image")
def _query_nh_param_primitives(self, node, gs):
nh_prefix = "c:@N@ros@S@NodeHandle@"
reads = ("getParam", "getParamCached", "param", "hasParam",
"searchParam")
for call in CodeQuery(gs).all_calls.where_name(reads).get():
if (call.full_name.startswith("ros::NodeHandle")
or (isinstance(call.reference, str)
and call.reference.startswith(nh_prefix))):
self._on_read_param(node, self._resolve_node_handle(call),
call)
writes = ("setParam", "deleteParam")
for call in CodeQuery(gs).all_calls.where_name(writes).get():
if (call.full_name.startswith("ros::NodeHandle")
or (isinstance(call.reference, str)
and call.reference.startswith(nh_prefix))):
self._on_write_param(node, self._resolve_node_handle(call),
call)
def _query_param_primitives(self, node, gs):
ros_prefix = "c:@N@ros@N@param@"
reads = ("get", "getCached", "param", "has")
for call in CodeQuery(gs).all_calls.where_name(reads).get():
if (call.full_name.startswith("ros::param")
or (isinstance(call.reference, str)
and call.reference.startswith(ros_prefix))):
self._on_read_param(node, "", call)
for call in (CodeQuery(gs).all_calls.where_name("search")
.where_result("bool").get()):
if (call.full_name.startswith("ros::param")
or (isinstance(call.reference, str)
and call.reference.startswith(ros_prefix))):
if len(call.arguments) > 2:
ns = resolve_expression(call.arguments[0])
if not isinstance(ns, basestring):
ns = "?"
else:
ns = "~"
self._on_read_param(node, ns, call)
writes = ("set", "del")
for call in CodeQuery(gs).all_calls.where_name(writes).get():
if (call.full_name.startswith("ros::param")
or (isinstance(call.reference, str)
and call.reference.startswith(ros_prefix))):
self._on_write_param(node, "", call)
def _on_publication(self, node, ns, call, topic_pos=0, queue_pos=1,
msg_type=None):
if len(call.arguments) <= 1:
return
name = self._extract_topic(call, topic_pos=topic_pos)
msg_type = msg_type or self._extract_message_type(call)
queue_size = self._extract_queue_size(call, queue_pos=queue_pos)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
pub = Publication(name, ns, msg_type, queue_size, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.advertise.append(pub)
self.log.debug("Found Publication on %s/%s (%s)", ns, name, msg_type)
def _on_subscription(self, node, ns, call, topic_pos=0, queue_pos=1,
msg_type=None):
if len(call.arguments) <= 1:
return
name = self._extract_topic(call, topic_pos=topic_pos)
msg_type = msg_type or self._extract_message_type(call)
queue_size = self._extract_queue_size(call, queue_pos=queue_pos)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
sub = Subscription(name, ns, msg_type, queue_size, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.subscribe.append(sub)
self.log.debug("Found Subscription on %s/%s (%s)", ns, name, msg_type)
def _on_service(self, node, ns, call):
if len(call.arguments) <= 1:
return
name = self._extract_topic(call)
msg_type = self._extract_message_type(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
srv = ServiceServerCall(name, ns, msg_type, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.service.append(srv)
self.log.debug("Found Service on %s/%s (%s)", ns, name, msg_type)
def _on_client(self, node, ns, call):
if len(call.arguments) <= 1:
return
name = self._extract_topic(call)
msg_type = self._extract_message_type(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
cli = ServiceClientCall(name, ns, msg_type, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.client.append(cli)
self.log.debug("Found Client on %s/%s (%s)", ns, name, msg_type)
def _on_read_param(self, node, ns, call):
if len(call.arguments) < 1:
return
name = self._extract_topic(call)
depth = get_control_depth(call, recursive = True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location = location)
for c in get_conditions(call, recursive = True)]
read = ReadParameterCall(name, ns, None, location = location,
control_depth = depth, conditions = conditions,
repeats = is_under_loop(call, recursive = True))
node.read_param.append(read)
self.log.debug("Found Read on %s/%s (%s)", ns, name, "string")
def _on_write_param(self, node, ns, call):
if len(call.arguments) < 1:
return
name = self._extract_topic(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
wrt = WriteParameterCall(name, ns, None, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.write_param.append(wrt)
self.log.debug("Found Write on %s/%s (%s)", ns, name, "string")
def _call_location(self, call):
try:
source_file = next(
sf
for sf in self.package.source_files
if sf.path == call.file)
except StopIteration:
            source_file = None
function = call.function
if function:
function = function.name
return Location(self.package, file=source_file, line=call.line,
fun=function)
def _resolve_it_node_handle(self, value):
value = resolve_expression(value)
if (isinstance(value, CppFunctionCall)
and value.name == "ImageTransport"):
return self._resolve_node_handle(value.arguments[0])
return "?"
def _resolve_node_handle(self, call):
ns = "?"
node_handle = getattr(call, 'method_of', None) or call
if getattr(node_handle, 'name', None) == 'operator->':
node_handle = node_handle.arguments[0]
node_handle_def = (resolve_reference(node_handle)
if isinstance(node_handle, CppReference)
else None)
# A function needs to be called to create a NodeHandle (constructors
# are functions)
if isinstance(node_handle_def, CppFunctionCall):
# node_handle_def is a call to the constructor
if node_handle_def.name == 'NodeHandle':
args = node_handle_def.arguments
# Copy constructor
if len(args) == 1:
parent = args[0]
return self._resolve_node_handle(parent)
                # All other constructors have at least two arguments;
                # the third is never meaningful.
                # If a parent NodeHandle is passed, it is the first argument.
parent = None if isinstance(args[0], basestring) else args[0]
prefix = ('/' + self._resolve_node_handle(parent)
if parent
else '')
# If a namespace argument is passed, it is either first or
# second parameter. Only the first has an empty default value.
passed_ns = '?'
if isinstance(args[0], basestring):
passed_ns = args[0]
elif isinstance(args[0], CppDefaultArgument):
passed_ns = ''
elif isinstance(args[1], basestring):
passed_ns = args[1]
ns = prefix + passed_ns
elif node_handle_def.name == 'getNodeHandle':
ns = ''
elif node_handle_def.name == 'getPrivateNodeHandle':
ns = '~'
elif isinstance(node_handle_def, CppDefaultArgument):
ns = ''
return ns
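    # Illustrative examples of what the resolution above aims to produce for
    # common C++ patterns (assumed; "?" means the handle could not be traced):
    #   ros::NodeHandle nh;                  -> ""  (public namespace)
    #   ros::NodeHandle pnh("~");            -> "~" (private namespace)
    #   ros::NodeHandle child(parent, "a");  -> "/" + <resolved parent ns> + "a"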
def _extract_topic(self, call, topic_pos=0):
name = resolve_expression(call.arguments[topic_pos])
if not isinstance(name, basestring):
name = "?"
return name or "?"
def _extract_message_type(self, call):
if call.template:
template = call.template[0]
std_alloc = re.search("_<std::allocator<void>", template)
if std_alloc is not None:
template = template[:std_alloc.start()]
assert re.match(r"\w+::\w+$", template)
return template.replace("::", "/")
if (call.name not in ("subscribe", "advertiseService")
and 'NodeHandle' not in call.full_name):
return "?"
callback = (call.arguments[2]
if call.name == "subscribe"
else call.arguments[1])
while isinstance(callback, CppOperator):
callback = callback.arguments[0]
type_string = callback.result
try:
type_string = type_string.split(None, 1)[1]
except IndexError:
type_string = type_string.strip()
if type_string.startswith("(*)"):
type_string = type_string[3:]
if type_string[0] == "(" and type_string[-1] == ")":
type_string = type_string[1:-1]
if call.name == "advertiseService":
type_string = type_string.split(", ")[0]
is_const = type_string.startswith("const ")
if is_const:
type_string = type_string[6:]
is_ref = type_string.endswith(" &")
if is_ref:
type_string = type_string[:-2]
is_ptr = type_string.endswith("::ConstPtr")
if is_ptr:
type_string = type_string[:-10]
else:
is_ptr = type_string.endswith("ConstPtr")
if is_ptr:
type_string = type_string[:-8]
if type_string.endswith("::Request"):
type_string = type_string[:-9]
if type_string.startswith("boost::function"):
type_string = type_string[52:-25]
return type_string.replace("::", "/")
def _extract_action(self, call):
name = "?"
if "SimpleActionServer" in call.canonical_type and len(call.arguments) > 2:
arg = call.arguments[1]
if not isinstance(arg, basestring):
arg = resolve_expression(arg)
if isinstance(arg, basestring):
name = arg.split()[-1].replace("'", "")
elif "SimpleActionClient" in call.canonical_type and len(call.arguments) > 1:
if isinstance(call.arguments[0], basestring):
name = call.arguments[0]
return name
def _extract_action_type(self, call):
type_string = call.template[0]
return type_string.replace("::", "/")
def _extract_queue_size(self, call, queue_pos=1):
queue_size = resolve_expression(call.arguments[queue_pos])
if isinstance(queue_size, (int, long, float)):
return queue_size
return None
class RospyExtractor(LoggingObject):
queue_size_pos = {
'publisher': 6,
'subscriber': 4,
}
rospy_names = {
'publication': ('Publisher',),
'subscription': ('Subscriber',),
'service-def': ('Service',),
'service-call': ('ServiceProxy',),
}
@classmethod
def all_rospy_names(cls, type):
names = cls.rospy_names[type]
return tuple('rospy.' + name for name in names) + names
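    #   e.g. all_rospy_names('publication') -> ('rospy.Publisher', 'Publisher')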
@staticmethod
def get_arg(call, pos, name):
try:
return next(
keyword.value
for keyword in call.named_args
if keyword.name == name)
except StopIteration:
try:
return call.arguments[pos]
except IndexError:
return None
@staticmethod
def invalid_call(call):
return (len(call.arguments) + len(call.named_args)
+ bool(call.star_args) + bool(call.kw_args)) <= 1
@staticmethod
def split_ns_name(full_name):
if '/' in full_name:
ns, _, name = full_name.rpartition('/')
else:
ns, name = '', full_name
return ns, name
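    #   e.g. split_ns_name('/ns/topic') -> ('/ns', 'topic')
    #        split_ns_name('topic')     -> ('', 'topic')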
def _call_location(self, call):
try:
source_file = next(
sf
for sf in self.package.source_files
if sf.path == call.file)
except StopIteration:
            source_file = None
function = call.function
if function:
function = function.name
return Location(self.package, file=source_file, line=call.line,
fun=function)
@classmethod
def _extract_queue_size(cls, call):
pos = cls.queue_size_pos[call.name.lower()]
queue_size_arg = cls.get_arg(call, pos, 'queue_size')
try:
queue_size = resolve_expression(queue_size_arg)
assert(isinstance(queue_size, (int, long, float)))
return queue_size
except AssertionError:
return None
@classmethod
def _extract_message_type(cls, call, arg_name, msgs_imports, arg_pos=1):
msg_type = cls.get_arg(call, 1, arg_name)
        for msg in msgs_imports:
            if str(msg_type).replace("#", "") in msg[1]:
                msg_type = msg[0] + "/" + str(msg_type).replace("#", "")
# Very common case of calling type() on a message class
if isinstance(msg_type, CodeFunctionCall) and msg_type.name == 'type':
msg_type = msg_type.arguments[0].name
if isinstance(msg_type, CodeReference):
msg_type = resolve_reference(msg_type) or msg_type
return str(msg_type)
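    # Example: with msgs_imports containing ("std_msgs", "String") (built from
    # an import such as std_msgs.msg.String), a msg_type of "String" resolves
    # roughly to "std_msgs/String".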
@classmethod
def _extract_topic(cls, call):
name = resolve_expression(cls.get_arg(call, 0, 'name'))
if not isinstance(name, basestring):
name = '?'
return cls.split_ns_name(name)
def _on_client(self, node, call):
if self.invalid_call(call):
return
ns, name = self._extract_topic(call)
msg_type = self._extract_message_type(call, 'service_class', self.msgs_list)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
cli = ServiceClientCall(name, ns, msg_type, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.client.append(cli)
self.log.debug("Found Client on %s/%s (%s)", ns, name, msg_type)
def _on_publication(self, node, call):
if self.invalid_call(call):
return
ns, name = self._extract_topic(call)
msg_type = self._extract_message_type(call, 'data_class', self.msgs_list)
queue_size = self._extract_queue_size(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
pub = Publication(name, ns, msg_type, queue_size, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.advertise.append(pub)
self.log.debug("Found Publication on %s/%s (%s)", ns, name, msg_type)
def _on_service(self, node, call):
if self.invalid_call(call):
return
ns, name = self._extract_topic(call)
msg_type = self._extract_message_type(call, 'service_class', self.msgs_list)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
srv = ServiceServerCall(name, ns, msg_type, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.service.append(srv)
self.log.debug("Found Service on %s/%s (%s)", ns, name, msg_type)
def _on_subscription(self, node, call):
if self.invalid_call(call):
return
ns, name = self._extract_topic(call)
msg_type = self._extract_message_type(call, 'data_class', self.msgs_list)
queue_size = self._extract_queue_size(call)
depth = get_control_depth(call, recursive=True)
location = self._call_location(call)
conditions = [SourceCondition(pretty_str(c), location=location)
for c in get_conditions(call, recursive=True)]
sub = Subscription(name, ns, msg_type, queue_size, location=location,
control_depth=depth, conditions=conditions,
repeats=is_under_loop(call, recursive=True))
node.subscribe.append(sub)
self.log.debug("Found Subscription on %s/%s (%s)", ns, name, msg_type)
def _query_comm_primitives(self, node, gs):
##################################
# Topics
##################################
publications = (CodeQuery(gs).all_calls
.where_name(('Publisher', 'rospy.Publisher'))
.get())
subscriptions = (CodeQuery(gs).all_calls
.where_name(('Subscriber', 'rospy.Subscriber'))
.get())
for call in publications:
self._on_publication(node, call)
for call in subscriptions:
self._on_subscription(node, call)
##################################
# Services
##################################
service_defs = (CodeQuery(gs).all_calls
.where_name(self.all_rospy_names('service-def'))
.get())
service_calls = (CodeQuery(gs).all_calls
.where_name(self.all_rospy_names('service-call'))
.get())
for call in service_defs:
self._on_service(node, call)
for call in service_calls:
self._on_client(node, call)
def _setup_path(self):
setup_file = os.path.join(self.package.path, 'setup.py')
if not os.path.isfile(setup_file):
return []
parser = PyAstParser(workspace=self.package.path)
setup = parser.parse(setup_file)
setup_call = (CodeQuery(setup).all_calls
.where_name('generate_distutils_setup')
.get()
or
CodeQuery(setup).all_calls
.where_name('setup')
.get())[0]
package_dir = self.get_arg(setup_call, 0, 'package_dir')
        if hasattr(package_dir, 'value'):
            package_dir = {
                keyword.name: keyword.value
                for keyword in package_dir.value
            }
else:
src_path = os.path.join(self.package.path, 'src')
package_dir = {'': 'src'} if os.path.exists(src_path) else {}
root = package_dir.get('', '')
return [os.path.join(self.package.path, root)]
def __init__(self, package, workspace):
self.package = package
self.workspace = workspace
self.pythonpath = self._setup_path()
def extract(self, node):
self.log.debug("Parsing Python files for node %s", node.id)
parser = PyAstParser(pythonpath=self.pythonpath,
workspace=self.workspace)
for sf in node.source_files:
self.log.debug("Parsing Python file %s", sf.path)
if parser.parse(sf.path) is None:
self.log.warning("no compile commands for " + sf.path)
node.source_tree = parser.global_scope
        # In theory the imported names list should not be needed here; it is a
        # workaround to recover the fully qualified ROS message types
        # (i.e. "PkgName/MsgName") from the module imports.
        self.msgs_list = []
        for i in parser.imported_names_list:
            if "msg" in str(i) or "srv" in str(i):
                self.msgs_list.append((i.split(".")[0], i.split(".")[2]))
# ----- queries after parsing, since global scope is reused -----------
self._query_comm_primitives(node, parser.global_scope)
# self._query_param_primitives(node, parser.global_scope)
| 42.875458
| 170
| 0.55425
|
b60d112171549ec9d61db2073c62fff691b831fc
| 1,883
|
py
|
Python
|
data/p3BR/R2/benchmark/startCirq69.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startCirq69.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p3BR/R2/benchmark/startCirq69.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=3
# total number=12
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.X.on(input_qubit[2])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.CZ.on(input_qubit[2],input_qubit[1])) # number=8
c.append(cirq.H.on(input_qubit[1])) # number=9
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[1])) # number=4
c.append(cirq.Z.on(input_qubit[2])) # number=3
c.append(cirq.Y.on(input_qubit[2])) # number=5
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=10
c.append(cirq.CNOT.on(input_qubit[2],input_qubit[0])) # number=11
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count, input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count = 2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq69.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| 28.969231
| 77
| 0.693574
|
67373ebb8e36d13f2371458b9dcc08ec61f8b1de
| 2,513
|
py
|
Python
|
CIM14/CDPSM/Connectivity/IEC61970/Core/Equipment.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | null | null | null |
CIM14/CDPSM/Connectivity/IEC61970/Core/Equipment.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | null | null | null |
CIM14/CDPSM/Connectivity/IEC61970/Core/Equipment.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CDPSM.Connectivity.IEC61970.Core.PowerSystemResource import PowerSystemResource
class Equipment(PowerSystemResource):
"""The parts of a power system that are physical devices, electronic or mechanical
"""
def __init__(self, EquipmentContainer=None, *args, **kw_args):
"""Initialises a new 'Equipment' instance.
@param EquipmentContainer: The association is used in the naming hierarchy.
"""
self._EquipmentContainer = None
self.EquipmentContainer = EquipmentContainer
super(Equipment, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["EquipmentContainer"]
_many_refs = []
def getEquipmentContainer(self):
"""The association is used in the naming hierarchy.
"""
return self._EquipmentContainer
def setEquipmentContainer(self, value):
if self._EquipmentContainer is not None:
filtered = [x for x in self.EquipmentContainer.Equipments if x != self]
self._EquipmentContainer._Equipments = filtered
self._EquipmentContainer = value
if self._EquipmentContainer is not None:
if self not in self._EquipmentContainer._Equipments:
self._EquipmentContainer._Equipments.append(self)
EquipmentContainer = property(getEquipmentContainer, setEquipmentContainer)
| 41.196721
| 90
| 0.728213
|
0291d9dc644017cfdb4307e0b5968cd27063b23f
| 7,809
|
py
|
Python
|
system/ota/framework/tools/hex2bin/hex2bin.py
|
Microchip-MPLAB-Harmony/wireless_system_pic32mzw1_wfi32e01
|
a66953e2aee01e92e4c0482fe1f1ab7786ffbb08
|
[
"0BSD"
] | null | null | null |
system/ota/framework/tools/hex2bin/hex2bin.py
|
Microchip-MPLAB-Harmony/wireless_system_pic32mzw1_wfi32e01
|
a66953e2aee01e92e4c0482fe1f1ab7786ffbb08
|
[
"0BSD"
] | null | null | null |
system/ota/framework/tools/hex2bin/hex2bin.py
|
Microchip-MPLAB-Harmony/wireless_system_pic32mzw1_wfi32e01
|
a66953e2aee01e92e4c0482fe1f1ab7786ffbb08
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/env python
################################################################################
# Copyright 2020 Microchip Technology Inc. and its subsidiaries. You may use this
# software and any derivatives exclusively with Microchip products.
#
# THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER EXPRESS,
# IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED WARRANTIES
# OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE, OR
# ITS INTERACTION WITH MICROCHIP PRODUCTS, COMBINATION WITH ANY OTHER PRODUCTS, OR
# USE IN ANY APPLICATION.
#
# IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
# INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND WHATSOEVER
# RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS BEEN ADVISED OF
# THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE FULLEST EXTENT ALLOWED
# BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN ANY WAY RELATED TO THIS
# SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY, THAT YOU HAVE PAID DIRECTLY
# TO MICROCHIP FOR THIS SOFTWARE.
#
# MICROCHIP PROVIDES THIS SOFTWARE CONDITIONALLY UPON YOUR ACCEPTANCE OF THESE
# TERMS.
################################################################################
import struct
import os
import argparse
import fnmatch
from colorama import init, Fore
import hashlib
def strtoul(text, base):
    if base is None:
        if len(text) >= 3 and text[0:2] == '0x':
            base = 16
        else:
            base = 10
    return int(text, base)
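# e.g. strtoul("0x1F", None) -> 31; strtoul("31", None) -> 31; strtoul("1F", 16) -> 31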
class INTEL_HEX(object):
RECORD_TYPE_DATA = 0x00
RECORD_TYPE_EOF = 0x01
RECORD_TYPE_EXT_SEGMENT_ADDR = 0x02
    RECORD_TYPE_START_SEGMENT_ADDR = 0x03
RECORD_TYPE_EXT_LINEAR_ADDR = 0x04
RECORD_TYPE_START_LINEAR_ADDR = 0x05
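    # Intel HEX record layout (textual): ":" LL AAAA TT DD...DD CC
    #   LL = data byte count, AAAA = 16-bit load address, TT = record type
    #   (one of the constants above), CC = checksum such that the sum of all
    #   decoded bytes is 0 modulo 256.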
class recode(object):
def __init__(self, line):
self.valid = 1 if line[0] == ':' else 0
            raw = [int(line[i:i+2], 16) for i in range(1, len(line), 2)]
            self.count = raw[0]
            self.address = (raw[1] << 8) + raw[2]
            self.type = raw[3]
            self.data = raw[4:4+self.count]
            self.xsum = raw[-1]
            self.len = len(self.data)
            if sum(raw) & 0xFF != 0:
                self.valid = 0
                raise ValueError("Intel HEX checksum mismatch: %s" % raw)
if self.type > 5:
self.valid = 0
def debug(self):
print("$G%02x$c%04x$M%02X$C%s$w%02x" % (
self.count,
self.address,
self.type,
''.join('{:02x}'.format(x) for x in self.data),
self.xsum))
def __init__(self, filename):
self.parsing = 1
self.file = open(filename, 'r')
        if self.file is None:
print(Fore.RED+"Unable to open file: %s\n" % filename)
self.parsing = 0
def __del__(self):
        if self.file is not None:
self.file.close()
self.file = None
self.parsing = 0
def get_record(self):
r = None
if self.parsing != 0:
line = self.file.readline()
if line != "":
r = self.recode(line.rstrip('\n'))
            if r is None or r.valid == 0:
self.parsing = 0
r = None
return r
def hex2bin(input_file, output_file, upper_limit, lower_limit, trim_tailing_space=0):
intel_hex = INTEL_HEX(input_file)
address_hi = 0
address_highest = 0
prog = []
status = 0
    if upper_limit <= lower_limit:
        print(Fore.RED+"Invalid parameters: upper_limit <= lower_limit\n")
        return -1
for i in range(0, (upper_limit - lower_limit)):
prog.append(0xff)
while 1:
record = intel_hex.get_record()
        if record is None:
break
if record.type == INTEL_HEX.RECORD_TYPE_DATA:
address = address_hi + record.address
if (lower_limit <= address) and ((address + record.count) <= upper_limit):
if address_highest < (address + record.count):
address_highest = address + record.count
#print("%08X %s" % (address - lower_limit, ''.join('{:02x}'.format(x) for x in record.data)))
for i in range(0, record.count):
prog[address - lower_limit + i] = record.data[i]
elif record.type == INTEL_HEX.RECORD_TYPE_EXT_LINEAR_ADDR:
address_hi = (record.data[0] << 24) | (record.data[1] << 16)
elif record.type == INTEL_HEX.RECORD_TYPE_EOF:
break
else:
print(Fore.RED+"Invalid Record Type(%02X)\n" % record.type)
status = -1
if status == 0:
bin = open(output_file, "wb")
if trim_tailing_space == 0:
address_highest = upper_limit
print(Fore.GREEN+"Generating %s (%08X ~ %08X %d bytes)\n" %
(output_file, lower_limit, address_highest, (address_highest - lower_limit)))
for i in range(0, address_highest - lower_limit):
bin.write(struct.pack('B', prog[i]))
bin.close()
return status
def is_valid_file(parser, arg):
if not os.path.exists(arg):
parser.error(Fore.RED+"The file \"%s\" does not exist!" % arg)
else:
return os.path.abspath(arg)
def find_file(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
if __name__ == "__main__":
init(autoreset=True)
print(Fore.GREEN+"hex2bin V1.2")
parser = argparse.ArgumentParser(
description="Tool to convert hex file into an OTA bin file for WFI32", prog="hex2bin")
parser.add_argument('-i', '--input-hex', dest='production_hex', action='store', metavar='',
help='Location of the hex file to convert to OTA bin', type=lambda x: is_valid_file(parser, x))
parser.add_argument('-z', '--slot-size', dest='IMAGESTORE_SLOT_SIZE', action='store',
metavar='', help='Max slot size of the image in hex', default="0xE5000")
parser.add_argument('-s', '--start-addr', dest='APP_IMG_SLOT_START', action='store',
metavar='', help='Start address of application image in hex', default="0x10018000")
parser.add_argument('-o', '--output-bin', dest='production_bin',
action='store', metavar='', help='Location of the output ota bin file')
args = parser.parse_args()
APP_IMG_SLOT_END = int(args.APP_IMG_SLOT_START, base=16) + \
int(args.IMAGESTORE_SLOT_SIZE, base=16)
production_hex = None
if args.production_hex is None:
hexList = find_file('*.hex', '..\\..\\firmware')
for hexFile in hexList:
if hexFile.find('unified.hex') == -1:
production_hex = os.path.abspath(hexFile)
print(f'Converting file to bin: {Fore.YELLOW}{production_hex}')
break
if production_hex is None:
print(Fore.RED+"\nCould not locate hex file. Use the -i flag.\n")
parser.print_help()
exit()
else:
production_hex = args.production_hex
if args.production_bin is None:
production_bin = os.path.splitext(production_hex)[0]+'.bin'
else:
production_bin = args.production_bin
    ret = hex2bin(production_hex, production_bin, APP_IMG_SLOT_END,
                  int(args.APP_IMG_SLOT_START, base=16), 1)
    if not ret:
        with open(production_bin, "rb") as f:
            data = f.read()
        readable_hash = hashlib.sha256(data).hexdigest()
        print(f'{Fore.YELLOW}"Digest":"{readable_hash}"\n')
| 36.32093
| 119
| 0.588808
|
e43a035e7a1983934152ddd5fc8c8c4a4e80aeed
| 5,792
|
py
|
Python
|
trough/utils.py
|
gregstarr/trough
|
5ec89cf342b3fc63fb13223bd4d3a9f20cb5d4eb
|
[
"MIT"
] | 1
|
2022-03-31T00:48:33.000Z
|
2022-03-31T00:48:33.000Z
|
trough/utils.py
|
gregstarr/trough
|
5ec89cf342b3fc63fb13223bd4d3a9f20cb5d4eb
|
[
"MIT"
] | 11
|
2022-03-08T10:44:07.000Z
|
2022-03-31T14:14:36.000Z
|
trough/utils.py
|
gregstarr/trough
|
5ec89cf342b3fc63fb13223bd4d3a9f20cb5d4eb
|
[
"MIT"
] | null | null | null |
import numpy as np
import datetime
import warnings
import logging
import xarray as xr
try:
import h5py
from skimage.util import view_as_windows
except ImportError as imp_err:
warnings.warn(f"Packages required for recreating dataset not installed: {imp_err}")
logger = logging.getLogger(__name__)
def datetime64_to_datetime(dt64):
"""Convert single datetime64 to datetime
Parameters
----------
dt64: numpy.ndarray[datetime64]
Returns
-------
list[datetime]
"""
ts = dt64.astype('datetime64[s]').astype(float)
if isinstance(ts, np.ndarray):
return [datetime.datetime.utcfromtimestamp(t) for t in ts]
return datetime.datetime.utcfromtimestamp(ts)
def decompose_datetime64(dt64):
"""Convert array of np.datetime64 to an array (N x 3) of year, month (jan=1), day (1 index)
Parameters
----------
dt64: numpy.ndarray[datetime64]
Returns
-------
idx: numpy.ndarray (N x 3)
"""
year_floor = dt64.astype('datetime64[Y]')
month_floor = dt64.astype('datetime64[M]')
year = year_floor.astype(int) + 1970
month = (dt64.astype('datetime64[M]') - year_floor).astype(int) + 1
day = (dt64.astype('datetime64[D]') - month_floor).astype(int) + 1
return np.column_stack((year, month, day))
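# e.g. decompose_datetime64(np.array(['2020-02-03'], dtype='datetime64[s]'))
#      -> array([[2020, 2, 3]])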
def centered_bn_func(func, arr, window_diameter, pad=False, **kwargs):
"""Call a centered bottleneck moving window function on an array, optionally padding with the edge values to keep
the same shape. Window moves through axis 0.
Parameters
----------
func: bottleneck moving window function
arr: numpy.ndarray
window_diameter: int
odd number window width
pad: bool
whether to pad to keep same shape or not
kwargs
passed to func
Returns
-------
numpy.ndarray
"""
window_radius = window_diameter // 2
assert (2 * window_radius + 1) == window_diameter, "window_diameter must be odd"
if pad:
pad_tuple = ((window_radius, window_radius), ) + ((0, 0), ) * (arr.ndim - 1)
arr = np.pad(arr, pad_tuple, mode='edge')
return func(arr, window_diameter, **kwargs)[2 * window_radius:]
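# Usage sketch (assumes the optional bottleneck dependency, e.g. bn.move_mean):
#   smoothed = centered_bn_func(bn.move_mean, data, 5, pad=True, min_count=1)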
def moving_func_trim(window_diameter, *arrays):
"""Trim any number of arrays to valid dimension after calling a centered bottleneck moving window function
Parameters
----------
window_diameter: int
odd number window width
arrays: 1 or more numpy.ndarray
Returns
-------
    generator of numpy.ndarray
"""
window_radius = window_diameter // 2
assert (2 * window_radius + 1) == window_diameter, "window_diameter must be odd"
if window_radius == 0:
return (array for array in arrays)
return (array[window_radius:-window_radius] for array in arrays)
def extract_patches(arr, patch_shape, step=1):
"""Assuming `arr` is 3D (time, lat, lon). `arr` will be padded, then have patches extracted using
`skimage.util.view_as_windows`. The padding will be "edge" for lat, and "wrap" for lon, with no padding for
time. Returned array will have same lat and lon dimension length as input and a different time dimension length
depending on `patch_shape`.
Parameters
----------
arr: numpy.ndarray
must be 3 dimensional
patch_shape: tuple
must be length 3
step: int
Returns
-------
patches view of padded array
shape (arr.shape[0] - patch_shape[0] + 1, arr.shape[1], arr.shape[2]) + patch_shape
"""
assert arr.ndim == 3 and len(patch_shape) == 3, "Invalid input args"
# lat padding
padded = np.pad(arr, ((0, 0), (patch_shape[1] // 2, patch_shape[1] // 2), (0, 0)), 'edge')
# lon padding
padded = np.pad(padded, ((0, 0), (0, 0), (patch_shape[2] // 2, patch_shape[2] // 2)), 'wrap')
patches = view_as_windows(padded, patch_shape, step)
return patches
def write_h5(fn, **kwargs):
"""Writes an h5 file with data specified by kwargs.
"""
with h5py.File(fn, 'w') as f:
for key, value in kwargs.items():
f.create_dataset(key, data=value)
def get_data_checker(data_getter):
def check(start, end, dt, hemisphere, processed_file):
times = np.arange(np.datetime64(start), np.datetime64(end), dt).astype('datetime64[s]')
if processed_file.exists():
logger.info(f"processed file already exists {processed_file=}, checking...")
try:
data_check = data_getter(start, end, hemisphere, processed_file.parent)
data_check = data_check.sel(time=times)
has_missing_data = data_check.isnull().all(axis=[i for i in range(1, data_check.ndim)]).any()
if not has_missing_data:
logger.info(f"downloaded data already processed {processed_file=}, checking...")
return False
except KeyError:
logger.info(f"processed file doesn't have the requested data")
except Exception as e:
logger.info(f"error reading processed file {processed_file=}: {e}, removing and reprocessing")
processed_file.unlink()
return True
return check
def read_netcdfs(files, dim):
"""https://xarray.pydata.org/en/stable/user-guide/io.html#reading-multi-file-datasets
"""
def process_one_path(path):
# use a context manager, to ensure the file gets closed after use
with xr.open_dataarray(path) as ds:
# load all data from the transformed dataset, to ensure we can
# use it after closing each original file
ds.load()
return ds
paths = sorted(files)
datasets = [process_one_path(p) for p in paths]
combined = xr.concat(datasets, dim)
return combined
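# Usage sketch (hedged; paths are illustrative): concatenate per-file
# DataArrays along their shared time dimension.
#     import glob
#     da = read_netcdfs(glob.glob('data/*.nc'), dim='time')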
| 33.287356 | 117 | 0.64261 |
b2ae2e40ea860cfc970074bb06317d29eb595926 | 861 | py | Python | translate.py | sekharvth/comment-filter-unsupervised | 7c34655aac1f4bf3daa34da6dbc1b0bdcc337364 | ["MIT"] | null | null | null | translate.py | sekharvth/comment-filter-unsupervised | 7c34655aac1f4bf3daa34da6dbc1b0bdcc337364 | ["MIT"] | null | null | null | translate.py | sekharvth/comment-filter-unsupervised | 7c34655aac1f4bf3daa34da6dbc1b0bdcc337364 | ["MIT"] | null | null | null |
# first translate the given comments to English
import html
import re
import urllib.parse
import urllib.request
agent = {'User-Agent':
"Mozilla/4.0 (\
compatible;\
MSIE 6.0;\
Windows NT 5.1;\
SV1;\
.NET CLR 1.1.4322;\
.NET CLR 2.0.50727;\
.NET CLR 3.0.04506.30\
)"}
def unescape(text):
    # html.parser.HTMLParser.unescape was deprecated and removed in
    # Python 3.9; html.unescape is the supported equivalent.
    return html.unescape(text)
def translate(sent_to_translate, to_language="auto", from_language="auto"):
sent_to_translate = urllib.parse.quote(sent_to_translate)
link = "https://translate.google.com/m?hl={}&sl={}&q={}".format(to_language, from_language, sent_to_translate)
request = urllib.request.Request(link, headers=agent)
data = urllib.request.urlopen(request).read().decode("utf-8")
translation = re.findall(r'class="t0">(.*?)<', data)
if (len(translation) == 0):
result = ''
else:
result = unescape(translation[0])
return result
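# Usage sketch (hedged; the phrase is illustrative): the scraper returns ''
# when the regex finds no match in the response, so test before using it.
#     text = translate('bonjour le monde', to_language='en')
#     if text:
#         print(text)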
| 26.090909 | 114 | 0.665505 |
1ba389d03f01ea1296cdfa474d05602fbb276524 | 43,042 | py | Python | HARK/ConsumptionSaving/ConsIndShockModelFast.py | nicksawhney/HARK | f7608a96c3b491f9cf605472768dd996eb624f76 | ["Apache-2.0"] | null | null | null | HARK/ConsumptionSaving/ConsIndShockModelFast.py | nicksawhney/HARK | f7608a96c3b491f9cf605472768dd996eb624f76 | ["Apache-2.0"] | null | null | null | HARK/ConsumptionSaving/ConsIndShockModelFast.py | nicksawhney/HARK | f7608a96c3b491f9cf605472768dd996eb624f76 | ["Apache-2.0"] | null | null | null |
"""
Classes to solve canonical consumption-savings models with idiosyncratic shocks
to income. All models here assume CRRA utility with geometric discounting, no
bequest motive, and income shocks are fully transitory or fully permanent.
It currently solves three types of models:
1) A very basic "perfect foresight" consumption-savings model with no uncertainty.
2) A consumption-savings model with risk over transitory and permanent income shocks.
3) The model described in (2), with an interest rate for debt that differs
from the interest rate for savings. #todo
See `NARK <https://github.com/econ-ark/HARK/blob/master/Documentation/NARK/NARK.pdf>`_ for information on variable naming conventions. See `hark.readthedocs.io <https://hark.readthedocs.io>`_ for mathematical descriptions of the models being solved.
"""
from copy import deepcopy
import numpy as np
from interpolation import interp
from numba import njit
from quantecon.optimize import newton_secant
from HARK import make_one_period_oo_solver, MetricObject
from HARK.ConsumptionSaving.ConsIndShockModel import (
ConsumerSolution,
ConsPerfForesightSolver,
ConsIndShockSolverBasic,
PerfForesightConsumerType,
IndShockConsumerType,
)
from HARK.interpolation import (
LinearInterp,
LowerEnvelope,
CubicInterp,
ValueFuncCRRA,
MargValueFuncCRRA,
MargMargValueFuncCRRA,
)
from HARK.numba import (
CRRAutility,
CRRAutilityP,
CRRAutilityPP,
CRRAutilityP_inv,
CRRAutility_invP,
CRRAutility_inv,
CRRAutilityP_invP,
)
from HARK.numba import linear_interp_fast, cubic_interp_fast, linear_interp_deriv_fast
__all__ = [
"PerfForesightSolution",
"IndShockSolution",
"ConsPerfForesightSolverFast",
"ConsIndShockSolverBasicFast",
"ConsIndShockSolverFast",
"PerfForesightConsumerTypeFast",
"IndShockConsumerTypeFast",
]
utility = CRRAutility
utilityP = CRRAutilityP
utilityPP = CRRAutilityPP
utilityP_inv = CRRAutilityP_inv
utility_invP = CRRAutility_invP
utility_inv = CRRAutility_inv
utilityP_invP = CRRAutilityP_invP
# =====================================================================
# === Classes that help solve consumption-saving models ===
# =====================================================================
class PerfForesightSolution(MetricObject):
"""
A class representing the solution of a single period of a consumption-saving
perfect foresight problem.
Here and elsewhere in the code, Nrm indicates that variables are normalized
by permanent income.
Parameters
----------
mNrm: np.array
(Normalized) corresponding market resource points for interpolation.
cNrm : np.array
(Normalized) consumption points for interpolation.
vFuncNvrsSlope: float
Constant slope of inverse value vFuncNvrs
mNrmMin : float
The minimum allowable market resources for this period; the consump-
tion function (etc) are undefined for m < mNrmMin.
hNrm : float
Human wealth after receiving income this period: PDV of all future
income, ignoring mortality.
MPCmin : float
Infimum of the marginal propensity to consume this period.
MPC --> MPCmin as m --> infinity.
MPCmax : float
Supremum of the marginal propensity to consume this period.
MPC --> MPCmax as m --> mNrmMin.
"""
distance_criteria = ["cNrm", "mNrm"]
def __init__(
self,
mNrm=np.array([0.0, 1.0]),
cNrm=np.array([0.0, 1.0]),
vFuncNvrsSlope=0.0,
mNrmMin=0.0,
hNrm=0.0,
MPCmin=1.0,
MPCmax=1.0,
):
self.mNrm = mNrm
self.cNrm = cNrm
self.vFuncNvrsSlope = vFuncNvrsSlope
self.mNrmMin = mNrmMin
self.hNrm = hNrm
self.MPCmin = MPCmin
self.MPCmax = MPCmax
class IndShockSolution(MetricObject):
"""
A class representing the solution of a single period of a consumption-saving
idiosyncratic shocks to permanent and transitory income problem.
Parameters
----------
mNrm: np.array
(Normalized) corresponding market resource points for interpolation.
cNrm : np.array
(Normalized) consumption points for interpolation.
vFuncNvrsSlope: float
Constant slope of inverse value ``vFuncNvrs``
mNrmMin : float
The minimum allowable market resources for this period; the consump-
tion function (etc) are undefined for m < mNrmMin.
hNrm : float
Human wealth after receiving income this period: PDV of all future
income, ignoring mortality.
MPCmin : float
Infimum of the marginal propensity to consume this period.
MPC --> MPCmin as m --> infinity.
MPCmax : float
Supremum of the marginal propensity to consume this period.
MPC --> MPCmax as m --> mNrmMin.
"""
distance_criteria = ["cNrm", "mNrm", "mNrmMin"]
def __init__(
self,
mNrm=np.linspace(0, 1),
cNrm=np.linspace(0, 1),
cFuncLimitIntercept=None,
cFuncLimitSlope=None,
mNrmMin=0.0,
hNrm=0.0,
MPCmin=1.0,
MPCmax=1.0,
Ex_IncNext=0.0,
MPC=None,
mNrmGrid=None,
vNvrs=None,
vNvrsP=None,
MPCminNvrs=None,
):
self.mNrm = mNrm
self.cNrm = cNrm
self.cFuncLimitIntercept = cFuncLimitIntercept
self.cFuncLimitSlope = cFuncLimitSlope
self.mNrmMin = mNrmMin
self.hNrm = hNrm
self.MPCmin = MPCmin
self.MPCmax = MPCmax
self.Ex_IncNext = Ex_IncNext
self.mNrmGrid = mNrmGrid
self.vNvrs = vNvrs
self.MPCminNvrs = MPCminNvrs
self.MPC = MPC
self.vNvrsP = vNvrsP
# =====================================================================
# === Classes and functions that solve consumption-saving models ===
# =====================================================================
@njit(cache=True)
def _find_mNrmStE(m, Rfree, PermGroFac, mNrm, cNrm, Ex_IncNext):
# Make a linear function of all combinations of c and m that yield mNext = mNow
mZeroChange = (1.0 - PermGroFac / Rfree) * m + (PermGroFac / Rfree) * Ex_IncNext
# Find the steady state level of market resources
res = interp(mNrm, cNrm, m) - mZeroChange
# A zero of this is SS market resources
return res
# @njit(cache=True) cannot be used here: caching fails on the globals this
# function touches, most likely newton_secant.
@njit
def _add_mNrmStENumba(
Rfree, PermGroFac, mNrm, cNrm, mNrmMin, Ex_IncNext, _find_mNrmStE
):
"""
Finds steady state (normalized) market resources and adds it to the
solution. This is the level of market resources such that the expectation
of market resources in the next period is unchanged. This value doesn't
necessarily exist.
"""
# Minimum market resources plus next income is okay starting guess
m_init_guess = mNrmMin + Ex_IncNext
mNrmStE = newton_secant(
_find_mNrmStE,
m_init_guess,
args=(Rfree, PermGroFac, mNrm, cNrm, Ex_IncNext),
disp=False,
)
if mNrmStE.converged:
return mNrmStE.root
else:
return None
@njit(cache=True)
def _solveConsPerfForesightNumba(
DiscFac,
LivPrb,
CRRA,
Rfree,
PermGroFac,
BoroCnstArt,
MaxKinks,
mNrmNext,
cNrmNext,
hNrmNext,
MPCminNext,
):
"""
Makes the (linear) consumption function for this period.
"""
DiscFacEff = DiscFac * LivPrb
# Calculate human wealth this period
hNrmNow = (PermGroFac / Rfree) * (hNrmNext + 1.0)
# Calculate the lower bound of the marginal propensity to consume
PatFac = ((Rfree * DiscFacEff) ** (1.0 / CRRA)) / Rfree
MPCmin = 1.0 / (1.0 + PatFac / MPCminNext)
# Extract the discrete kink points in next period's consumption function;
# don't take the last one, as it only defines the extrapolation and is not a kink.
mNrmNext = mNrmNext[:-1]
cNrmNext = cNrmNext[:-1]
# Calculate the end-of-period asset values that would reach those kink points
# next period, then invert the first order condition to get consumption. Then
# find the endogenous gridpoint (kink point) today that corresponds to each kink
aNrmNow = (PermGroFac / Rfree) * (mNrmNext - 1.0)
cNrmNow = (DiscFacEff * Rfree) ** (-1.0 / CRRA) * (PermGroFac * cNrmNext)
mNrmNow = aNrmNow + cNrmNow
# Add an additional point to the list of gridpoints for the extrapolation,
# using the new value of the lower bound of the MPC.
mNrmNow = np.append(mNrmNow, mNrmNow[-1] + 1.0)
cNrmNow = np.append(cNrmNow, cNrmNow[-1] + MPCmin)
# If the artificial borrowing constraint binds, combine the constrained and
# unconstrained consumption functions.
if BoroCnstArt > mNrmNow[0]:
# Find the highest index where constraint binds
cNrmCnst = mNrmNow - BoroCnstArt
CnstBinds = cNrmCnst < cNrmNow
idx = np.where(CnstBinds)[0][-1]
if idx < (mNrmNow.size - 1):
            # If it is not the *very last* index, find the critical level
            # of mNrm where the artificial borrowing constraint begins to bind.
d0 = cNrmNow[idx] - cNrmCnst[idx]
d1 = cNrmCnst[idx + 1] - cNrmNow[idx + 1]
m0 = mNrmNow[idx]
m1 = mNrmNow[idx + 1]
alpha = d0 / (d0 + d1)
mCrit = m0 + alpha * (m1 - m0)
# Adjust the grids of mNrm and cNrm to account for the borrowing constraint.
cCrit = mCrit - BoroCnstArt
mNrmNow = np.concatenate(
(np.array([BoroCnstArt, mCrit]), mNrmNow[(idx + 1) :])
)
cNrmNow = np.concatenate((np.array([0.0, cCrit]), cNrmNow[(idx + 1) :]))
else:
# If it *is* the very last index, then there are only three points
# that characterize the consumption function: the artificial borrowing
# constraint, the constraint kink, and the extrapolation point.
            mXtra = (cNrmNow[-1] - cNrmCnst[-1]) / (1.0 - MPCmin)
mCrit = mNrmNow[-1] + mXtra
cCrit = mCrit - BoroCnstArt
mNrmNow = np.array([BoroCnstArt, mCrit, mCrit + 1.0])
cNrmNow = np.array([0.0, cCrit, cCrit + MPCmin])
# If the mNrm and cNrm grids have become too large, throw out the last
# kink point, being sure to adjust the extrapolation.
if mNrmNow.size > MaxKinks:
mNrmNow = np.concatenate((mNrmNow[:-2], np.array([mNrmNow[-3] + 1.0])))
cNrmNow = np.concatenate((cNrmNow[:-2], np.array([cNrmNow[-3] + MPCmin])))
# Calculate the upper bound of the MPC as the slope of the bottom segment.
MPCmax = (cNrmNow[1] - cNrmNow[0]) / (mNrmNow[1] - mNrmNow[0])
# Add attributes to enable calculation of steady state market resources.
# Relabeling for compatibility with add_mNrmStE
mNrmMinNow = mNrmNow[0]
# See the PerfForesightConsumerType.ipynb documentation notebook for the derivations
vFuncNvrsSlope = MPCmin ** (-CRRA / (1.0 - CRRA))
return (
mNrmNow,
cNrmNow,
vFuncNvrsSlope,
mNrmMinNow,
hNrmNow,
MPCmin,
MPCmax,
)
class ConsPerfForesightSolverFast(ConsPerfForesightSolver):
"""
A class for solving a one period perfect foresight consumption-saving problem.
An instance of this class is created by the function solvePerfForesight in each period.
"""
def solve(self):
"""
Solves the one period perfect foresight consumption-saving problem.
Parameters
----------
None
Returns
-------
solution : PerfForesightSolution
The solution to this period's problem.
"""
# Use a local value of BoroCnstArt to prevent comparing None and float below.
if self.BoroCnstArt is None:
BoroCnstArt = -np.inf
else:
BoroCnstArt = self.BoroCnstArt
(
self.mNrmNow,
self.cNrmNow,
self.vFuncNvrsSlope,
self.mNrmMinNow,
self.hNrmNow,
self.MPCmin,
self.MPCmax,
) = _solveConsPerfForesightNumba(
self.DiscFac,
self.LivPrb,
self.CRRA,
self.Rfree,
self.PermGroFac,
BoroCnstArt,
self.MaxKinks,
self.solution_next.mNrm,
self.solution_next.cNrm,
self.solution_next.hNrm,
self.solution_next.MPCmin,
)
solution = PerfForesightSolution(
self.mNrmNow,
self.cNrmNow,
self.vFuncNvrsSlope,
self.mNrmMinNow,
self.hNrmNow,
self.MPCmin,
self.MPCmax,
)
return solution
@njit(cache=True)
def _np_tile(A, reps):
return A.repeat(reps[0]).reshape(A.size, -1).transpose()
@njit(cache=True)
def _np_insert(arr, obj, values, axis=-1):
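    # NOTE: despite the np.insert-like signature, `obj` and `axis` are
    # ignored; this numba-friendly stand-in simply prepends `values`, which
    # matches every call site in this module (insert at index 0 of a 1-D array).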
return np.append(np.array(values), arr)
@njit(cache=True)
def _prepare_to_solveConsIndShockNumba(
DiscFac,
LivPrb,
CRRA,
Rfree,
PermGroFac,
BoroCnstArt,
aXtraGrid,
hNrmNext,
mNrmMinNext,
MPCminNext,
MPCmaxNext,
PermShkValsNext,
TranShkValsNext,
ShkPrbsNext,
):
"""
Unpacks some of the inputs (and calculates simple objects based on them),
storing the results in self for use by other methods. These include:
income shocks and probabilities, next period's marginal value function
(etc), the probability of getting the worst income shock next period,
the patience factor, human wealth, and the bounding MPCs.
Defines the constrained portion of the consumption function as cFuncNowCnst,
an attribute of self. Uses the artificial and natural borrowing constraints.
"""
DiscFacEff = DiscFac * LivPrb # "effective" discount factor
PermShkMinNext = np.min(PermShkValsNext)
TranShkMinNext = np.min(TranShkValsNext)
WorstIncPrb = np.sum(
ShkPrbsNext[
(PermShkValsNext * TranShkValsNext) == (PermShkMinNext * TranShkMinNext)
]
)
# Update the bounding MPCs and PDV of human wealth:
PatFac = ((Rfree * DiscFacEff) ** (1.0 / CRRA)) / Rfree
MPCminNow = 1.0 / (1.0 + PatFac / MPCminNext)
Ex_IncNext = np.dot(ShkPrbsNext, TranShkValsNext * PermShkValsNext)
hNrmNow = PermGroFac / Rfree * (Ex_IncNext + hNrmNext)
MPCmaxNow = 1.0 / (1.0 + (WorstIncPrb ** (1.0 / CRRA)) * PatFac / MPCmaxNext)
cFuncLimitIntercept = MPCminNow * hNrmNow
cFuncLimitSlope = MPCminNow
# Calculate the minimum allowable value of money resources in this period
BoroCnstNat = (mNrmMinNext - TranShkMinNext) * (PermGroFac * PermShkMinNext) / Rfree
# Note: need to be sure to handle BoroCnstArt==None appropriately.
# In Py2, this would evaluate to 5.0: np.max([None, 5.0]).
# However in Py3, this raises a TypeError. Thus here we need to directly
# address the situation in which BoroCnstArt == None:
if BoroCnstArt is None:
mNrmMinNow = BoroCnstNat
else:
mNrmMinNow = np.max(np.array([BoroCnstNat, BoroCnstArt]))
if BoroCnstNat < mNrmMinNow:
MPCmaxEff = 1.0 # If actually constrained, MPC near limit is 1
else:
MPCmaxEff = MPCmaxNow
"""
Prepare to calculate end-of-period marginal value by creating an array
of market resources that the agent could have next period, considering
the grid of end-of-period assets and the distribution of shocks he might
experience next period.
"""
# We define aNrmNow all the way from BoroCnstNat up to max(self.aXtraGrid)
# even if BoroCnstNat < BoroCnstArt, so we can construct the consumption
# function as the lower envelope of the (by the artificial borrowing con-
    # straint) unconstrained consumption function, and the artificially con-
# strained consumption function.
aNrmNow = np.asarray(aXtraGrid) + BoroCnstNat
ShkCount = TranShkValsNext.size
aNrm_temp = _np_tile(aNrmNow, (ShkCount, 1))
# Tile arrays of the income shocks and put them into useful shapes
aNrmCount = aNrmNow.shape[0]
PermShkVals_temp = (_np_tile(PermShkValsNext, (aNrmCount, 1))).transpose()
TranShkVals_temp = (_np_tile(TranShkValsNext, (aNrmCount, 1))).transpose()
ShkPrbs_temp = (_np_tile(ShkPrbsNext, (aNrmCount, 1))).transpose()
# Get cash on hand next period
mNrmNext = Rfree / (PermGroFac * PermShkVals_temp) * aNrm_temp + TranShkVals_temp
# CDC 20191205: This should be divided by LivPrb[0] for Blanchard insurance
return (
DiscFacEff,
BoroCnstNat,
cFuncLimitIntercept,
cFuncLimitSlope,
mNrmMinNow,
hNrmNow,
MPCminNow,
MPCmaxNow,
MPCmaxEff,
Ex_IncNext,
mNrmNext,
PermShkVals_temp,
ShkPrbs_temp,
aNrmNow,
)
@njit(cache=True)
def _solveConsIndShockLinearNumba(
mNrmMinNext,
mNrmNext,
CRRA,
mNrmUnc,
cNrmUnc,
DiscFacEff,
Rfree,
PermGroFac,
PermShkVals_temp,
ShkPrbs_temp,
aNrmNow,
BoroCnstNat,
cFuncInterceptNext,
cFuncSlopeNext,
):
"""
Calculate end-of-period marginal value of assets at each point in aNrmNow.
Does so by taking a weighted sum of next period marginal values across
income shocks (in a preconstructed grid self.mNrmNext).
"""
mNrmCnst = np.array([mNrmMinNext, mNrmMinNext + 1])
cNrmCnst = np.array([0.0, 1.0])
cFuncNextCnst = linear_interp_fast(mNrmNext.flatten(), mNrmCnst, cNrmCnst)
cFuncNextUnc = linear_interp_fast(
mNrmNext.flatten(), mNrmUnc, cNrmUnc, cFuncInterceptNext, cFuncSlopeNext
)
cFuncNext = np.minimum(cFuncNextCnst, cFuncNextUnc)
vPfuncNext = utilityP(cFuncNext, CRRA).reshape(mNrmNext.shape)
EndOfPrdvP = (
DiscFacEff
* Rfree
* PermGroFac ** (-CRRA)
* np.sum(PermShkVals_temp ** (-CRRA) * vPfuncNext * ShkPrbs_temp, axis=0)
)
# Finds interpolation points (c,m) for the consumption function.
cNrmNow = utilityP_inv(EndOfPrdvP, CRRA)
mNrmNow = cNrmNow + aNrmNow
# Limiting consumption is zero as m approaches mNrmMin
cNrm = _np_insert(cNrmNow, 0, 0.0, axis=-1)
mNrm = _np_insert(mNrmNow, 0, BoroCnstNat, axis=-1)
return (cNrm, mNrm, EndOfPrdvP)
class ConsIndShockSolverBasicFast(ConsIndShockSolverBasic):
"""
This class solves a single period of a standard consumption-saving problem,
using linear interpolation and without the ability to calculate the value
function. ConsIndShockSolver inherits from this class and adds the ability
to perform cubic interpolation and to calculate the value function.
Note that this class does not have its own initializing method. It initial-
izes the same problem in the same way as ConsIndShockSetup, from which it
inherits.
"""
def prepare_to_solve(self):
"""
Perform preparatory work before calculating the unconstrained consumption
function.
Parameters
----------
none
Returns
-------
none
"""
self.ShkPrbsNext = self.IncShkDstn.pmf
self.PermShkValsNext = self.IncShkDstn.X[0]
self.TranShkValsNext = self.IncShkDstn.X[1]
(
self.DiscFacEff,
self.BoroCnstNat,
self.cFuncLimitIntercept,
self.cFuncLimitSlope,
self.mNrmMinNow,
self.hNrmNow,
self.MPCminNow,
self.MPCmaxNow,
self.MPCmaxEff,
self.Ex_IncNext,
self.mNrmNext,
self.PermShkVals_temp,
self.ShkPrbs_temp,
self.aNrmNow,
) = _prepare_to_solveConsIndShockNumba(
self.DiscFac,
self.LivPrb,
self.CRRA,
self.Rfree,
self.PermGroFac,
self.BoroCnstArt,
self.aXtraGrid,
self.solution_next.hNrm,
self.solution_next.mNrmMin,
self.solution_next.MPCmin,
self.solution_next.MPCmax,
self.PermShkValsNext,
self.TranShkValsNext,
self.ShkPrbsNext,
)
def solve(self):
"""
Solves a one period consumption saving problem with risky income.
Parameters
----------
None
Returns
-------
solution : ConsumerSolution
The solution to the one period problem.
"""
self.cNrm, self.mNrm, self.EndOfPrdvP = _solveConsIndShockLinearNumba(
self.solution_next.mNrmMin,
self.mNrmNext,
self.CRRA,
self.solution_next.mNrm,
self.solution_next.cNrm,
self.DiscFacEff,
self.Rfree,
self.PermGroFac,
self.PermShkVals_temp,
self.ShkPrbs_temp,
self.aNrmNow,
self.BoroCnstNat,
self.solution_next.cFuncLimitIntercept,
self.solution_next.cFuncLimitSlope,
)
# Pack up the solution and return it
solution = IndShockSolution(
self.mNrm,
self.cNrm,
self.cFuncLimitIntercept,
self.cFuncLimitSlope,
self.mNrmMinNow,
self.hNrmNow,
self.MPCminNow,
self.MPCmaxEff,
self.Ex_IncNext,
)
return solution
@njit(cache=True)
def _solveConsIndShockCubicNumba(
mNrmMinNext,
mNrmNext,
mNrmUnc,
cNrmUnc,
MPCNext,
cFuncInterceptNext,
cFuncSlopeNext,
CRRA,
DiscFacEff,
Rfree,
PermGroFac,
PermShkVals_temp,
ShkPrbs_temp,
aNrmNow,
BoroCnstNat,
MPCmaxNow,
):
mNrmCnst = np.array([mNrmMinNext, mNrmMinNext + 1])
cNrmCnst = np.array([0.0, 1.0])
cFuncNextCnst, MPCNextCnst = linear_interp_deriv_fast(
mNrmNext.flatten(), mNrmCnst, cNrmCnst
)
cFuncNextUnc, MPCNextUnc = cubic_interp_fast(
mNrmNext.flatten(),
mNrmUnc,
cNrmUnc,
MPCNext,
cFuncInterceptNext,
cFuncSlopeNext,
)
cFuncNext = np.where(cFuncNextCnst <= cFuncNextUnc, cFuncNextCnst, cFuncNextUnc)
vPfuncNext = utilityP(cFuncNext, CRRA).reshape(mNrmNext.shape)
EndOfPrdvP = (
DiscFacEff
* Rfree
* PermGroFac ** (-CRRA)
* np.sum(PermShkVals_temp ** (-CRRA) * vPfuncNext * ShkPrbs_temp, axis=0)
)
# Finds interpolation points (c,m) for the consumption function.
cNrmNow = EndOfPrdvP ** (-1.0 / CRRA)
mNrmNow = cNrmNow + aNrmNow
# Limiting consumption is zero as m approaches mNrmMin
cNrm = _np_insert(cNrmNow, 0, 0.0, axis=-1)
mNrm = _np_insert(mNrmNow, 0, BoroCnstNat, axis=-1)
"""
Makes a cubic spline interpolation of the unconstrained consumption
function for this period.
"""
MPCinterpNext = np.where(cFuncNextCnst <= cFuncNextUnc, MPCNextCnst, MPCNextUnc)
vPPfuncNext = (MPCinterpNext * utilityPP(cFuncNext, CRRA)).reshape(mNrmNext.shape)
EndOfPrdvPP = (
DiscFacEff
* Rfree
* Rfree
* PermGroFac ** (-CRRA - 1.0)
* np.sum(PermShkVals_temp ** (-CRRA - 1.0) * vPPfuncNext * ShkPrbs_temp, axis=0)
)
dcda = EndOfPrdvPP / utilityPP(cNrm[1:], CRRA)
MPC = dcda / (dcda + 1.0)
MPC = _np_insert(MPC, 0, MPCmaxNow)
return cNrm, mNrm, MPC, EndOfPrdvP
@njit(cache=True)
def _cFuncCubic(aXtraGrid, mNrmMinNow, mNrmNow, cNrmNow, MPCNow, MPCminNow, hNrmNow):
mNrmGrid = mNrmMinNow + aXtraGrid
mNrmCnst = np.array([mNrmMinNow, mNrmMinNow + 1])
cNrmCnst = np.array([0.0, 1.0])
cFuncNowCnst = linear_interp_fast(mNrmGrid.flatten(), mNrmCnst, cNrmCnst)
cFuncNowUnc, MPCNowUnc = cubic_interp_fast(
mNrmGrid.flatten(), mNrmNow, cNrmNow, MPCNow, MPCminNow * hNrmNow, MPCminNow
)
cNrmNow = np.where(cFuncNowCnst <= cFuncNowUnc, cFuncNowCnst, cFuncNowUnc)
return cNrmNow, mNrmGrid
@njit(cache=True)
def _cFuncLinear(aXtraGrid, mNrmMinNow, mNrmNow, cNrmNow, MPCminNow, hNrmNow):
mNrmGrid = mNrmMinNow + aXtraGrid
mNrmCnst = np.array([mNrmMinNow, mNrmMinNow + 1])
cNrmCnst = np.array([0.0, 1.0])
cFuncNowCnst = linear_interp_fast(mNrmGrid.flatten(), mNrmCnst, cNrmCnst)
cFuncNowUnc = linear_interp_fast(
mNrmGrid.flatten(), mNrmNow, cNrmNow, MPCminNow * hNrmNow, MPCminNow
)
cNrmNow = np.where(cFuncNowCnst <= cFuncNowUnc, cFuncNowCnst, cFuncNowUnc)
return cNrmNow, mNrmGrid
@njit(cache=True)
def _add_vFuncNumba(
mNrmNext,
mNrmGridNext,
vNvrsNext,
vNvrsPNext,
MPCminNvrsNext,
hNrmNext,
CRRA,
PermShkVals_temp,
PermGroFac,
DiscFacEff,
ShkPrbs_temp,
EndOfPrdvP,
aNrmNow,
BoroCnstNat,
mNrmGrid,
cFuncNow,
mNrmMinNow,
MPCmaxEff,
MPCminNow,
):
"""
Construct the end-of-period value function for this period, storing it
as an attribute of self for use by other methods.
"""
# vFunc always cubic
vNvrsFuncNow, _ = cubic_interp_fast(
mNrmNext.flatten(),
mNrmGridNext,
vNvrsNext,
vNvrsPNext,
MPCminNvrsNext * hNrmNext,
MPCminNvrsNext,
)
vFuncNext = utility(vNvrsFuncNow, CRRA).reshape(mNrmNext.shape)
VLvlNext = (
PermShkVals_temp ** (1.0 - CRRA) * PermGroFac ** (1.0 - CRRA)
) * vFuncNext
EndOfPrdv = DiscFacEff * np.sum(VLvlNext * ShkPrbs_temp, axis=0)
# value transformed through inverse utility
EndOfPrdvNvrs = utility_inv(EndOfPrdv, CRRA)
EndOfPrdvNvrsP = EndOfPrdvP * utility_invP(EndOfPrdv, CRRA)
EndOfPrdvNvrs = _np_insert(EndOfPrdvNvrs, 0, 0.0)
# This is a very good approximation, vNvrsPP = 0 at the asset minimum
EndOfPrdvNvrsP = _np_insert(EndOfPrdvNvrsP, 0, EndOfPrdvNvrsP[0])
aNrm_temp = _np_insert(aNrmNow, 0, BoroCnstNat)
"""
Creates the value function for this period, defined over market resources m.
self must have the attribute EndOfPrdvFunc in order to execute.
"""
# Compute expected value and marginal value on a grid of market resources
aNrmNow = mNrmGrid - cFuncNow
EndOfPrdvNvrsFunc, _ = cubic_interp_fast(
aNrmNow, aNrm_temp, EndOfPrdvNvrs, EndOfPrdvNvrsP
)
EndOfPrdvFunc = utility(EndOfPrdvNvrsFunc, CRRA)
vNrmNow = utility(cFuncNow, CRRA) + EndOfPrdvFunc
vPnow = utilityP(cFuncNow, CRRA)
# Construct the beginning-of-period value function
vNvrs = utility_inv(vNrmNow, CRRA) # value transformed through inverse utility
vNvrsP = vPnow * utility_invP(vNrmNow, CRRA)
mNrmGrid = _np_insert(mNrmGrid, 0, mNrmMinNow)
vNvrs = _np_insert(vNvrs, 0, 0.0)
vNvrsP = _np_insert(vNvrsP, 0, MPCmaxEff ** (-CRRA / (1.0 - CRRA)))
MPCminNvrs = MPCminNow ** (-CRRA / (1.0 - CRRA))
return (
mNrmGrid,
vNvrs,
vNvrsP,
MPCminNvrs,
)
@njit
def _add_mNrmStEIndNumba(
PermGroFac,
Rfree,
Ex_IncNext,
mNrmMin,
mNrm,
cNrm,
MPC,
MPCmin,
hNrm,
_searchfunc,
):
"""
Finds steady state (normalized) market resources and adds it to the
solution. This is the level of market resources such that the expectation
of market resources in the next period is unchanged. This value doesn't
necessarily exist.
"""
# Minimum market resources plus next income is okay starting guess
m_init_guess = mNrmMin + Ex_IncNext
mNrmStE = newton_secant(
_searchfunc,
m_init_guess,
args=(PermGroFac, Rfree, Ex_IncNext, mNrmMin, mNrm, cNrm, MPC, MPCmin, hNrm),
disp=False,
)
if mNrmStE.converged:
return mNrmStE.root
else:
return None
@njit(cache=True)
def _find_mNrmStELinear(
m, PermGroFac, Rfree, Ex_IncNext, mNrmMin, mNrm, cNrm, MPC, MPCmin, hNrm
):
# Make a linear function of all combinations of c and m that yield mNext = mNow
mZeroChange = (1.0 - PermGroFac / Rfree) * m + (PermGroFac / Rfree) * Ex_IncNext
mNrmCnst = np.array([mNrmMin, mNrmMin + 1])
cNrmCnst = np.array([0.0, 1.0])
cFuncNowCnst = linear_interp_fast(np.array([m]), mNrmCnst, cNrmCnst)
cFuncNowUnc = linear_interp_fast(np.array([m]), mNrm, cNrm, MPCmin * hNrm, MPCmin)
cNrmNow = np.where(cFuncNowCnst <= cFuncNowUnc, cFuncNowCnst, cFuncNowUnc)
# Find the steady state level of market resources
res = cNrmNow[0] - mZeroChange
# A zero of this is SS market resources
return res
@njit(cache=True)
def _find_mNrmStECubic(
m, PermGroFac, Rfree, Ex_IncNext, mNrmMin, mNrm, cNrm, MPC, MPCmin, hNrm
):
# Make a linear function of all combinations of c and m that yield mNext = mNow
mZeroChange = (1.0 - PermGroFac / Rfree) * m + (PermGroFac / Rfree) * Ex_IncNext
mNrmCnst = np.array([mNrmMin, mNrmMin + 1])
cNrmCnst = np.array([0.0, 1.0])
cFuncNowCnst = linear_interp_fast(np.array([m]), mNrmCnst, cNrmCnst)
cFuncNowUnc, MPCNowUnc = cubic_interp_fast(
np.array([m]), mNrm, cNrm, MPC, MPCmin * hNrm, MPCmin
)
cNrmNow = np.where(cFuncNowCnst <= cFuncNowUnc, cFuncNowCnst, cFuncNowUnc)
# Find the steady state level of market resources
res = cNrmNow[0] - mZeroChange
# A zero of this is SS market resources
return res
class ConsIndShockSolverFast(ConsIndShockSolverBasicFast):
"""
This class solves a single period of a standard consumption-saving problem.
It inherits from ConsIndShockSolverBasic, adding the ability to perform cubic
interpolation and to calculate the value function.
"""
def solve(self):
"""
Solves a one period consumption saving problem with risky income.
Parameters
----------
None
Returns
-------
solution : ConsumerSolution
The solution to the one period problem.
"""
if self.CubicBool:
(
self.cNrm,
self.mNrm,
self.MPC,
self.EndOfPrdvP,
) = _solveConsIndShockCubicNumba(
self.solution_next.mNrmMin,
self.mNrmNext,
self.solution_next.mNrm,
self.solution_next.cNrm,
self.solution_next.MPC,
self.solution_next.cFuncLimitIntercept,
self.solution_next.cFuncLimitSlope,
self.CRRA,
self.DiscFacEff,
self.Rfree,
self.PermGroFac,
self.PermShkVals_temp,
self.ShkPrbs_temp,
self.aNrmNow,
self.BoroCnstNat,
self.MPCmaxNow,
)
# Pack up the solution and return it
solution = IndShockSolution(
self.mNrm,
self.cNrm,
self.cFuncLimitIntercept,
self.cFuncLimitSlope,
self.mNrmMinNow,
self.hNrmNow,
self.MPCminNow,
self.MPCmaxEff,
self.Ex_IncNext,
self.MPC,
)
else:
self.cNrm, self.mNrm, self.EndOfPrdvP = _solveConsIndShockLinearNumba(
self.solution_next.mNrmMin,
self.mNrmNext,
self.CRRA,
self.solution_next.mNrm,
self.solution_next.cNrm,
self.DiscFacEff,
self.Rfree,
self.PermGroFac,
self.PermShkVals_temp,
self.ShkPrbs_temp,
self.aNrmNow,
self.BoroCnstNat,
self.solution_next.cFuncLimitIntercept,
self.solution_next.cFuncLimitSlope,
)
# Pack up the solution and return it
solution = IndShockSolution(
self.mNrm,
self.cNrm,
self.cFuncLimitIntercept,
self.cFuncLimitSlope,
self.mNrmMinNow,
self.hNrmNow,
self.MPCminNow,
self.MPCmaxEff,
self.Ex_IncNext,
)
if self.vFuncBool:
if self.CubicBool:
self.cFuncNow, self.mNrmGrid = _cFuncCubic(
self.aXtraGrid,
self.mNrmMinNow,
self.mNrm,
self.cNrm,
self.MPC,
self.MPCminNow,
self.hNrmNow,
)
else:
self.cFuncNow, self.mNrmGrid = _cFuncLinear(
self.aXtraGrid,
self.mNrmMinNow,
self.mNrm,
self.cNrm,
self.MPCminNow,
self.hNrmNow,
)
self.mNrmGrid, self.vNvrs, self.vNvrsP, self.MPCminNvrs = _add_vFuncNumba(
self.mNrmNext,
self.solution_next.mNrmGrid,
self.solution_next.vNvrs,
self.solution_next.vNvrsP,
self.solution_next.MPCminNvrs,
self.solution_next.hNrm,
self.CRRA,
self.PermShkVals_temp,
self.PermGroFac,
self.DiscFacEff,
self.ShkPrbs_temp,
self.EndOfPrdvP,
self.aNrmNow,
self.BoroCnstNat,
self.mNrmGrid,
self.cFuncNow,
self.mNrmMinNow,
self.MPCmaxEff,
self.MPCminNow,
)
# Pack up the solution and return it
solution.mNrmGrid = self.mNrmGrid
solution.vNvrs = self.vNvrs
solution.vNvrsP = self.vNvrsP
solution.MPCminNvrs = self.MPCminNvrs
return solution
# ============================================================================
# == Classes for representing types of consumer agents (and things they do) ==
# ============================================================================
class PerfForesightConsumerTypeFast(PerfForesightConsumerType):
"""
A perfect foresight consumer type who has no uncertainty other than mortality.
His problem is defined by a coefficient of relative risk aversion, intertemporal
discount factor, interest factor, an artificial borrowing constraint (maybe)
and time sequences of the permanent income growth rate and survival probability.
"""
# Define some universal values for all consumer types
solution_terminal_ = PerfForesightSolution()
def __init__(self, **kwargs):
PerfForesightConsumerType.__init__(self, **kwargs)
self.solve_one_period = make_one_period_oo_solver(ConsPerfForesightSolverFast)
def update_solution_terminal(self):
"""
Update the terminal period solution. This method should be run when a
new AgentType is created or when CRRA changes.
"""
self.solution_terminal_cs = ConsumerSolution(
cFunc=self.cFunc_terminal_,
vFunc=ValueFuncCRRA(self.cFunc_terminal_, self.CRRA),
vPfunc=MargValueFuncCRRA(self.cFunc_terminal_, self.CRRA),
vPPfunc=MargMargValueFuncCRRA(self.cFunc_terminal_, self.CRRA),
mNrmMin=0.0,
hNrm=0.0,
MPCmin=1.0,
MPCmax=1.0,
)
def post_solve(self):
"""
Defines the value and marginal value functions for this period.
Uses the fact that for a perfect foresight CRRA utility problem,
if the MPC at :math:`t` is :math:`\\kappa_{t}`, and relative risk
aversion is :math:`\\rho`, then the inverse value function ``vFuncNvrs`` has a
constant slope of :math:`\\kappa_{t}^{-\\rho/(1-\\rho)}` and
``vFuncNvrs`` has value of zero at the lower bound of market resources
`mNrmMin`. See the `PerfForesightConsumerType <https://hark.readthedocs.io/en/latest/example_notebooks/PerfForesightConsumerType.html?highlight=PerfForesightConsumerType#Solution-method-for-PerfForesightConsumerType>`_ documentation notebook
for a brief explanation and the links below for a fuller treatment.
`PerfForesightCRRA/#vFuncAnalytical <https://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/consumption/PerfForesightCRRA/#vFuncAnalytical>`_
`SolvingMicroDSOPs/#vFuncPF <https://www.econ2.jhu.edu/people/ccarroll/SolvingMicroDSOPs/#vFuncPF>`_
"""
self.solution_fast = deepcopy(self.solution)
if self.cycles == 0:
terminal = 1
else:
terminal = self.cycles
self.solution[terminal] = self.solution_terminal_cs
for i in range(terminal):
solution = self.solution[i]
# Construct the consumption function as a linear interpolation.
cFunc = LinearInterp(solution.mNrm, solution.cNrm)
vFuncNvrs = LinearInterp(
np.array([solution.mNrmMin, solution.mNrmMin + 1.0]),
np.array([0.0, solution.vFuncNvrsSlope]),
)
vFunc = ValueFuncCRRA(vFuncNvrs, self.CRRA)
vPfunc = MargValueFuncCRRA(cFunc, self.CRRA)
consumer_solution = ConsumerSolution(
cFunc=cFunc,
vFunc=vFunc,
vPfunc=vPfunc,
mNrmMin=solution.mNrmMin,
hNrm=solution.hNrm,
MPCmin=solution.MPCmin,
MPCmax=solution.MPCmax,
)
Ex_IncNext = 1.0 # Perfect foresight income of 1
# Add mNrmStE to the solution and return it
consumer_solution.mNrmStE = _add_mNrmStENumba(
self.Rfree,
self.PermGroFac[i],
solution.mNrm,
solution.cNrm,
solution.mNrmMin,
Ex_IncNext,
_find_mNrmStE,
)
self.solution[i] = consumer_solution
class IndShockConsumerTypeFast(IndShockConsumerType, PerfForesightConsumerTypeFast):
solution_terminal_ = IndShockSolution()
def __init__(self, **kwargs):
IndShockConsumerType.__init__(self, **kwargs)
# Add consumer-type specific objects, copying to create independent versions
if (not self.CubicBool) and (not self.vFuncBool):
solver = ConsIndShockSolverBasicFast
else: # Use the "advanced" solver if either is requested
solver = ConsIndShockSolverFast
self.solve_one_period = make_one_period_oo_solver(solver)
def update_solution_terminal(self):
PerfForesightConsumerTypeFast.update_solution_terminal(self)
with np.errstate(
divide="ignore", over="ignore", under="ignore", invalid="ignore"
):
self.solution_terminal.MPC = np.array([1.0, 1.0])
self.solution_terminal.MPCminNvrs = 0.0
self.solution_terminal.vNvrs = utility(np.linspace(0.0, 1.0), self.CRRA)
self.solution_terminal.vNvrsP = utilityP(np.linspace(0.0, 1.0), self.CRRA)
self.solution_terminal.mNrmGrid = np.linspace(0.0, 1.0)
def post_solve(self):
self.solution_fast = deepcopy(self.solution)
if self.cycles == 0:
cycles = 1
else:
cycles = self.cycles
self.solution[-1] = self.solution_terminal_cs
for i in range(cycles):
for j in range(self.T_cycle):
solution = self.solution[i * self.T_cycle + j]
# Define the borrowing constraint (limiting consumption function)
cFuncNowCnst = LinearInterp(
np.array([solution.mNrmMin, solution.mNrmMin + 1]),
np.array([0.0, 1.0]),
)
"""
Constructs a basic solution for this period, including the consumption
function and marginal value function.
"""
if self.CubicBool:
# Makes a cubic spline interpolation of the unconstrained consumption
# function for this period.
cFuncNowUnc = CubicInterp(
solution.mNrm,
solution.cNrm,
solution.MPC,
solution.cFuncLimitIntercept,
solution.cFuncLimitSlope,
)
else:
# Makes a linear interpolation to represent the (unconstrained) consumption function.
# Construct the unconstrained consumption function
cFuncNowUnc = LinearInterp(
solution.mNrm,
solution.cNrm,
solution.cFuncLimitIntercept,
solution.cFuncLimitSlope,
)
# Combine the constrained and unconstrained functions into the true consumption function
cFuncNow = LowerEnvelope(cFuncNowUnc, cFuncNowCnst)
# Make the marginal value function and the marginal marginal value function
vPfuncNow = MargValueFuncCRRA(cFuncNow, self.CRRA)
# Pack up the solution and return it
consumer_solution = ConsumerSolution(
cFunc=cFuncNow,
vPfunc=vPfuncNow,
mNrmMin=solution.mNrmMin,
hNrm=solution.hNrm,
MPCmin=solution.MPCmin,
MPCmax=solution.MPCmax,
)
if self.vFuncBool:
vNvrsFuncNow = CubicInterp(
solution.mNrmGrid,
solution.vNvrs,
solution.vNvrsP,
solution.MPCminNvrs * solution.hNrm,
solution.MPCminNvrs,
)
vFuncNow = ValueFuncCRRA(vNvrsFuncNow, self.CRRA)
consumer_solution.vFunc = vFuncNow
if self.CubicBool or self.vFuncBool:
_searchFunc = (
_find_mNrmStECubic if self.CubicBool else _find_mNrmStELinear
)
# Add mNrmStE to the solution and return it
consumer_solution.mNrmStE = _add_mNrmStEIndNumba(
self.PermGroFac[j],
self.Rfree,
solution.Ex_IncNext,
solution.mNrmMin,
solution.mNrm,
solution.cNrm,
solution.MPC,
solution.MPCmin,
solution.hNrm,
_searchFunc,
)
self.solution[i * self.T_cycle + j] = consumer_solution
| 33.160247 | 250 | 0.609753 |
ba916170370225478877a5402ce248567e47cb1c | 1,606 | py | Python | samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | ["Apache-2.0"] | 1 | 2022-03-30T05:23:29.000Z | 2022-03-30T05:23:29.000Z | samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | ["Apache-2.0"] | null | null | null | samples/generated_samples/aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py | sakagarwal/python-aiplatform | 62b4a1ea589235910c6e87f027899a29bf1bacb1 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ReadTensorboardBlobData
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardBlobData_sync]
from google.cloud import aiplatform_v1beta1
def sample_read_tensorboard_blob_data():
# Create a client
client = aiplatform_v1beta1.TensorboardServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ReadTensorboardBlobDataRequest(
time_series="time_series_value",
)
# Make the request
stream = client.read_tensorboard_blob_data(request=request)
# Handle the response
for response in stream:
print(response)
# [END aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardBlobData_sync]
| 34.170213 | 86 | 0.774595 |
421132a9cbb747386670f547cbcb0cf377d692cd | 1,182 | py | Python | tmis.py | Arnie97/emu-screenshot-server | a50526c0d852cd61050ba2926d43c84241d57afb | ["MIT"] | 42 | 2018-01-16T10:48:58.000Z | 2020-08-28T07:34:56.000Z | tmis.py | Arnie97/emu-screenshot | a50526c0d852cd61050ba2926d43c84241d57afb | ["MIT"] | 1 | 2018-11-12T06:20:50.000Z | 2018-11-12T06:45:30.000Z | tmis.py | Arnie97/emu-screenshot | a50526c0d852cd61050ba2926d43c84241d57afb | ["MIT"] | 4 | 2018-06-09T02:29:45.000Z | 2020-08-07T11:47:52.000Z |
#!/usr/bin/env python3
import json
import requests
from collections import OrderedDict
from util import repl, progress
def tmis(name='', bureau=0) -> OrderedDict:
url = 'http://hyfw.12306.cn/hyinfo/action/FwcszsAction_getljcz'
params = 'limit timestamp sheng shi'
params = {k: '' for k in params.split()}
params.update(q=name, ljdm=format(bureau, '02'))
while True:
try:
response = requests.post(url, params, timeout=1).json()
except (requests.exceptions.Timeout, json.JSONDecodeError):
progress('X')
else:
break
return OrderedDict((d['HZZM'], d['TMISM']) for d in response)
def dfs(name='') -> OrderedDict:
'Split bulk requests into chunks.'
results = tmis(name)
if len(results) == 50:
for i in range(1, 19):
progress()
results.update(tmis(name, i))
return results
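# Usage sketch (hedged; the station name is illustrative): map station names
# to their TMIS codes, fanning out across bureau codes when the first page
# comes back full.
#     for station, code in dfs('北京').items():
#         print(station, code)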
def main(name: str):
'Format the query results.'
results = dfs(name)
if len(results) >= 50:
print()
for k, v in results.items():
print('|', k.ljust(5, '\u3000'), v)
print('=', len(results), '\n')
if __name__ == '__main__':
repl(main)
| 25.148936 | 67 | 0.598985 |
ae6a791de2aec2ad4fd07289ac9190e237754de6 | 5,685 | py | Python | dask_fft.py | moble/dask_fft | 6862f3b29800f27aca8ab1d11dca4f49705b39ee | ["MIT"] | null | null | null | dask_fft.py | moble/dask_fft | 6862f3b29800f27aca8ab1d11dca4f49705b39ee | ["MIT"] | null | null | null | dask_fft.py | moble/dask_fft | 6862f3b29800f27aca8ab1d11dca4f49705b39ee | ["MIT"] | null | null | null |
import sys
def fft(x, axis=-1, chunksize=2**26, available_memory=(4 * 1024**3), cache=None):
"""Simple wrapper for DAFT FFT function
This function calls the DAFT function, but also performs the computation of
the FFT, and returns the result as a numerical array.
Parameters
----------
x : array_like
Input array, can be complex.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is used.
chunksize : int, optional
        Chunksize to use when splitting up the input array. Default is 2**26,
        a reasonable target that reduces memory usage.
available_memory : int, optional
Maximum amount of RAM to use for caching during computation. Defaults
to 4*1024**3, which is 4GB.
"""
if cache is None:
from chest import Chest # For more flexible caching
cache = Chest(available_memory=available_memory)
X_dask = DAFT(x, axis=axis, chunksize=chunksize)
return X_dask.compute(cache=cache)
def fft_to_hdf5(x, filename, axis=-1, chunksize=2**26, available_memory=(4 * 1024**3), cache=None):
"""Simple wrapper for DAFT FFT function that writes to HDF5
This function calls the DAFT function, but also performs the computation of
the FFT, and outputs the result into the requested HDF5 file
Parameters
----------
x : array_like
Input array, can be complex.
filename : string
Relative or absolute path to HDF5 file. If this string contains a
colon, the preceding part is taken as the filename, while the following
part is taken as the dataset group name. The default group name is 'X'.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is used.
chunksize : int, optional
        Chunksize to use when splitting up the input array. Default is 2**26,
        a reasonable target that reduces memory usage.
available_memory : int, optional
Maximum amount of RAM to use for caching during computation. Defaults
to 4*1024**3, which is 4GB.
"""
from h5py import File
from dask import set_options
from dask.array import store
if cache is None:
from chest import Chest # For more flexible caching
cache = Chest(available_memory=available_memory)
if ':' in filename:
filename, groupname = filename.split(':')
else:
groupname = 'X'
X_dask = DAFT(x, axis=axis, chunksize=chunksize)
with set_options(cache=cache):
with File(filename, 'w') as f:
output = f.create_dataset(groupname, shape=X_dask.shape, dtype=X_dask.dtype)
store(X_dask, output)
return
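# Usage sketch (hedged; file name is illustrative): a colon in `filename`
# splits the HDF5 path from the dataset group name (default group 'X').
#     fft_to_hdf5(x, 'spectrum.h5:X', chunksize=2**22)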
def DAFT(x, axis=-1, chunksize=2**26):
"""Disk-Array Fourier Transform
This function enables Fourier transforms of a very large series, where the
entire series will not fit in memory. The standard radix-2 Cooley–Tukey
algorithm is used to split the series up into smaller pieces until a given
piece can be done entirely in memory. This smaller result is then stored
as a `dask.array`, and combined with other similar results out of memory,
using dask.
Parameters
----------
x : array_like
Input array, can be complex.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is used.
chunksize : int, optional
        Chunksize to use when splitting up the input array. Default is 2**26,
        a reasonable target that reduces memory usage.
Returns
-------
X_da : dask Array object
The Fourier transform is not yet computed; you must call
`X_da.compute()` on the result to perform the computation.
Example
-------
>>> import numpy as np
>>> from chest import Chest # For more flexible caching
>>> cache = Chest(available_memory=(4 * 1024**3)) # Use 4GB at most
>>> N = 2**26
>>> chunksize = N//(2**2)
>>> np.random.seed(1234)
>>> x = np.random.random(N) + 1j*np.random.random(N)
>>> X_dask = DAFT(x, chunksize=chunksize)
>>> %tic
>>> X_DAFT = X_dask.compute(cache=cache)
>>> %toc
>>> %tic
>>> X_np = np.fft.fft(x)
>>> %toc
>>> np.allclose(X_DAFT, X_np)
"""
import numpy as np
import dask.array as da
if axis<0:
axis = x.ndim + axis
N = x.shape[axis]
chunks = tuple(1 if ax!=axis else chunksize for ax,dim in enumerate(x.shape))
if isinstance(x, da.Array):
x_da = x.rechunk(chunks=chunks)
else:
x_da = da.from_array(x, chunks=chunks)
W = np.exp(-2j * np.pi * np.arange(N) / N)
# print(x.shape, axis, x_da.chunks, x_da.chunks[axis]); sys.stdout.flush()
slice_even = tuple(slice(None) if ax!=axis else slice(None, None, 2) for ax in range(x_da.ndim))
slice_odd = tuple(slice(None) if ax!=axis else slice(1, None, 2) for ax in range(x_da.ndim))
if len(x_da.chunks[axis]) != 1:
# TODO: Fix the following lines to be correct when x is multi-dimensional
FFT_even = DAFT(x_da[slice_even], axis, chunksize=chunksize)
FFT_odd = DAFT(x_da[slice_odd], axis, chunksize=chunksize)
else:
# TODO: Fix the following lines to be correct when x is multi-dimensional
FFT_even = da.fft.fft(x_da[slice_even], n=None, axis=axis)
FFT_odd = da.fft.fft(x_da[slice_odd], n=None, axis=axis)
# TODO: Fix the following line to broadcast W correctly when x is multi-dimensional
return da.concatenate([FFT_even + W[:N//2] * FFT_odd, FFT_even + W[N//2:] * FFT_odd], axis=axis)
| 39.755245 | 100 | 0.65066 |
5a15f6a531492e387aef95dc497f74237ba5cdc8 | 14,839 | py | Python | ipwhois/examples/elastic_search/elastic_search.py | stanislavlevin/ipwhois | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | ["BSD-2-Clause"] | 444 | 2015-01-01T05:00:28.000Z | 2022-03-11T03:03:18.000Z | ipwhois/examples/elastic_search/elastic_search.py | stanislavlevin/ipwhois | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | ["BSD-2-Clause"] | 219 | 2015-02-02T14:07:48.000Z | 2022-02-22T20:10:27.000Z | ipwhois/examples/elastic_search/elastic_search.py | stanislavlevin/ipwhois | b5d634d36b0b942d538d38d77b3bdcd815f155a0 | ["BSD-2-Clause"] | 129 | 2015-04-22T11:53:55.000Z | 2022-02-13T06:17:51.000Z |
# Basic example showing how to use ipwhois with elasticsearch/kibana.
#
# For geolite2 data, download the GeoLite2 database GeoLite2-City.mmdb and
# place in the data directory:
# https://dev.maxmind.com/geoip/geoip2/geolite2/
import argparse
import elasticsearch
from elasticsearch.helpers import scan
from ipwhois import IPWhois
from ipwhois.utils import get_countries
from ipwhois import __version__
from datetime import datetime
import geoip2.database
import json
import io
import sys
from os import path
from geopy.geocoders import Nominatim
from geopy.exc import (GeocoderQueryError, GeocoderTimedOut)
# Used to convert addresses to geo locations.
GEOLOCATOR = Nominatim(user_agent='ipwhois/{0}'.format(__version__))
# Setup the arg parser.
parser = argparse.ArgumentParser(
description='Populate ElasticSearch with ipwhois data.\n'
'Index: ipwhois\nDoc Types: base, entity'
)
parser.add_argument(
'--create',
action='store_true',
help='Create the ipwhois ElasticSearch index.'
)
parser.add_argument(
'--delete',
action='store_true',
help='Delete the ipwhois ElasticSearch index.'
)
parser.add_argument(
'--insert',
type=str,
nargs=1,
metavar='"IP"',
help='An IPv4 or IPv6 address as a string.'
)
parser.add_argument(
'--update',
action='store_true',
help='Update entries rather than inserting new.'
)
parser.add_argument(
'--expires',
type=int,
default=7,
metavar='INTEGER',
help='Only insert/update/query an IP address if it is '
'older than EXPIRES (days). Default: 7.'
)
parser.add_argument(
'--depth',
type=int,
default=1,
metavar='INTEGER',
help='How many levels deep to run queries when additional '
'referenced objects (IP entities) are found. Default: 1.'
)
parser.add_argument(
'--kexport',
type=str,
nargs=1,
metavar='"FILEPATH"',
    help='Export the ipwhois Kibana configuration (Index: .kibana) to a json '
         'file (FILEPATH).'
)
parser.add_argument(
'--kimport',
type=str,
nargs=1,
metavar='"FILEPATH"',
help='Import the ipwhois default Kibana configuration (Index: .kibana) '
'from a json file (FILEPATH).'
)
parser.add_argument(
'--host',
type=str,
nargs=1,
metavar='"HOST"',
default=['localhost'],
help='The ElasticSearch host to connect to. Default: "localhost".'
)
parser.add_argument(
'--port',
type=int,
metavar='PORT',
default=9200,
help='The ElasticSearch port to connect to. Default: 9200.'
)
# Get the args
args = parser.parse_args()
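# Example invocations (hedged sketch; the IP and file name are illustrative,
# the flags are those defined above):
#     python elastic_search.py --create
#     python elastic_search.py --insert "74.125.225.229" --depth 1
#     python elastic_search.py --kexport "kibana_config.json"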
# Common es mapping.
DEFAULT_MAPPING = {
'date_detection': 'true',
'properties': {
'@version': {
'type': 'text',
'index': False
},
'updated': {
'type': 'date',
'format': 'date_time_no_millis',
'ignore_malformed': 'false'
}
},
'dynamic_templates': [
{
'string_fields': {
'match': '*',
'match_mapping_type': 'string',
'mapping': {
'type': 'keyword'
}
}
}
]
}
# Get the current working directory.
CUR_DIR = path.dirname(__file__)
# Load the geo json for mapping ISO country codes to lat/lon geo coords.
with io.open(str(CUR_DIR) + '/data/geo_coord.json', 'r') as data_file:
GEO_COORD = json.load(data_file)
# Get the ISO country code mappings.
COUNTRIES = get_countries()
# Default: localhost:9200
es = elasticsearch.Elasticsearch(host=args.host[0], port=args.port)
def delete_index():
try:
# Delete existing entries
es.indices.delete(index='ipwhois_base')
except elasticsearch.exceptions.NotFoundError:
pass
try:
# Delete existing entries
es.indices.delete(index='ipwhois_entity')
except elasticsearch.exceptions.NotFoundError:
pass
def create_index():
# Create the ipwhois_base index
es.indices.create(index='ipwhois_base', ignore=400, body={
'settings': {
'index.refresh_interval': '5s',
'analysis': {
'analyzer': {
'base': {
'type': 'standard',
'stopwords': '_none_'
},
'entity': {
'type': 'standard',
'stopwords': '_none_'
}
}
}
}
})
# Create the ipwhois_entity index
es.indices.create(index='ipwhois_entity', ignore=400, body={
'settings': {
'index.refresh_interval': '5s',
'analysis': {
'analyzer': {
'base': {
'type': 'standard',
'stopwords': '_none_'
},
'entity': {
'type': 'standard',
'stopwords': '_none_'
}
}
}
}
})
# base doc type mapping
mapping = DEFAULT_MAPPING.copy()
mapping['properties'].update({
'asn_date': {
'type': 'date',
'format': 'date',
'ignore_malformed': 'true'
},
'network_events_timestamp': {
'type': 'date',
'format': 'date_time_no_millis',
'ignore_malformed': 'false'
},
'query': {
'type': 'ip',
'store': 'true',
'ignore_malformed': 'true'
},
'query_geo': {
'type': 'geo_point'
},
'network': {
'properties': {
'country_geo': {
'type': 'geo_point'
},
'start_address': {
'type': 'ip',
'store': 'true',
'ignore_malformed': 'true'
},
'end_address': {
'type': 'ip',
'store': 'true',
'ignore_malformed': 'true'
}
}
}
})
es.indices.put_mapping(
index='ipwhois_base',
doc_type='base',
body=mapping,
allow_no_indices=True
)
# entity doc type mapping
mapping = DEFAULT_MAPPING.copy()
mapping['properties'].update({
'contact': {
'properties': {
'address': {
'properties': {
'geo': {
'type': 'geo_point'
},
'value': {
'type': 'text',
}
}
}
}
}
})
es.indices.put_mapping(
index='ipwhois_entity',
doc_type='entity',
body=mapping,
allow_no_indices=True
)
def insert(input_ip='', update=True, expires=7, depth=1):
if update:
try:
# Only update if older than x days.
tmp = es.search(
index='ipwhois_base',
doc_type='base',
body={
'query': {
'bool': {
'must': [{
'range': {
'updated': {
'gt': 'now-{0}d'.format(expires)
}
}
}, {
'term': {
'query': str(input_ip)
}
}]
}
}
}
)
if len(tmp['hits']['hits']) > 0:
return
# A generic exception is raised, unfortunately.
except Exception as e:
print(e)
pass
    # Perform the RDAP lookup for the input IP address, retrieving all
    # entities up to depth.
result = IPWhois(input_ip)
ret = result.lookup_rdap(depth=depth)
tmp_objects = ret['objects'].items()
for ent_k, ent_v in tmp_objects:
if update:
try:
# Only update if older than 7 days.
es_tmp = es.search(
index='ipwhois_entity',
doc_type='entity',
body={
'query': {
'bool': {
'must': [
{
'range': {
'updated': {
'gt': 'now-{0}d'.format(expires)
}
}
},
{
'term': {
'handle': str(ent_k)
}
}
]
}
}
}
)
if len(es_tmp['hits']['hits']) > 0:
continue
# A generic exception is raised, unfortunately.
except Exception as e:
print(e)
pass
ent = ent_v
if sys.version_info >= (2, 7):
# Iterate the contact addresses.
for addr_k, addr_v in enumerate(ent_v['contact']['address']):
try:
# Attempt to translate the contact address to geo
# coordinates via geopy.
location = GEOLOCATOR.geocode(addr_v['value'].replace(
'\n', ' '))
# Add the geo coordinates for the contact address.
ent['contact']['address'][addr_k]['geo'] = {
'lat': location.latitude,
'lon': location.longitude
}
except (AttributeError, KeyError, GeocoderQueryError,
GeocoderTimedOut):
pass
# Set the entity updated timestamp.
ent['updated'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
if update:
try:
ent_search = es.search(
index='ipwhois_entity',
doc_type='entity',
body={
'query': {
'match': {
'handle': ent['handle']
}
}
}
)
for hit in ent_search['hits']['hits']:
es.delete(index='ipwhois_entity', doc_type='entity',
id=hit['_id'])
except KeyError:
pass
# Index the entity in elasticsearch.
es.index(index='ipwhois_entity', doc_type='entity', body=ent)
# Refresh the index for searching duplicates.
es.indices.refresh(index='ipwhois_entity')
# Don't need the objects key since that data has been entered as the
# entities doc_type.
del ret['objects']
try:
# Get the network ISO country code
cc = ret['network']['country']
# Add the geo coordinates for the country, defined in GEO_COORD.json.
ret['network']['country_geo'] = {
'lat': GEO_COORD[cc]['latitude'],
'lon': GEO_COORD[cc]['longitude']
}
# Set the network country name.
ret['network']['country_name'] = COUNTRIES[cc]
except KeyError:
pass
try:
# Get the MaxMind geo data for the query.
# I do not redistribute the GeoLite2 database, download
# GeoLite2-City.mmdb from:
# https://dev.maxmind.com/geoip/geoip2/geolite2/
mm_reader = geoip2.database.Reader(str(CUR_DIR) +
'/data/GeoLite2-City.mmdb')
# Query the database.
mm_response = mm_reader.city(ret['query'])
# Set the JSON geo data.
ret['query_geo'] = {
'lat': mm_response.location.latitude,
'lon': mm_response.location.longitude
}
ret['query_country_name'] = COUNTRIES[mm_response.country.iso_code]
# Generic exception. Need to determine all raised and update handling.
# geoip2.errors.AddressNotFoundError, TypeError, etc.
except Exception as e:
print(e)
pass
# Set the base updated timestamp.
ret['updated'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
if update:
try:
ip_search = es.search(
index='ipwhois_base',
doc_type='base',
body={
'query': {
'match': {
'query': ret['query']
}
}
}
)
for hit in ip_search['hits']['hits']:
es.delete(index='ipwhois_base', doc_type='base', id=hit['_id'])
except KeyError:
pass
# Index the base in elasticsearch.
es.index(index='ipwhois_base', doc_type='base', body=ret)
# Refresh the indices for searching duplicates.
es.indices.refresh(index='ipwhois_base')
es.indices.refresh(index='ipwhois_entity')
if args.delete:
delete_index()
if args.create:
create_index()
if args.insert:
insert(args.insert[0], args.update, args.expires, args.depth)
if args.kexport:
# Export dashboards, searches, and visualizations.
kibana_export = list(scan(
client=es, index='.kibana',
doc_type='dashboard,search,visualization')
)
# Export the ipwhois index pattern.
kibana_idx_export = list(scan(
client=es,
index='.kibana',
doc_type='index-pattern',
query={'query': {'match': {'_id': 'ipwhois*'}}}
))
# Dump exports to json file.
with io.open(args.kexport[0], 'w') as data_file:
json.dump(kibana_export + kibana_idx_export, data_file)
if args.kimport:
# Open kibana json file.
with io.open(args.kimport[0], 'r') as data_file:
kibana_import = json.load(data_file)
# Update or Insert kibana config for ipwhois.
for item in kibana_import:
es.update(index='.kibana', doc_type=item['_type'], id=item['_id'],
body={'doc': item['_source'], 'doc_as_upsert': True})
| 26.98 | 80 | 0.473549 |
65baa46d615492535a880a4590420a3e317f9eb3 | 282 | py | Python | filemanager/__init__.py | datopian/filemanager | 905b1cfca0a6874d2e79e7940a8dd1055dcb955b | ["MIT"] | 4 | 2019-11-18T12:04:07.000Z | 2020-03-07T02:44:06.000Z | filemanager/__init__.py | datopian/filemanager | 905b1cfca0a6874d2e79e7940a8dd1055dcb955b | ["MIT"] | 2 | 2018-03-22T11:49:49.000Z | 2018-09-04T04:15:04.000Z | filemanager/__init__.py | datahq/filemanager | 905b1cfca0a6874d2e79e7940a8dd1055dcb955b | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
import io
import os
from .blueprint import make_blueprint
from .models import FileManager
VERSION_FILE = os.path.join(os.path.dirname(__file__), 'VERSION')
__version__ = io.open(VERSION_FILE, encoding='utf-8').readline().strip()
__all__ = ['FileManager']
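# Hedged usage note (illustrative): the single-sourced version string can be
# consumed by callers as, e.g.:
#   import filemanager
#   print(filemanager.__version__)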
| 23.5
| 72
| 0.737589
|
1d7b1059bf2d70b6927591320b09ec7f4521abe8
| 2,121
|
py
|
Python
|
make-dataset.py
|
markriedl/StupidlySimpleCar
|
2301880b5f88b19fc270eae3ccc348b358f8cda2
|
[
"MIT"
] | 4
|
2017-02-09T08:56:29.000Z
|
2017-03-22T15:40:19.000Z
|
make-dataset.py
|
markriedl/StupidlySimpleCar
|
2301880b5f88b19fc270eae3ccc348b358f8cda2
|
[
"MIT"
] | null | null | null |
make-dataset.py
|
markriedl/StupidlySimpleCar
|
2301880b5f88b19fc270eae3ccc348b358f8cda2
|
[
"MIT"
] | null | null | null |
import random, sys
verbose = False
filename = ''  # Output CSV path to write the generated samples to
numRows = 0 # How many samples?
if len(sys.argv) < 3:
# Invalid command line usage
print("usage: python "+sys.argv[0]+" output.csv num_samples")
exit()
else:
filename = sys.argv[1]
numRows = int(sys.argv[2])
# Open the file
with open(filename, 'w') as file:
    # Write the header row. Note: 'left'/'right' appear twice, first as
    # sensor distances and again as the steering outputs.
    file.write('front,back,left,right,brakes,accel,left,right,dummy\n')
# Make new rows
for n in range(numRows):
# Generate a random state
# the distance to the nearest car in front/back/left/right is normalized from 0.0 (closest) to 1.0 (farthest)
carInFrontDist = random.random()
carInBackDist = random.random()
carLeftDist = random.random()
carRightDist = random.random()
# Response to the state. 1 = brakes/accelerator/steer-left/steer-right is activated. 0=not activated
# Though binary, we will be using numbers
brakes = 0.0
accel = 1.0
left = 0.0
right = 0.0
# Should I accelerate or brake?
if carInFrontDist < 0.50:
# Car is close, brake
# Unless there is another car close behind
if carInBackDist > 0.50:
# Okay to brake
brakes = 1.0 - (carInFrontDist/0.50)
accel = 0
else:
# Not okay to brake, but at least stop accelerating
brakes = 0
accel = 0
else:
# Car in front is not close, continue to accelerate
accel = (carInFrontDist - 0.50)/0.50
brakes = 0
# Should I turn left or right? (can't do both)
if carLeftDist < 0.50 and carRightDist > 0.50:
# A car is close on the left, there is space on the right
right = 1.0 - (carLeftDist/0.50)
left = 0
elif carRightDist < 0.50 and carLeftDist > 0.50:
# A car is close on the right, there is space on the left
left = 1.0 - (carRightDist/0.50)
right = 0
        # Comma-separated row of data
out = str(carInFrontDist)+','+str(carInBackDist)+','+str(carLeftDist)+','+str(carRightDist)+','+str(brakes)+','+str(accel)+','+str(left)+','+str(right)+',0'
# Maybe print to screen too?
if verbose:
            print(out)
# Write to file
file.write(out+'\n')
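
# --- Hedged example (not part of the original script) ---
# A minimal sketch of reading the generated dataset back. The header reuses
# the names 'left'/'right' for both sensor distances and steering outputs,
# so a positional csv.reader is safer than csv.DictReader here.
def example_load(path):
    import csv
    rows = []
    with open(path, 'r') as fh:
        reader = csv.reader(fh)
        next(reader)  # skip the header row
        for row in reader:
            # front, back, left, right, brakes, accel, left, right, dummy
            rows.append([float(v) for v in row])
    return rows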
| 27.907895
| 158
| 0.664309
|
4be962881de701575845275e39fbdf4a8376aa7f
| 8,718
|
py
|
Python
|
keith_data_intern_project_2.py
|
MrStacks/Shopify_data_engineer_internship
|
5d2e9de889b2e30f934a58fec6313e77cb67f070
|
[
"Apache-2.0"
] | 2
|
2022-01-24T20:40:53.000Z
|
2022-01-24T20:41:04.000Z
|
keith_data_intern_project_2.py
|
MrStacks/Shopify_data_engineer_internship
|
5d2e9de889b2e30f934a58fec6313e77cb67f070
|
[
"Apache-2.0"
] | null | null | null |
keith_data_intern_project_2.py
|
MrStacks/Shopify_data_engineer_internship
|
5d2e9de889b2e30f934a58fec6313e77cb67f070
|
[
"Apache-2.0"
] | null | null | null |
"""Searchable Image Repository
This script allows the user to store encrypted images and accompanying
keywords in a database. The user can then search the images via keyword
search.
Program accepts the following image file types: .bmp, .jpeg, .jpg, .png, .tiff
This script requires the following to be installed within the Python environment
you are running the script in:
`os` `sys` `cv2` `glob` `pandas` `cryptography.fernet` `uuid`
This file can also be imported as a module and contains the following
functions:
* get_key() - returns an encryption key for the file
* get_dataframe() - returns Pandas dataframe of existing database, or creates a new one
* encrypt_file() - returns image file in encrypted format
* image_data() - requests image attributes from user & populates corresponding values
* store_images() - stores image(s) in the database
* search_images() - requests image keyword(s) & returns corresponding image(s)
* main - main function permits user interactivity with the script
The script is a submission to the Summer 2022 Shopify Data Engineering
Internship Challenge. It is intended to be used as a base program which
can later be expanded for greater functionality. (E.g., to use a SQL or
other database, expanded image search via machine learning derived features,
etc.)
"""
## Import modules
import os
import sys
import cv2
import glob
import pandas as pd
from cryptography.fernet import Fernet
import uuid
## Constants
KEY_FILENAME = './key.key'
DATA_FILENAME = './data.csv'
## Mock function (wrapper around input() so tests can patch it)
def get_input(prompt = ''):
return input(prompt)
## Functions
def get_key():
'''
Generates encryption key, if one doesn't already exist.
:return: Fernet generated encryption key
'''
if os.path.exists(KEY_FILENAME):
with open(KEY_FILENAME, 'rb') as key_file:
return key_file.read()
else:
KEY = Fernet.generate_key() # encryption key
with open(KEY_FILENAME, 'wb') as key_file:
key_file.write(KEY)
return KEY
def get_dataframe():
'''
Checks if a dataframe (serving as the database) exists.
If so, returns the existing database. If not, creates a new
dataframe -> database.
    :return: dataframe (loaded from the CSV if present) with header columns ready for image data
'''
if os.path.exists(DATA_FILENAME): #if there is already a dataframe, use it
frame = pd.read_csv(DATA_FILENAME, dtype=str)
return frame
else: # if there is no dataframe, create one
DATA_DF = pd.DataFrame({
'image_name': pd.Series(dtype='str'),
'image_code': pd.Series(dtype='str'),
'image_keywords': pd.Series(dtype='str'),
'image_features':pd.Series(dtype='str'),
'image_access':pd.Series(dtype='str'),
'user_pass':pd.Series(dtype='str'),
'unique_uuid':pd.Series(dtype='str')
})
return DATA_DF
def encrypt_file(image_path, image_format):
'''
Converts image to encrypted code
:param image_path: filepath from which image was obtained
:param image_format: file extension as a String
:return: encrypted image
'''
F = Fernet(get_key())
image = cv2.imread(image_path)
img_encode = cv2.imencode('.'+image_format, image)[1].tobytes()
encrypted = F.encrypt(img_encode)
return encrypted
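def decrypt_file(encrypted):
    '''
    Hedged companion sketch (not part of the original submission): inverts
    encrypt_file() by decrypting the Fernet token back into an OpenCV image.
    Assumes numpy is installed alongside cv2.
    :param encrypted: Fernet token produced by encrypt_file()
    :return: decoded image array
    '''
    import numpy as np
    F = Fernet(get_key())
    decrypted = F.decrypt(encrypted)  # raises InvalidToken on a wrong key
    return cv2.imdecode(np.frombuffer(decrypted, dtype=np.uint8), cv2.IMREAD_COLOR)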
def image_data(image_path):
'''
    Requests or generates a unique name, encrypted code, keywords,
    and features for each image and populates the corresponding dataframe values.
:param image_path: filepath of the image
'''
filename, file_format = os.path.splitext(image_path)
filename = os.path.basename(filename)
# encrypt the image
image_code = encrypt_file(image_path, file_format)
image_access = 'private' #default
# get features from the user
print('Image found: ' + filename)
    # Get image keywords, process to remove unnecessary spaces
    image_keywords = get_input("Enter the relevant keywords about this image (separate with ','): ").split(',')
for i in range(len(image_keywords)):
image_keywords[i] = image_keywords[i].strip() # removes spaces on front and back of string
image_keywords = ",".join(image_keywords)
# get image permission from the user
    permission = get_input("Do you want to store this image as a public image (y/n): ").lower().strip()
    while permission != 'y' and permission != 'n':
        permission = get_input("Do you want to store this image as a public image (y/n): ").lower().strip()
    image_access = 'public' if permission == 'y' else 'private'
unique_uuid = uuid.uuid4() # generate unique UUID (for future MySQL database implementation)
# Process same as image_keywords
image_features = get_input("Please list any features of the image (separate with ','): ").split(',')
for i in range(len(image_features)):
image_features[i] = image_features[i].strip()
image_features = ",".join(image_features)
user_pass = "12345"
### TODO in the future add an "auto naming & auto featuring" function via extracting image features
### TODO with image processing algorithms (ML) here
return {
"image_name": filename,
"image_code": image_code,
"image_keywords": image_keywords,
"image_features": image_features,
"image_access": image_access,
"user_pass": user_pass,
"unique_uuid": unique_uuid
}
def store_images(directory):
'''
Stores image(s) in the database by loading the image (via filepath),
converting it to a dictionary of values, appends the dictionary to
the Pandas Dataframe, and writes Dataframe to a csv file.
:param directory: current directory from which user wants to upload images
'''
IMAGE_FORMATS = ['.bmp', '.jpeg', '.jpg', '.png', '.tiff']
DATA_DF = get_dataframe()
# Check if the directory is a file or a directory
files = glob.glob(os.path.join(directory,'*')) # glob searches over every file in the directory that the user entered
if os.path.isfile(directory):
files = [directory]
for file in files:
extension = os.path.splitext(file)
        if extension[1] not in IMAGE_FORMATS:  # is this one of the image formats we want
continue
filename = os.path.basename(extension[0])
        if DATA_DF is not None and (DATA_DF['image_name'] == filename).any():
            continue
        # Append this image's data as a new row (DataFrame.append was removed
        # in pandas 2.0, and str.contains would also match substrings).
        DATA_DF = pd.concat([DATA_DF, pd.DataFrame([image_data(file)])], ignore_index=True)
DATA_DF.to_csv(DATA_FILENAME)
def search_images(column):
'''
Searches the database for the image keywords entered by the user by
comparing the existing keywords with the keywords searched.
:return: images whose keywords correspond to keywords searched
'''
DATA_DF = get_dataframe()
keyword_search = get_input("Please enter the keywords(s) that you want to search (separate with ','): ").lower().split(',')
images_found = []
keyword_list = DATA_DF[column].to_list()
for i in range(len(keyword_list)):
keywords = keyword_list[i].split(',')
for keyword in keyword_search:
keyword_strip = keyword.strip()
if keyword_strip in keywords:
images_found.append(DATA_DF.iloc[i])
break
return images_found
def main():
'''
Program logic first requests a directory path from the user
(or using current directory) and then requests a mode of
search, store, or exit.
'''
while True:
directory = get_input('Enter directory path (if you want the current directory press Enter): ')
if directory == '': # if user doesn't enter a directory
            directory = os.getcwd()  # os.getcwd() returns the current working directory (output of pwd)
if os.path.exists(directory):
break
print('You did not enter an appropriate directory, please try again.')
while True:
print("Menu")
print("1. Store images in directory")
print("2. Search images by keyword")
print("3. Search images by feature")
print("q. Quit")
phase = get_input().lower().strip()
if phase == '1':
store_images(directory)
elif phase == '2':
print(search_images('image_keywords'))
elif phase == '3':
print(search_images('image_features'))
elif phase == 'q':
sys.exit()
# main guard
if __name__ == '__main__':
main()
| 36.630252
| 127
| 0.656228
|
3adfa51d0926b9922a3d59344af39b24ed7a3436
| 30,125
|
py
|
Python
|
core/pycopia/inet/SMTP.py
|
kdart/pycopia
|
1446fabaedf8c6bdd4ab1fc3f0ea731e0ef8da9d
|
[
"Apache-2.0"
] | 89
|
2015-03-26T11:25:20.000Z
|
2022-01-12T06:25:14.000Z
|
core/pycopia/inet/SMTP.py
|
kdart/pycopia
|
1446fabaedf8c6bdd4ab1fc3f0ea731e0ef8da9d
|
[
"Apache-2.0"
] | 1
|
2015-07-05T03:27:43.000Z
|
2015-07-11T06:21:20.000Z
|
core/pycopia/inet/SMTP.py
|
kdart/pycopia
|
1446fabaedf8c6bdd4ab1fc3f0ea731e0ef8da9d
|
[
"Apache-2.0"
] | 30
|
2015-04-30T01:35:54.000Z
|
2022-01-12T06:19:49.000Z
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
'''SMTP/ESMTP client class.
This should follow RFC 821 (SMTP), RFC 1869 (ESMTP), RFC 2554 (SMTP
Authentication) and RFC 2487 (Secure SMTP over TLS).
Notes:
Please remember, when doing ESMTP, that the names of the SMTP service
extensions are NOT the same thing as the option keywords for the RCPT
and MAIL commands!
Example:
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
    >>> print(s.help())
This is Sendmail version 8.8.4
Topics:
HELO EHLO MAIL RCPT DATA
RSET NOOP QUIT HELP VRFY
EXPN VERB ETRN DSN
For more info use "HELP <topic>".
To report bugs in the implementation send email to
sendmail-bugs@sendmail.org.
For local information send email to Postmaster at your site.
End of HELP info
>>> s.putcmd(b"vrfy","someone@here")
>>> s.getreply()
(250, "Somebody OverHere <somebody@here.my.org>")
>>> s.quit()
'''
from __future__ import print_function
# Author: The Dragon De Monsyne <dragondm@integral.org>
# ESMTP support, test code and doc fixes added by
# Eric S. Raymond <esr@thyrsus.com>
# Better RFC 821 compliance (MAIL and RCPT, and CRLF in data)
# by Carey Evans <c.evans@clear.net.nz>, for picky mail servers.
# RFC 2554 (authentication) support by Gerhard Haering <gerhard@bigfoot.de>.
#
# This was modified from the Python 1.5 library HTTP lib.
#
# Extensive modifications by Keith Dart <kdart@kdart.com>.
import re
from email.utils import parseaddr
import types
import base64
import hmac
from errno import EINTR, ECONNREFUSED
from io import BytesIO
from pycopia import socket
from pycopia import scheduler
SMTP_PORT = 25
CRLF=b"\r\n"
DOTCRLF=b".\r\n"
OLDSTYLE_AUTH = re.compile(r"auth=(.*)", re.I)
def encode_base64(s, eol=None):
return "".join(base64.encodestring(s).split("\n"))
# Exception classes used by this module.
class SMTPException(Exception):
"""Base class for all exceptions raised by this module."""
class SMTPServerDisconnected(SMTPException):
"""Not connected to any SMTP server.
This exception is raised when the server unexpectedly disconnects,
or when an attempt is made to use the SMTP instance before
connecting it to a server.
"""
class SMTPResponseException(SMTPException):
"""Base class for all exceptions that include an SMTP error code.
These exceptions are generated in some instances when the SMTP
server returns an error code. The error code is stored in the
`smtp_code` attribute of the error, and the `smtp_error` attribute
is set to the error message.
"""
def __init__(self, code, msg):
self.smtp_code = code
self.smtp_error = msg
self.args = (code, msg)
class SMTPSenderRefused(SMTPResponseException):
"""Sender address refused.
In addition to the attributes set by on all SMTPResponseException
exceptions, this sets `sender` to the string that the SMTP refused.
"""
def __init__(self, code, msg, sender):
self.smtp_code = code
self.smtp_error = msg
self.sender = sender
self.args = (code, msg, sender)
class SMTPRecipientsRefused(SMTPException):
"""All recipient addresses refused.
The errors for each recipient are accessible through the attribute
'recipients', which is a dictionary of exactly the same sort as
SMTP.sendmail() returns.
"""
def __init__(self, recipients):
self.recipients = recipients
self.args = ( recipients,)
class SMTPDataError(SMTPResponseException):
"""The SMTP server didn't accept the data."""
class SMTPConnectError(SMTPResponseException):
"""Error during connection establishment."""
class SMTPHeloError(SMTPResponseException):
"""The server refused our HELO reply."""
class SMTPAuthenticationError(SMTPResponseException):
"""Authentication error.
Most probably the server didn't accept the username/password
combination provided.
"""
class SSLFakeSocket(object):
"""A fake socket object that really wraps a SSLObject.
It only supports what is needed in smtplib.
"""
def __init__(self, realsock, sslobj):
self.realsock = realsock
self.sslobj = sslobj
def send(self, str):
self.sslobj.write(str)
return len(str)
sendall = send
def close(self):
self.realsock.close()
class SSLFakeFile(object):
"""A fake file like object that really wraps a SSLObject.
It only supports what is needed in smtplib.
"""
def __init__( self, sslobj):
self.sslobj = sslobj
    def readline(self):
        line = ""
        char = None
        while char != "\n":
            char = self.sslobj.read(1)
            if not char:
                # EOF guard: stop instead of spinning forever
                break
            line += char
        return line
def close(self):
pass
def quoteaddr(addr):
"""Quote a subset of the email addresses defined by RFC 821.
"""
m=parseaddr(addr)[1]
return "<%s>" % m
def quotedata(data):
"""Quote data for email.
Double leading '.', and change Unix newline '\\n', or Mac '\\r' into
Internet CRLF end-of-line.
"""
return re.sub(r'(?m)^\.', '..',
re.sub(r'(?:\r\n|\n|\r(?!\n))', CRLF, data))
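# Illustrative example: quotedata("one\n.two\n") -> "one\r\n..two\r\n";
# bare newlines become CRLF and a leading '.' on any line is doubled,
# per the RFC 821 transparency rules.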
class SMTP(object):
"""This class manages a connection to an SMTP or ESMTP server.
SMTP objects have the following attributes::
helo_resp
This is the message given by the server in response to the
most recent HELO command.
ehlo_resp
This is the message given by the server in response to the
most recent EHLO command. This is usually multiline.
does_esmtp
        This is a True value after you do an EHLO command, if the
server supports ESMTP.
esmtp_features
This is a dictionary, which, if the server supports ESMTP,
        will, after you do an EHLO command, contain the names of the
SMTP service extensions this server supports, and their
parameters (if any).
Note, all extension names are mapped to lower case in the
dictionary.
See each method's docstrings for details. In general, there is a
method of the same name to perform each SMTP command. There is also a
method called 'sendmail' that will do an entire mail transaction.
"""
file = None
helo_resp = None
ehlo_resp = None
does_esmtp = 0
def __init__(self, host='', port=25, bindto=None, logfile=None):
"""Initialize a new instance.
If specified, `host` is the name of the remote host to which to
connect. If specified, `port` specifies the port to which to connect.
By default, smtplib.SMTP_PORT is used. An SMTPConnectError is raised
if the specified `host` doesn't respond correctly. """
self.host = host
self.port = port
self._bindto = bindto
self.logfile = logfile
self.esmtp_features = {}
if host:
(code, msg) = self.connect(host, port, bindto)
if code != 220:
raise SMTPConnectError(code, msg)
def __repr__(self):
return "%s(%s, %d)" % (self.__class__.__name__, self.host, self.port)
def set_logfile(self, logfile):
self.logfile = logfile
def connect(self, host='localhost', port=0, bindto=None, retries=3):
"""Connect to a host on a given port.
If the hostname ends with a colon (`:`) followed by a number, and
there is no port specified, that suffix will be stripped off and the
number interpreted as the port number to use.
Note: This method is automatically invoked by __init__, if a host is
specified during instantiation.
"""
if not port and (host.find(':') == host.rfind(':')):
i = host.rfind(':')
if i >= 0:
host, port = host[:i], host[i+1:]
try: port = int(port)
except ValueError:
raise socket.error("nonnumeric port")
if not port:
port = SMTP_PORT
if self.logfile:
self.logfile.write('attempting SMTP.connect: %s %d\n' % (host, port))
msg = "getaddrinfo returns an empty list"
self.sock = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if self.logfile:
self.logfile.write('SMTP.connect: %s %d\n' % (host, port))
if bindto:
self.sock.bind((bindto, socket.IPPORT_USERRESERVED))
self._bindto = bindto
self._connect(sa, retries)
except socket.error as msg:
if self.logfile:
self.logfile.write('SMTP.connect fail: %s %d\n' % (host, port))
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error(msg)
(code, msg) = self.getreply()
if self.logfile:
self.logfile.write('SMTP.connect: %s %d\n' % (host, port))
return (code, msg)
    def _connect(self, addr, retries):
        retry = 0
        while retry < retries:
            retry += 1  # count every attempt, including refused ones
            try:
                self.sock.connect(addr)
            except socket.error as msg:
                if msg[0] == ECONNREFUSED:  # might be busy; pause and retry
                    scheduler.sleep(2)
                    continue
                else:
                    raise
            else:
                return
def send(self, s):
"""Send string to the server."""
if self.logfile:
self.logfile.write(b'send: %r\n' % (s,))
if self.sock:
try:
self.sock.sendall(s)
except socket.error:
self.close()
raise SMTPServerDisconnected('Server not connected')
else:
raise SMTPServerDisconnected('please invoke connect() first')
def putcmd(self, cmd, args=""):
"""Send a command to the server."""
if args == "":
out = b'%s%s' % (cmd, CRLF)
else:
out = b'%s %s%s' % (cmd, args, CRLF)
self.send(out.encode("ascii"))
def getreply(self):
"""Get a reply from the server.
Returns a tuple consisting of:
- server response code (e.g. '250', or such, if all goes well)
Note: returns -1 if it can't read response code.
- server response string corresponding to response code (multiline
responses are converted to a single, multiline string).
Raises SMTPServerDisconnected if end-of-file is reached.
"""
resp=[]
if self.file is None:
self.file = self.sock.makefile('rb')
while 1:
try:
line = self.file.readline()
except IOError as err:
if err[0] == EINTR:
continue
else:
raise
if line == '':
self.close()
raise SMTPServerDisconnected("Connection unexpectedly closed")
if self.logfile:
self.logfile.write('reply: %r\n' % (line,))
resp.append(line[4:].strip())
code=line[:3]
# Check that the error code is syntactically correct.
# Don't attempt to read a continuation line if it is broken.
try:
errcode = int(code)
except ValueError:
errcode = -1
break
# Check if multiline response.
if line[3:4]!="-":
break
errmsg = b"\n".join(resp)
if self.logfile:
self.logfile.write('reply: retcode (%s); Msg: %s\n' % (errcode,errmsg))
return errcode, errmsg
def docmd(self, cmd, args=""):
"""Send a command, and return its response code."""
self.putcmd(cmd, args)
return self.getreply()
# std smtp commands
def helo(self, name=''):
"""SMTP 'helo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
name = name or self._bindto
if name:
self.putcmd(b"helo", name)
else:
self.putcmd(b"helo", socket.getfqdn())
(code,msg)=self.getreply()
self.helo_resp=msg
return (code,msg)
def ehlo(self, name=''):
""" SMTP 'ehlo' command.
Hostname to send for this command defaults to the FQDN of the local
host.
"""
self.esmtp_features = {}
name = name or self._bindto
if name:
self.putcmd(b"ehlo", name)
else:
self.putcmd(b"ehlo", socket.getfqdn())
(code,msg)=self.getreply()
# According to RFC1869 some (badly written)
        # MTAs will disconnect on an EHLO. Toss an exception if
# that happens -ddm
if code == -1 and len(msg) == 0:
self.close()
raise SMTPServerDisconnected("Server not connected")
self.ehlo_resp=msg
if code != 250:
return (code,msg)
self.does_esmtp=1
#parse the ehlo response -ddm
resp=self.ehlo_resp.split('\n')
del resp[0]
for each in resp:
# To be able to communicate with as many SMTP servers as possible,
# we have to take the old-style auth advertisement into account,
# because:
# 1) Else our SMTP feature parser gets confused.
# 2) There are some servers that only advertise the auth methods we
# support using the old style.
auth_match = OLDSTYLE_AUTH.match(each)
if auth_match:
# This doesn't remove duplicates, but that's no problem
self.esmtp_features["auth"] = self.esmtp_features.get("auth", "") \
+ " " + auth_match.groups(0)[0]
continue
# RFC 1869 requires a space between ehlo keyword and parameters.
# It's actually stricter, in that only spaces are allowed between
            # parameters, but we're not going to check for that here. Note
# that the space isn't present if there are no parameters.
m=re.match(r'(?P<feature>[A-Za-z0-9][A-Za-z0-9\-]*)',each)
if m:
feature=m.group("feature").lower()
params=m.string[m.end("feature"):].strip()
if feature == "auth":
self.esmtp_features[feature] = self.esmtp_features.get(feature, "") \
+ " " + params
else:
self.esmtp_features[feature]=params
return (code,msg)
def has_extn(self, opt):
"""Does the server support a given SMTP service extension?"""
        return opt.lower() in self.esmtp_features
def help(self, args=''):
"""SMTP 'help' command.
Returns help text from server."""
self.putcmd(b"help", args)
return self.getreply()
def rset(self):
"""SMTP 'rset' command -- resets session."""
return self.docmd("rset")
def noop(self):
"""SMTP 'noop' command -- doesn't do anything :>"""
return self.docmd("noop")
def mail(self,sender, options=None):
"""SMTP 'mail' command -- begins mail xfer session."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd(b"mail", b"FROM:%s%s" % (quoteaddr(sender) ,optionlist))
return self.getreply()
def rcpt(self,recip, options=None):
"""SMTP 'rcpt' command -- indicates 1 recipient for this mail."""
optionlist = ''
if options and self.does_esmtp:
optionlist = ' ' + ' '.join(options)
self.putcmd(b"rcpt", b"TO:%s%s" % (quoteaddr(recip),optionlist))
return self.getreply()
def data(self,msg):
"""SMTP 'DATA' command -- sends message data to server.
Automatically quotes lines beginning with a period per rfc821.
Raises SMTPDataError if there is an unexpected reply to the
DATA command; the return value from this method is the final
        response code received when all the data is sent.
"""
self.putcmd(b"data")
(code,repl)=self.getreply()
if self.logfile:
self.logfile.write("data: %s %s\n" % (code,repl))
if code != 354:
raise SMTPDataError(code,repl)
else:
q = quotedata(msg)
if q[-2:] != CRLF:
q += CRLF
q += DOTCRLF
self.send(q)
(code, msg)=self.getreply()
if self.logfile:
self.logfile.write("data: %s %r\n" % (code,msg))
return (code,msg)
def verify(self, address):
"""SMTP 'verify' command -- checks for address validity."""
self.putcmd(b"vrfy", quoteaddr(address))
return self.getreply()
# a.k.a.
vrfy=verify
def expn(self, address):
"""SMTP 'verify' command -- checks for address validity."""
self.putcmd(b"expn", quoteaddr(address))
return self.getreply()
# some useful methods
def login(self, user, password):
"""Log in on an SMTP server that requires authentication.
The arguments are:
- user: The user name to authenticate with.
- password: The password for the authentication.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first.
This method will return normally if the authentication was successful.
This method may raise the following exceptions:
SMTPHeloError The server didn't reply properly to
the helo greeting.
SMTPAuthenticationError The server didn't accept the username/
password combination.
SMTPException No suitable authentication method was
found.
"""
def encode_cram_md5(challenge, user, password):
challenge = base64.decodestring(challenge)
response = user + " " + hmac.HMAC(password, challenge).hexdigest()
return encode_base64(response, eol="")
def encode_plain(user, password):
return encode_base64("%s\0%s\0%s" % (user, user, password), eol="")
AUTH_PLAIN = b"PLAIN"
AUTH_CRAM_MD5 = b"CRAM-MD5"
AUTH_LOGIN = b"LOGIN"
if self.helo_resp is None and self.ehlo_resp is None:
if not (200 <= self.ehlo()[0] <= 299):
(code, resp) = self.helo()
if not (200 <= code <= 299):
raise SMTPHeloError(code, resp)
if not self.has_extn("auth"):
raise SMTPException("SMTP AUTH extension not supported by server.")
# Authentication methods the server supports:
authlist = self.esmtp_features["auth"].split()
# List of authentication methods we support: from preferred to
# less preferred methods. Except for the purpose of testing the weaker
# ones, we prefer stronger methods like CRAM-MD5:
preferred_auths = [AUTH_CRAM_MD5, AUTH_PLAIN, AUTH_LOGIN]
# Determine the authentication method we'll use
authmethod = None
for method in preferred_auths:
if method in authlist:
authmethod = method
break
if authmethod == AUTH_CRAM_MD5:
(code, resp) = self.docmd("AUTH", AUTH_CRAM_MD5)
if code == 503:
# 503 == 'Error: already authenticated'
return (code, resp)
(code, resp) = self.docmd(encode_cram_md5(resp, user, password))
elif authmethod == AUTH_PLAIN:
(code, resp) = self.docmd("AUTH",
AUTH_PLAIN + " " + encode_plain(user, password))
elif authmethod == AUTH_LOGIN:
(code, resp) = self.docmd("AUTH",
"%s %s" % (AUTH_LOGIN, encode_base64(user, eol="")))
if code != 334:
raise SMTPAuthenticationError(code, resp)
(code, resp) = self.docmd(encode_base64(password, eol=""))
        elif authmethod is None:
raise SMTPException("No suitable authentication method found.")
if code not in [235, 503]:
# 235 == 'Authentication successful'
# 503 == 'Error: already authenticated'
raise SMTPAuthenticationError(code, resp)
return (code, resp)
def starttls(self, keyfile = None, certfile = None):
"""Puts the connection to the SMTP server into TLS mode.
If the server supports TLS, this will encrypt the rest of the SMTP
session. If you provide the keyfile and certfile parameters,
the identity of the SMTP server and client can be checked. This,
however, depends on whether the socket module really checks the
certificates.
"""
(resp, reply) = self.docmd("STARTTLS")
if resp == 220:
sslobj = socket.ssl(self.sock, keyfile, certfile)
self.sock = SSLFakeSocket(self.sock, sslobj)
self.file = SSLFakeFile(sslobj)
return (resp, reply)
def sendmail(self, from_addr, to_addrs, msg, mail_options=None, rcpt_options=None):
"""This command performs an entire mail transaction.
The arguments are::
:from_addr: The address sending this mail.
:to_addrs: A list of addresses to send this mail to. A bare
string will be treated as a list with 1 address.
:msg: The message to send.
:mail_options: List of ESMTP options (such as 8bitmime) for the
mail command.
:rcpt_options: List of ESMTP options (such as DSN commands) for
all the rcpt commands.
If there has been no previous EHLO or HELO command this session, this
method tries ESMTP EHLO first. If the server does ESMTP, message size
and each of the specified options will be passed to it. If EHLO
fails, HELO will be tried and ESMTP options suppressed.
This method will return normally if the mail is accepted for at least
one recipient. It returns a dictionary, with one entry for each
recipient that was refused. Each entry contains a tuple of the SMTP
error code and the accompanying error message sent by the server.
This method may raise the following exceptions::
:SMTPHeloError: The server didn't reply properly to
the helo greeting.
:SMTPRecipientsRefused: The server rejected ALL recipients
(no mail was sent).
:SMTPSenderRefused: The server didn't accept the from_addr.
:SMTPDataError: The server replied with an unexpected
error code (other than a refusal of
a recipient).
Note: the connection will be open even after an exception is raised.
Example::
>>> import smtplib
>>> s=smtplib.SMTP("localhost")
>>> tolist=["one@one.org","two@two.org","three@three.org","four@four.org"]
>>> msg = '''\\
... From: Me@my.org
... Subject: testin'...
...
... This is a test '''
>>> s.sendmail("me@my.org",tolist,msg)
{ "three@three.org" : ( 550 ,"User unknown" ) }
>>> s.quit()
In the above example, the message was accepted for delivery to three
of the four addresses, and one was rejected, with the error code
550. If all addresses are accepted, then the method will return an
empty dictionary.
"""
if self.helo_resp is None and self.ehlo_resp is None:
if not (200 <= self.ehlo()[0] <= 299):
(code,resp) = self.helo()
if not (200 <= code <= 299):
raise SMTPHeloError(code, resp)
esmtp_opts = []
if self.does_esmtp:
if self.has_extn('size'):
esmtp_opts.append("size={0:d}".format(len(msg)))
if mail_options:
for option in mail_options:
esmtp_opts.append(option)
(code,resp) = self.mail(from_addr, esmtp_opts)
if code != 250:
self.rset()
raise SMTPSenderRefused(code, resp, from_addr)
senderrs={}
if isinstance(to_addrs, types.StringTypes):
to_addrs = [to_addrs]
for each in to_addrs:
(code,resp)=self.rcpt(each, rcpt_options)
if (code != 250) and (code != 251):
senderrs[each]=(code,resp)
if len(senderrs)==len(to_addrs):
# the server refused all our recipients
self.rset()
raise SMTPRecipientsRefused(senderrs)
(code,resp) = self.data(msg)
if code != 250:
self.rset()
raise SMTPDataError(code, resp)
#if we got here then somebody got our mail
return senderrs
def close(self):
"""Close the connection to the SMTP server."""
if self.file:
self.file.close()
self.file = None
if self.sock:
self.sock.close()
self.sock = None
def quit(self):
"""Terminate the SMTP session."""
rv = self.docmd("QUIT")
self.close()
return rv
class Envelope(object):
"""Envelope([mail_from], [recpt_list])
An envelope holds an SMTP conversation from the MAIL FROM command to the end of
a DATA part. It may be re-sent by calling the 'send()' method with an SMTP
connection object. The message body can be parsed by passing in an 'email'
parser object to the 'parse()' method."""
def __init__ (self, mail_from=None, rcpt_to=None):
self.mail_from = mail_from
self.rcpt_to = rcpt_to or []
self.data = None
self.message = None
def __repr__ (self):
return "Envelope(%r, %r)" % (self.mail_from, self.rcpt_to)
def __str__(self):
s = ["MAIL FROM: %s" % (self.mail_from,)]
for rcpt in self.rcpt_to:
s.append("RCPT TO: %s" % (rcpt))
s.append("\n")
if self.message:
s.append(str(self.message))
elif self.data:
s.append(self.data.getvalue())
else:
s.append("<no data!>")
return "\n".join(s)
def has_data(self):
"""has_data() is true if there is data or message."""
if self.data:
return self.data.tell()
elif self.message:
return len(self.message)
else:
return 0
def write(self, text):
"""write(text)
Writes text to the message body."""
if self.data is None:
self.data = BytesIO()
self.data.write(text)
def writeln(self, text):
"""writeln(text)
Writes text to the message body, adding a newline."""
self.write(text)
self.write("\n")
def set_from(self, frm):
"""set_from(from_address)
Sets the MAIL FROM address for this Envelope."""
self.mail_from = frm
def add_rcpt(self, rcpt):
"""add_rcpt(recipient)
Adds a new recipient to the RCPT list."""
self.rcpt_to.append(rcpt)
def parse_data(self, parser):
"""parse_data(parser)
Instructs the Envelope to convert its raw 'data' attribute to a 'message'
attribute using the supplied parser object. A 'message' attribute is an
'email' package Message tree."""
if self.data is not None:
self.data.seek(0,0)
# parser should be email.Parser.Parser object, or subclass thereof.
self.message = parser.parse(self.data)
self.data.close()
if self.message:
self.data = None
def send(self, smtp, mail_options=None, rcpt_options=None):
"""Mails this envelope using the supplied SMTP client object."""
if self.message:
return smtp.sendmail(self.mail_from, self.rcpt_to, self.message.as_string(), mail_options, rcpt_options)
elif self.data:
return smtp.sendmail(self.mail_from, self.rcpt_to, self.data.getvalue(), mail_options, rcpt_options)
else:
body = "From: %s\nTo: %s\n\nEnvelope message." % (self.mail_from, ", ".join(self.rcpt_to))
return smtp.sendmail(self.mail_from, self.rcpt_to, body, mail_options, rcpt_options)
def get_mailer(host="", port=SMTP_PORT, logfile=None):
return SMTP(str(host), int(port), logfile=logfile)
def test(argv):
def prompt(prompt):
return raw_input(prompt+": ")
#fromaddr = prompt("From")
#toaddrs = prompt("To")
#mailhost = prompt("mailhost")
fromaddr = "keith@dartworks.biz"
toaddrs = "keith@dartworks.biz"
mailhost = "localhost"
#print ("Enter message, end with empty line:")
msg = 'From: %s\nTo: %s\nSubject: test message\n\n' % (fromaddr, toaddrs)
#while 1:
# line = raw_input("> ")
# if not line:
# break
# msg = msg + line
msg = msg + "A message \n\n.\n"
server = SMTP()
server.connect(mailhost, 9025)
server.sendmail(fromaddr, toaddrs.split(","), msg)
server.quit()
# Test the sendmail method, which tests most of the others.
# Note: This always sends to localhost.
if __name__ == '__main__':
import sys
test(sys.argv)
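
# --- Hedged usage sketch (not part of the original module) ---
# Assumes an MTA listening on localhost:25; the addresses are placeholders.
def example_envelope_send():
    env = Envelope("alice@example.org", ["bob@example.org"])
    env.writeln("Subject: envelope test")
    env.writeln("")
    env.writeln("Hello from the Envelope class.")
    conn = get_mailer("localhost")
    try:
        # sendmail() returns a dict of refused recipients; an empty dict
        # means every recipient was accepted.
        return env.send(conn)
    finally:
        conn.quit()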
| 35.233918
| 116
| 0.581378
|
56bf2c682d740dc60134d252a83073d17a1976c5
| 353
|
py
|
Python
|
docs/print_ops/scripts/make_available.py
|
compunautics/compunaut_rundeck_jobs
|
3e9ab9aa115d819ec30bf98bf4b8c73f74f2897d
|
[
"Apache-2.0"
] | null | null | null |
docs/print_ops/scripts/make_available.py
|
compunautics/compunaut_rundeck_jobs
|
3e9ab9aa115d819ec30bf98bf4b8c73f74f2897d
|
[
"Apache-2.0"
] | null | null | null |
docs/print_ops/scripts/make_available.py
|
compunautics/compunaut_rundeck_jobs
|
3e9ab9aa115d819ec30bf98bf4b8c73f74f2897d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import os
import pwd
import grp
import json
# DEFINE VARIABLES
bool_path = "/etc/consul.d/filament_bool"
bool_dict = {"is_available": "True"}
uid = pwd.getpwnam('consul').pw_uid
gid = grp.getgrnam('consul').gr_gid
# MAKE AVAILABLE
with open(bool_path, 'w') as outfile:
json.dump(bool_dict, outfile)
os.chown(bool_path, uid, gid)
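
# --- Hedged companion sketch (not part of the original script) ---
# Reads the flag back, e.g. to verify the write above took effect.
def is_available(path=bool_path):
    with open(path) as infile:
        return json.load(infile).get("is_available") == "True"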
| 18.578947
| 41
| 0.730878
|
05eeee2d74e2b133b6b525a3575e8a4c2a4fe77e
| 48,475
|
py
|
Python
|
yt_dlp/postprocessor/ffmpeg.py
|
Mehmetbaba55/yt-dlp
|
5b804e39066e01c8cb421957bad1ddbc8daa9831
|
[
"Unlicense"
] | null | null | null |
yt_dlp/postprocessor/ffmpeg.py
|
Mehmetbaba55/yt-dlp
|
5b804e39066e01c8cb421957bad1ddbc8daa9831
|
[
"Unlicense"
] | null | null | null |
yt_dlp/postprocessor/ffmpeg.py
|
Mehmetbaba55/yt-dlp
|
5b804e39066e01c8cb421957bad1ddbc8daa9831
|
[
"Unlicense"
] | null | null | null |
from __future__ import unicode_literals
import collections
import io
import itertools
import os
import subprocess
import time
import re
import json
from .common import AudioConversionError, PostProcessor
from ..compat import compat_str
from ..utils import (
determine_ext,
dfxp2srt,
encodeArgument,
encodeFilename,
float_or_none,
_get_exe_version_output,
detect_exe_version,
is_outdated_version,
ISO639Utils,
orderedSet,
Popen,
PostProcessingError,
prepend_extension,
replace_extension,
shell_quote,
traverse_obj,
variadic,
write_json_file,
)
EXT_TO_OUT_FORMATS = {
'aac': 'adts',
'flac': 'flac',
'm4a': 'ipod',
'mka': 'matroska',
'mkv': 'matroska',
'mpg': 'mpeg',
'ogv': 'ogg',
'ts': 'mpegts',
'wma': 'asf',
'wmv': 'asf',
'vtt': 'webvtt',
}
ACODECS = {
'mp3': 'libmp3lame',
'aac': 'aac',
'flac': 'flac',
'm4a': 'aac',
'opus': 'libopus',
'vorbis': 'libvorbis',
'wav': None,
'alac': None,
}
class FFmpegPostProcessorError(PostProcessingError):
pass
class FFmpegPostProcessor(PostProcessor):
def __init__(self, downloader=None):
PostProcessor.__init__(self, downloader)
self._determine_executables()
def check_version(self):
if not self.available:
raise FFmpegPostProcessorError('ffmpeg not found. Please install or provide the path using --ffmpeg-location')
required_version = '10-0' if self.basename == 'avconv' else '1.0'
if is_outdated_version(
self._versions[self.basename], required_version):
warning = 'Your copy of %s is outdated, update %s to version %s or newer if you encounter any errors.' % (
self.basename, self.basename, required_version)
self.report_warning(warning)
@staticmethod
def get_versions_and_features(downloader=None):
pp = FFmpegPostProcessor(downloader)
return pp._versions, pp._features
@staticmethod
def get_versions(downloader=None):
        return FFmpegPostProcessor.get_versions_and_features(downloader)[0]
def _determine_executables(self):
programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
def get_ffmpeg_version(path, prog):
out = _get_exe_version_output(path, ['-bsfs'])
ver = detect_exe_version(out) if out else False
if ver:
regexs = [
r'(?:\d+:)?([0-9.]+)-[0-9]+ubuntu[0-9.]+$', # Ubuntu, see [1]
r'n([0-9.]+)$', # Arch Linux
# 1. http://www.ducea.com/2006/06/17/ubuntu-package-version-naming-explanation/
]
for regex in regexs:
mobj = re.match(regex, ver)
if mobj:
ver = mobj.group(1)
self._versions[prog] = ver
if prog != 'ffmpeg' or not out:
return
mobj = re.search(r'(?m)^\s+libavformat\s+(?:[0-9. ]+)\s+/\s+(?P<runtime>[0-9. ]+)', out)
lavf_runtime_version = mobj.group('runtime').replace(' ', '') if mobj else None
self._features = {
'fdk': '--enable-libfdk-aac' in out,
'setts': 'setts' in out.splitlines(),
'needs_adtstoasc': is_outdated_version(lavf_runtime_version, '57.56.100', False),
}
self.basename = None
self.probe_basename = None
self._paths = None
self._versions = None
self._features = {}
prefer_ffmpeg = self.get_param('prefer_ffmpeg', True)
location = self.get_param('ffmpeg_location')
if location is None:
self._paths = {p: p for p in programs}
else:
if not os.path.exists(location):
self.report_warning(
'ffmpeg-location %s does not exist! '
'Continuing without ffmpeg.' % (location))
self._versions = {}
return
elif os.path.isdir(location):
dirname, basename = location, None
else:
basename = os.path.splitext(os.path.basename(location))[0]
basename = next((p for p in programs if basename.startswith(p)), 'ffmpeg')
dirname = os.path.dirname(os.path.abspath(location))
if basename in ('ffmpeg', 'ffprobe'):
prefer_ffmpeg = True
self._paths = dict(
(p, os.path.join(dirname, p)) for p in programs)
if basename:
self._paths[basename] = location
self._versions = {}
for p in programs:
get_ffmpeg_version(self._paths[p], p)
if prefer_ffmpeg is False:
prefs = ('avconv', 'ffmpeg')
else:
prefs = ('ffmpeg', 'avconv')
for p in prefs:
if self._versions[p]:
self.basename = p
break
if prefer_ffmpeg is False:
prefs = ('avprobe', 'ffprobe')
else:
prefs = ('ffprobe', 'avprobe')
for p in prefs:
if self._versions[p]:
self.probe_basename = p
break
if self.basename == 'avconv':
self.deprecation_warning(
'Support for avconv is deprecated and may be removed in a future version. Use ffmpeg instead')
if self.probe_basename == 'avprobe':
self.deprecation_warning(
'Support for avprobe is deprecated and may be removed in a future version. Use ffprobe instead')
@property
def available(self):
return self.basename is not None
@property
def executable(self):
return self._paths[self.basename]
@property
def probe_available(self):
return self.probe_basename is not None
@property
def probe_executable(self):
return self._paths[self.probe_basename]
@staticmethod
def stream_copy_opts(copy=True, *, ext=None):
yield from ('-map', '0')
# Don't copy Apple TV chapters track, bin_data
# See https://github.com/yt-dlp/yt-dlp/issues/2, #19042, #19024, https://trac.ffmpeg.org/ticket/6016
yield from ('-dn', '-ignore_unknown')
if copy:
yield from ('-c', 'copy')
# For some reason, '-c copy -map 0' is not enough to copy subtitles
if ext in ('mp4', 'mov'):
yield from ('-c:s', 'mov_text')
def get_audio_codec(self, path):
if not self.probe_available and not self.available:
raise PostProcessingError('ffprobe and ffmpeg not found. Please install or provide the path using --ffmpeg-location')
try:
if self.probe_available:
cmd = [
encodeFilename(self.probe_executable, True),
encodeArgument('-show_streams')]
else:
cmd = [
encodeFilename(self.executable, True),
encodeArgument('-i')]
cmd.append(encodeFilename(self._ffmpeg_filename_argument(path), True))
self.write_debug('%s command line: %s' % (self.basename, shell_quote(cmd)))
handle = Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_data, stderr_data = handle.communicate_or_kill()
expected_ret = 0 if self.probe_available else 1
if handle.wait() != expected_ret:
return None
except (IOError, OSError):
return None
output = (stdout_data if self.probe_available else stderr_data).decode('ascii', 'ignore')
if self.probe_available:
audio_codec = None
for line in output.split('\n'):
if line.startswith('codec_name='):
audio_codec = line.split('=')[1].strip()
elif line.strip() == 'codec_type=audio' and audio_codec is not None:
return audio_codec
else:
# Stream #FILE_INDEX:STREAM_INDEX[STREAM_ID](LANGUAGE): CODEC_TYPE: CODEC_NAME
mobj = re.search(
r'Stream\s*#\d+:\d+(?:\[0x[0-9a-f]+\])?(?:\([a-z]{3}\))?:\s*Audio:\s*([0-9a-z]+)',
output)
if mobj:
return mobj.group(1)
return None
def get_metadata_object(self, path, opts=[]):
if self.probe_basename != 'ffprobe':
if self.probe_available:
self.report_warning('Only ffprobe is supported for metadata extraction')
raise PostProcessingError('ffprobe not found. Please install or provide the path using --ffmpeg-location')
self.check_version()
cmd = [
encodeFilename(self.probe_executable, True),
encodeArgument('-hide_banner'),
encodeArgument('-show_format'),
encodeArgument('-show_streams'),
encodeArgument('-print_format'),
encodeArgument('json'),
]
cmd += opts
cmd.append(encodeFilename(self._ffmpeg_filename_argument(path), True))
self.write_debug('ffprobe command line: %s' % shell_quote(cmd))
p = Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = p.communicate()
return json.loads(stdout.decode('utf-8', 'replace'))
def get_stream_number(self, path, keys, value):
streams = self.get_metadata_object(path)['streams']
num = next(
(i for i, stream in enumerate(streams) if traverse_obj(stream, keys, casesense=False) == value),
None)
return num, len(streams)
def _get_real_video_duration(self, filepath, fatal=True):
try:
duration = float_or_none(
traverse_obj(self.get_metadata_object(filepath), ('format', 'duration')))
if not duration:
raise PostProcessingError('ffprobe returned empty duration')
return duration
except PostProcessingError as e:
if fatal:
raise PostProcessingError(f'Unable to determine video duration: {e.msg}')
def _duration_mismatch(self, d1, d2):
if not d1 or not d2:
return None
        # The duration is often only known to the nearest second, so there can be a <1sec disparity naturally.
# Further excuse an additional <1sec difference.
return abs(d1 - d2) > 2
def run_ffmpeg_multiple_files(self, input_paths, out_path, opts, **kwargs):
return self.real_run_ffmpeg(
[(path, []) for path in input_paths],
[(out_path, opts)], **kwargs)
def real_run_ffmpeg(self, input_path_opts, output_path_opts, *, expected_retcodes=(0,)):
self.check_version()
oldest_mtime = min(
os.stat(encodeFilename(path)).st_mtime for path, _ in input_path_opts if path)
cmd = [encodeFilename(self.executable, True), encodeArgument('-y')]
# avconv does not have repeat option
if self.basename == 'ffmpeg':
cmd += [encodeArgument('-loglevel'), encodeArgument('repeat+info')]
def make_args(file, args, name, number):
keys = ['_%s%d' % (name, number), '_%s' % name]
if name == 'o':
args += ['-movflags', '+faststart']
if number == 1:
keys.append('')
args += self._configuration_args(self.basename, keys)
if name == 'i':
args.append('-i')
return (
[encodeArgument(arg) for arg in args]
+ [encodeFilename(self._ffmpeg_filename_argument(file), True)])
for arg_type, path_opts in (('i', input_path_opts), ('o', output_path_opts)):
cmd += itertools.chain.from_iterable(
make_args(path, list(opts), arg_type, i + 1)
for i, (path, opts) in enumerate(path_opts) if path)
self.write_debug('ffmpeg command line: %s' % shell_quote(cmd))
p = Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, stderr = p.communicate_or_kill()
if p.returncode not in variadic(expected_retcodes):
stderr = stderr.decode('utf-8', 'replace').strip()
self.write_debug(stderr)
raise FFmpegPostProcessorError(stderr.split('\n')[-1])
for out_path, _ in output_path_opts:
if out_path:
self.try_utime(out_path, oldest_mtime, oldest_mtime)
return stderr.decode('utf-8', 'replace')
def run_ffmpeg(self, path, out_path, opts, **kwargs):
return self.run_ffmpeg_multiple_files([path], out_path, opts, **kwargs)
@staticmethod
def _ffmpeg_filename_argument(fn):
# Always use 'file:' because the filename may contain ':' (ffmpeg
# interprets that as a protocol) or can start with '-' (-- is broken in
# ffmpeg, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details)
# Also leave '-' intact in order not to break streaming to stdout.
if fn.startswith(('http://', 'https://')):
return fn
return 'file:' + fn if fn != '-' else fn
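    # Illustrative mapping (derived from the rules above):
    #   'clip.mp4'          -> 'file:clip.mp4'
    #   '-'                 -> '-'    (stdout streaming left intact)
    #   'https://a/b.mp4'   -> 'https://a/b.mp4'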
@staticmethod
def _quote_for_ffmpeg(string):
# See https://ffmpeg.org/ffmpeg-utils.html#toc-Quoting-and-escaping
# A sequence of '' produces '\'''\'';
# final replace removes the empty '' between \' \'.
string = string.replace("'", r"'\''").replace("'''", "'")
# Handle potential ' at string boundaries.
string = string[1:] if string[0] == "'" else "'" + string
return string[:-1] if string[-1] == "'" else string + "'"
def force_keyframes(self, filename, timestamps):
timestamps = orderedSet(timestamps)
if timestamps[0] == 0:
timestamps = timestamps[1:]
keyframe_file = prepend_extension(filename, 'keyframes.temp')
self.to_screen(f'Re-encoding "{filename}" with appropriate keyframes')
self.run_ffmpeg(filename, keyframe_file, [
*self.stream_copy_opts(False, ext=determine_ext(filename)),
'-force_key_frames', ','.join(f'{t:.6f}' for t in timestamps)])
return keyframe_file
def concat_files(self, in_files, out_file, concat_opts=None):
"""
Use concat demuxer to concatenate multiple files having identical streams.
Only inpoint, outpoint, and duration concat options are supported.
See https://ffmpeg.org/ffmpeg-formats.html#concat-1 for details
"""
concat_file = f'{out_file}.concat'
self.write_debug(f'Writing concat spec to {concat_file}')
with open(concat_file, 'wt', encoding='utf-8') as f:
f.writelines(self._concat_spec(in_files, concat_opts))
out_flags = list(self.stream_copy_opts(ext=determine_ext(out_file)))
self.real_run_ffmpeg(
[(concat_file, ['-hide_banner', '-nostdin', '-f', 'concat', '-safe', '0'])],
[(out_file, out_flags)])
os.remove(concat_file)
@classmethod
def _concat_spec(cls, in_files, concat_opts=None):
if concat_opts is None:
concat_opts = [{}] * len(in_files)
yield 'ffconcat version 1.0\n'
for file, opts in zip(in_files, concat_opts):
yield f'file {cls._quote_for_ffmpeg(cls._ffmpeg_filename_argument(file))}\n'
# Iterate explicitly to yield the following directives in order, ignoring the rest.
for directive in 'inpoint', 'outpoint', 'duration':
if directive in opts:
yield f'{directive} {opts[directive]}\n'
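# Illustrative _concat_spec output (hedged): for in_files=['a.ts', 'b.ts'] and
# concat_opts=[{'inpoint': 0}, {}], the generated spec reads:
#   ffconcat version 1.0
#   file 'file:a.ts'
#   inpoint 0
#   file 'file:b.ts'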
class FFmpegExtractAudioPP(FFmpegPostProcessor):
COMMON_AUDIO_EXTS = ('wav', 'flac', 'm4a', 'aiff', 'mp3', 'ogg', 'mka', 'opus', 'wma')
SUPPORTED_EXTS = ('best', 'aac', 'flac', 'mp3', 'm4a', 'opus', 'vorbis', 'wav', 'alac')
def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
FFmpegPostProcessor.__init__(self, downloader)
self._preferredcodec = preferredcodec or 'best'
self._preferredquality = float_or_none(preferredquality)
self._nopostoverwrites = nopostoverwrites
def _quality_args(self, codec):
if self._preferredquality is None:
return []
elif self._preferredquality > 10:
return ['-b:a', f'{self._preferredquality}k']
limits = {
'libmp3lame': (10, 0),
'libvorbis': (0, 10),
# FFmpeg's AAC encoder does not have an upper limit for the value of -q:a.
# Experimentally, with values over 4, bitrate changes were minimal or non-existent
'aac': (0.1, 4),
'libfdk_aac': (1, 5),
}.get(codec)
if not limits:
return []
q = limits[1] + (limits[0] - limits[1]) * (self._preferredquality / 10)
if codec == 'libfdk_aac':
return ['-vbr', f'{int(q)}']
return ['-q:a', f'{q}']
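    # Worked example (hedged): with preferredquality=5 and codec 'libmp3lame',
    # limits=(10, 0), so q = 0 + (10 - 0) * (5 / 10) = 5.0 -> ['-q:a', '5.0'];
    # values above 10 are treated as bitrates, e.g. 128 -> ['-b:a', '128.0k'].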
def run_ffmpeg(self, path, out_path, codec, more_opts):
if codec is None:
acodec_opts = []
else:
acodec_opts = ['-acodec', codec]
opts = ['-vn'] + acodec_opts + more_opts
try:
FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
except FFmpegPostProcessorError as err:
raise AudioConversionError(err.msg)
@PostProcessor._restrict_to(images=False)
def run(self, information):
orig_path = path = information['filepath']
orig_ext = information['ext']
if self._preferredcodec == 'best' and orig_ext in self.COMMON_AUDIO_EXTS:
self.to_screen('Skipping audio extraction since the file is already in a common audio format')
return [], information
filecodec = self.get_audio_codec(path)
if filecodec is None:
raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe')
more_opts = []
if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']:
# Lossless, but in another container
acodec = 'copy'
extension = 'm4a'
more_opts = ['-bsf:a', 'aac_adtstoasc']
elif filecodec in ['aac', 'flac', 'mp3', 'vorbis', 'opus']:
# Lossless if possible
acodec = 'copy'
extension = filecodec
if filecodec == 'aac':
more_opts = ['-f', 'adts']
if filecodec == 'vorbis':
extension = 'ogg'
elif filecodec == 'alac':
acodec = None
extension = 'm4a'
more_opts += ['-acodec', 'alac']
else:
# MP3 otherwise.
acodec = 'libmp3lame'
extension = 'mp3'
more_opts = self._quality_args(acodec)
else:
# We convert the audio (lossy if codec is lossy)
acodec = ACODECS[self._preferredcodec]
if acodec == 'aac' and self._features.get('fdk'):
acodec = 'libfdk_aac'
extension = self._preferredcodec
more_opts = self._quality_args(acodec)
if self._preferredcodec == 'aac':
more_opts += ['-f', 'adts']
elif self._preferredcodec == 'm4a':
more_opts += ['-bsf:a', 'aac_adtstoasc']
elif self._preferredcodec == 'vorbis':
extension = 'ogg'
elif self._preferredcodec == 'wav':
extension = 'wav'
more_opts += ['-f', 'wav']
elif self._preferredcodec == 'alac':
extension = 'm4a'
more_opts += ['-acodec', 'alac']
prefix, sep, ext = path.rpartition('.') # not os.path.splitext, since the latter does not work on unicode in all setups
temp_path = new_path = prefix + sep + extension
if new_path == path:
orig_path = prepend_extension(path, 'orig')
temp_path = prepend_extension(path, 'temp')
if (self._nopostoverwrites and os.path.exists(encodeFilename(new_path))
and os.path.exists(encodeFilename(orig_path))):
self.to_screen('Post-process file %s exists, skipping' % new_path)
return [], information
try:
self.to_screen(f'Destination: {new_path}')
self.run_ffmpeg(path, temp_path, acodec, more_opts)
except AudioConversionError as e:
raise PostProcessingError(
'audio conversion failed: ' + e.msg)
except Exception:
raise PostProcessingError('error running ' + self.basename)
os.replace(path, orig_path)
os.replace(temp_path, new_path)
information['filepath'] = new_path
information['ext'] = extension
# Try to update the date time for extracted audio file.
if information.get('filetime') is not None:
self.try_utime(
new_path, time.time(), information['filetime'],
errnote='Cannot update utime of audio file')
return [orig_path], information
class FFmpegVideoConvertorPP(FFmpegPostProcessor):
SUPPORTED_EXTS = ('mp4', 'mkv', 'flv', 'webm', 'mov', 'avi', 'mp3', 'mka', 'm4a', 'ogg', 'opus')
FORMAT_RE = re.compile(r'{0}(?:/{0})*$'.format(r'(?:\w+>)?(?:%s)' % '|'.join(SUPPORTED_EXTS)))
_ACTION = 'converting'
def __init__(self, downloader=None, preferedformat=None):
super(FFmpegVideoConvertorPP, self).__init__(downloader)
self._preferedformats = preferedformat.lower().split('/')
def _target_ext(self, source_ext):
for pair in self._preferedformats:
kv = pair.split('>')
if len(kv) == 1 or kv[0].strip() == source_ext:
return kv[-1].strip()
@staticmethod
def _options(target_ext):
if target_ext == 'avi':
return ['-c:v', 'libxvid', '-vtag', 'XVID']
return []
@PostProcessor._restrict_to(images=False)
def run(self, info):
filename, source_ext = info['filepath'], info['ext'].lower()
target_ext = self._target_ext(source_ext)
_skip_msg = (
f'could not find a mapping for {source_ext}' if not target_ext
else f'already is in target format {source_ext}' if source_ext == target_ext
else None)
if _skip_msg:
self.to_screen(f'Not {self._ACTION} media file "{filename}"; {_skip_msg}')
return [], info
outpath = replace_extension(filename, target_ext, source_ext)
self.to_screen(f'{self._ACTION.title()} video from {source_ext} to {target_ext}; Destination: {outpath}')
self.run_ffmpeg(filename, outpath, self._options(target_ext))
info['filepath'] = outpath
info['format'] = info['ext'] = target_ext
return [filename], info
class FFmpegVideoRemuxerPP(FFmpegVideoConvertorPP):
_ACTION = 'remuxing'
@staticmethod
def _options(target_ext):
return FFmpegPostProcessor.stream_copy_opts()
class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
def __init__(self, downloader=None, already_have_subtitle=False):
super(FFmpegEmbedSubtitlePP, self).__init__(downloader)
self._already_have_subtitle = already_have_subtitle
@PostProcessor._restrict_to(images=False)
def run(self, info):
if info['ext'] not in ('mp4', 'webm', 'mkv'):
self.to_screen('Subtitles can only be embedded in mp4, webm or mkv files')
return [], info
subtitles = info.get('requested_subtitles')
if not subtitles:
self.to_screen('There aren\'t any subtitles to embed')
return [], info
filename = info['filepath']
        # Disabled temporarily. There needs to be a way to override this
# in case of duration actually mismatching in extractor
# See: https://github.com/yt-dlp/yt-dlp/issues/1870, https://github.com/yt-dlp/yt-dlp/issues/1385
'''
if info.get('duration') and not info.get('__real_download') and self._duration_mismatch(
self._get_real_video_duration(filename, False), info['duration']):
self.to_screen(f'Skipping {self.pp_key()} since the real and expected durations mismatch')
return [], info
'''
ext = info['ext']
sub_langs, sub_names, sub_filenames = [], [], []
webm_vtt_warn = False
mp4_ass_warn = False
for lang, sub_info in subtitles.items():
if not os.path.exists(sub_info.get('filepath', '')):
self.report_warning(f'Skipping embedding {lang} subtitle because the file is missing')
continue
sub_ext = sub_info['ext']
if sub_ext == 'json':
self.report_warning('JSON subtitles cannot be embedded')
elif ext != 'webm' or ext == 'webm' and sub_ext == 'vtt':
sub_langs.append(lang)
sub_names.append(sub_info.get('name'))
sub_filenames.append(sub_info['filepath'])
else:
if not webm_vtt_warn and ext == 'webm' and sub_ext != 'vtt':
webm_vtt_warn = True
self.report_warning('Only WebVTT subtitles can be embedded in webm files')
if not mp4_ass_warn and ext == 'mp4' and sub_ext == 'ass':
mp4_ass_warn = True
self.report_warning('ASS subtitles cannot be properly embedded in mp4 files; expect issues')
if not sub_langs:
return [], info
input_files = [filename] + sub_filenames
opts = [
*self.stream_copy_opts(ext=info['ext']),
# Don't copy the existing subtitles, we may be running the
# postprocessor a second time
'-map', '-0:s',
]
for i, (lang, name) in enumerate(zip(sub_langs, sub_names)):
opts.extend(['-map', '%d:0' % (i + 1)])
lang_code = ISO639Utils.short2long(lang) or lang
opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
if name:
opts.extend(['-metadata:s:s:%d' % i, 'handler_name=%s' % name,
'-metadata:s:s:%d' % i, 'title=%s' % name])
temp_filename = prepend_extension(filename, 'temp')
self.to_screen('Embedding subtitles in "%s"' % filename)
self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
os.replace(temp_filename, filename)
files_to_delete = [] if self._already_have_subtitle else sub_filenames
return files_to_delete, info
class FFmpegMetadataPP(FFmpegPostProcessor):
def __init__(self, downloader, add_metadata=True, add_chapters=True, add_infojson='if_exists'):
FFmpegPostProcessor.__init__(self, downloader)
self._add_metadata = add_metadata
self._add_chapters = add_chapters
self._add_infojson = add_infojson
@staticmethod
def _options(target_ext):
audio_only = target_ext == 'm4a'
yield from FFmpegPostProcessor.stream_copy_opts(not audio_only)
if audio_only:
yield from ('-vn', '-acodec', 'copy')
@PostProcessor._restrict_to(images=False)
def run(self, info):
filename, metadata_filename = info['filepath'], None
files_to_delete, options = [], []
if self._add_chapters and info.get('chapters'):
metadata_filename = replace_extension(filename, 'meta')
options.extend(self._get_chapter_opts(info['chapters'], metadata_filename))
files_to_delete.append(metadata_filename)
if self._add_metadata:
options.extend(self._get_metadata_opts(info))
if self._add_infojson:
if info['ext'] in ('mkv', 'mka'):
infojson_filename = info.get('infojson_filename')
options.extend(self._get_infojson_opts(info, infojson_filename))
if not infojson_filename:
files_to_delete.append(info.get('infojson_filename'))
elif self._add_infojson is True:
self.to_screen('The info-json can only be attached to mkv/mka files')
if not options:
self.to_screen('There isn\'t any metadata to add')
return [], info
temp_filename = prepend_extension(filename, 'temp')
self.to_screen('Adding metadata to "%s"' % filename)
self.run_ffmpeg_multiple_files(
(filename, metadata_filename), temp_filename,
itertools.chain(self._options(info['ext']), *options))
for file in filter(None, files_to_delete):
os.remove(file) # Don't obey --keep-files
os.replace(temp_filename, filename)
return [], info
@staticmethod
def _get_chapter_opts(chapters, metadata_filename):
with io.open(metadata_filename, 'wt', encoding='utf-8') as f:
def ffmpeg_escape(text):
return re.sub(r'([\\=;#\n])', r'\\\1', text)
metadata_file_content = ';FFMETADATA1\n'
for chapter in chapters:
metadata_file_content += '[CHAPTER]\nTIMEBASE=1/1000\n'
metadata_file_content += 'START=%d\n' % (chapter['start_time'] * 1000)
metadata_file_content += 'END=%d\n' % (chapter['end_time'] * 1000)
chapter_title = chapter.get('title')
if chapter_title:
metadata_file_content += 'title=%s\n' % ffmpeg_escape(chapter_title)
f.write(metadata_file_content)
yield ('-map_metadata', '1')
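# Rough sketch of the metadata file written above for a single chapter
# (the values are hypothetical, not from the original source):
#   ;FFMETADATA1
#   [CHAPTER]
#   TIMEBASE=1/1000
#   START=0
#   END=90000
#   title=Intro
# ffmpeg then picks this up from the second input via '-map_metadata 1'.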
def _get_metadata_opts(self, info):
meta_prefix = 'meta'
metadata = collections.defaultdict(dict)
def add(meta_list, info_list=None):
value = next((
str(info[key]) for key in [f'{meta_prefix}_'] + list(variadic(info_list or meta_list))
if info.get(key) is not None), None)
if value not in ('', None):
metadata['common'].update({meta_f: value for meta_f in variadic(meta_list)})
# See [1-3] for some info on the media metadata keys supported
# by ffmpeg.
# 1. https://kdenlive.org/en/project/adding-meta-data-to-mp4-video/
# 2. https://wiki.multimedia.cx/index.php/FFmpeg_Metadata
# 3. https://kodi.wiki/view/Video_file_tagging
add('title', ('track', 'title'))
add('date', 'upload_date')
add(('description', 'synopsis'), 'description')
add(('purl', 'comment'), 'webpage_url')
add('track', 'track_number')
add('artist', ('artist', 'creator', 'uploader', 'uploader_id'))
add('genre')
add('album')
add('album_artist')
add('disc', 'disc_number')
add('show', 'series')
add('season_number')
add('episode_id', ('episode', 'episode_id'))
add('episode_sort', 'episode_number')
if 'embed-metadata' in self.get_param('compat_opts', []):
add('comment', 'description')
metadata['common'].pop('synopsis', None)
meta_regex = rf'{re.escape(meta_prefix)}(?P<i>\d+)?_(?P<key>.+)'
for key, value in info.items():
mobj = re.fullmatch(meta_regex, key)
if value is not None and mobj:
metadata[mobj.group('i') or 'common'][mobj.group('key')] = value
for name, value in metadata['common'].items():
yield ('-metadata', f'{name}={value}')
stream_idx = 0
for fmt in info.get('requested_formats') or []:
stream_count = 2 if 'none' not in (fmt.get('vcodec'), fmt.get('acodec')) else 1
lang = ISO639Utils.short2long(fmt.get('language') or '') or fmt.get('language')
for i in range(stream_idx, stream_idx + stream_count):
if lang:
metadata[str(i)].setdefault('language', lang)
for name, value in metadata[str(i)].items():
yield (f'-metadata:s:{i}', f'{name}={value}')
stream_idx += stream_count
def _get_infojson_opts(self, info, infofn):
if not infofn or not os.path.exists(infofn):
if self._add_infojson is not True:
return
infofn = infofn or '%s.temp' % (
self._downloader.prepare_filename(info, 'infojson')
or replace_extension(self._downloader.prepare_filename(info), 'info.json', info['ext']))
if not self._downloader._ensure_dir_exists(infofn):
return
self.write_debug(f'Writing info-json to: {infofn}')
write_json_file(self._downloader.sanitize_info(info, self.get_param('clean_infojson', True)), infofn)
info['infojson_filename'] = infofn
old_stream, new_stream = self.get_stream_number(info['filepath'], ('tags', 'mimetype'), 'application/json')
if old_stream is not None:
yield ('-map', '-0:%d' % old_stream)
new_stream -= 1
yield ('-attach', infofn,
'-metadata:s:%d' % new_stream, 'mimetype=application/json')
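# Hedged illustration of the options yielded above, assuming an old
# info.json attachment already sits at stream index 2 (the filename is
# made up for the example):
#   ('-map', '-0:2')
#   ('-attach', 'video.info.json',
#    '-metadata:s:2', 'mimetype=application/json')
# When no old attachment exists, only the '-attach' pair is emitted and
# new_stream points at the next free stream index.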
class FFmpegMergerPP(FFmpegPostProcessor):
@PostProcessor._restrict_to(images=False)
def run(self, info):
filename = info['filepath']
temp_filename = prepend_extension(filename, 'temp')
args = ['-c', 'copy']
audio_streams = 0
for (i, fmt) in enumerate(info['requested_formats']):
if fmt.get('acodec') != 'none':
args.extend(['-map', f'{i}:a:0'])
aac_fixup = fmt['protocol'].startswith('m3u8') and self.get_audio_codec(fmt['filepath']) == 'aac'
if aac_fixup:
args.extend([f'-bsf:a:{audio_streams}', 'aac_adtstoasc'])
audio_streams += 1
if fmt.get('vcodec') != 'none':
args.extend(['-map', '%d:v:0' % i])
self.to_screen('Merging formats into "%s"' % filename)
self.run_ffmpeg_multiple_files(info['__files_to_merge'], temp_filename, args)
os.rename(encodeFilename(temp_filename), encodeFilename(filename))
return info['__files_to_merge'], info
def can_merge(self):
# TODO: figure out merge-capable ffmpeg version
if self.basename != 'avconv':
return True
required_version = '10-0'
if is_outdated_version(
self._versions[self.basename], required_version):
warning = ('Your copy of %s is outdated and unable to properly mux separate video and audio files, '
'yt-dlp will download single file media. '
'Update %s to version %s or newer to fix this.') % (
self.basename, self.basename, required_version)
self.report_warning(warning)
return False
return True
class FFmpegFixupPostProcessor(FFmpegPostProcessor):
def _fixup(self, msg, filename, options):
temp_filename = prepend_extension(filename, 'temp')
self.to_screen(f'{msg} of "{filename}"')
self.run_ffmpeg(filename, temp_filename, options)
os.replace(temp_filename, filename)
class FFmpegFixupStretchedPP(FFmpegFixupPostProcessor):
@PostProcessor._restrict_to(images=False, audio=False)
def run(self, info):
stretched_ratio = info.get('stretched_ratio')
if stretched_ratio not in (None, 1):
self._fixup('Fixing aspect ratio', info['filepath'], [
*self.stream_copy_opts(), '-aspect', '%f' % stretched_ratio])
return [], info
class FFmpegFixupM4aPP(FFmpegFixupPostProcessor):
@PostProcessor._restrict_to(images=False, video=False)
def run(self, info):
if info.get('container') == 'm4a_dash':
self._fixup('Correcting container', info['filepath'], [*self.stream_copy_opts(), '-f', 'mp4'])
return [], info
class FFmpegFixupM3u8PP(FFmpegFixupPostProcessor):
def _needs_fixup(self, info):
yield info['ext'] in ('mp4', 'm4a')
yield info['protocol'].startswith('m3u8')
try:
metadata = self.get_metadata_object(info['filepath'])
except PostProcessingError as e:
self.report_warning(f'Unable to extract metadata: {e.msg}')
yield True
else:
yield traverse_obj(metadata, ('format', 'format_name'), casesense=False) == 'mpegts'
@PostProcessor._restrict_to(images=False)
def run(self, info):
if all(self._needs_fixup(info)):
self._fixup('Fixing MPEG-TS in MP4 container', info['filepath'], [
*self.stream_copy_opts(), '-f', 'mp4', '-bsf:a', 'aac_adtstoasc'])
return [], info
class FFmpegFixupTimestampPP(FFmpegFixupPostProcessor):
def __init__(self, downloader=None, trim=0.001):
# "trim" should be used when the video contains unintended packets
super(FFmpegFixupTimestampPP, self).__init__(downloader)
assert isinstance(trim, (int, float))
self.trim = str(trim)
@PostProcessor._restrict_to(images=False)
def run(self, info):
if not self._features.get('setts'):
self.report_warning(
'A re-encode is needed to fix timestamps in older versions of ffmpeg. '
'Please install ffmpeg 4.4 or later to fixup without re-encoding')
opts = ['-vf', 'setpts=PTS-STARTPTS']
else:
opts = ['-c', 'copy', '-bsf', 'setts=ts=TS-STARTPTS']
self._fixup('Fixing frame timestamp', info['filepath'], opts + [*self.stream_copy_opts(False), '-ss', self.trim])
return [], info
class FFmpegCopyStreamPP(FFmpegFixupPostProcessor):
MESSAGE = 'Copying stream'
@PostProcessor._restrict_to(images=False)
def run(self, info):
self._fixup(self.MESSAGE, info['filepath'], self.stream_copy_opts())
return [], info
class FFmpegFixupDurationPP(FFmpegCopyStreamPP):
MESSAGE = 'Fixing video duration'
class FFmpegFixupDuplicateMoovPP(FFmpegCopyStreamPP):
MESSAGE = 'Fixing duplicate MOOV atoms'
class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
SUPPORTED_EXTS = ('srt', 'vtt', 'ass', 'lrc')
def __init__(self, downloader=None, format=None):
super(FFmpegSubtitlesConvertorPP, self).__init__(downloader)
self.format = format
def run(self, info):
subs = info.get('requested_subtitles')
new_ext = self.format
new_format = new_ext
if new_format == 'vtt':
new_format = 'webvtt'
if subs is None:
self.to_screen('There aren\'t any subtitles to convert')
return [], info
self.to_screen('Converting subtitles')
sub_filenames = []
for lang, sub in subs.items():
if not os.path.exists(sub.get('filepath', '')):
self.report_warning(f'Skipping conversion of {lang} subtitle because the file is missing')
continue
ext = sub['ext']
if ext == new_ext:
self.to_screen('Subtitle file for %s is already in the requested format' % new_ext)
continue
elif ext == 'json':
self.to_screen(
'You have requested to convert json subtitles into another format, '
'which is currently not possible')
continue
old_file = sub['filepath']
sub_filenames.append(old_file)
new_file = replace_extension(old_file, new_ext)
if ext in ('dfxp', 'ttml', 'tt'):
self.report_warning(
'You have requested to convert dfxp (TTML) subtitles into another format, '
'which results in style information loss')
dfxp_file = old_file
srt_file = replace_extension(old_file, 'srt')
with open(dfxp_file, 'rb') as f:
srt_data = dfxp2srt(f.read())
with io.open(srt_file, 'wt', encoding='utf-8') as f:
f.write(srt_data)
old_file = srt_file
subs[lang] = {
'ext': 'srt',
'data': srt_data,
'filepath': srt_file,
}
if new_ext == 'srt':
continue
else:
sub_filenames.append(srt_file)
self.run_ffmpeg(old_file, new_file, ['-f', new_format])
with io.open(new_file, 'rt', encoding='utf-8') as f:
subs[lang] = {
'ext': new_ext,
'data': f.read(),
'filepath': new_file,
}
info['__files_to_move'][new_file] = replace_extension(
info['__files_to_move'][sub['filepath']], new_ext)
return sub_filenames, info
class FFmpegSplitChaptersPP(FFmpegPostProcessor):
def __init__(self, downloader, force_keyframes=False):
FFmpegPostProcessor.__init__(self, downloader)
self._force_keyframes = force_keyframes
def _prepare_filename(self, number, chapter, info):
info = info.copy()
info.update({
'section_number': number,
'section_title': chapter.get('title'),
'section_start': chapter.get('start_time'),
'section_end': chapter.get('end_time'),
})
return self._downloader.prepare_filename(info, 'chapter')
def _ffmpeg_args_for_chapter(self, number, chapter, info):
destination = self._prepare_filename(number, chapter, info)
if not self._downloader._ensure_dir_exists(encodeFilename(destination)):
return
chapter['filepath'] = destination
self.to_screen('Chapter %03d; Destination: %s' % (number, destination))
return (
destination,
['-ss', compat_str(chapter['start_time']),
'-t', compat_str(chapter['end_time'] - chapter['start_time'])])
@PostProcessor._restrict_to(images=False)
def run(self, info):
chapters = info.get('chapters') or []
if not chapters:
self.to_screen('Chapter information is unavailable')
return [], info
in_file = info['filepath']
if self._force_keyframes and len(chapters) > 1:
in_file = self.force_keyframes(in_file, (c['start_time'] for c in chapters))
self.to_screen('Splitting video by chapters; %d chapters found' % len(chapters))
for idx, chapter in enumerate(chapters):
destination, opts = self._ffmpeg_args_for_chapter(idx + 1, chapter, info)
self.real_run_ffmpeg([(in_file, opts)], [(destination, self.stream_copy_opts())])
if in_file != info['filepath']:
os.remove(in_file)
return [], info
class FFmpegThumbnailsConvertorPP(FFmpegPostProcessor):
SUPPORTED_EXTS = ('jpg', 'png')
def __init__(self, downloader=None, format=None):
super(FFmpegThumbnailsConvertorPP, self).__init__(downloader)
self.format = format
@staticmethod
def is_webp(path):
with open(encodeFilename(path), 'rb') as f:
b = f.read(12)
return b[0:4] == b'RIFF' and b[8:] == b'WEBP'
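# The check above follows the RIFF container layout: bytes 0-3 hold
# b'RIFF', bytes 4-7 a little-endian chunk size, bytes 8-11 b'WEBP'.
# A minimal self-check with made-up data (editorial addition):
#   header = b'RIFF' + (1024).to_bytes(4, 'little') + b'WEBP'
#   assert header[0:4] == b'RIFF' and header[8:12] == b'WEBP'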
def fixup_webp(self, info, idx=-1):
thumbnail_filename = info['thumbnails'][idx]['filepath']
_, thumbnail_ext = os.path.splitext(thumbnail_filename)
if thumbnail_ext:
thumbnail_ext = thumbnail_ext[1:].lower()
if thumbnail_ext != 'webp' and self.is_webp(thumbnail_filename):
self.to_screen('Correcting thumbnail "%s" extension to webp' % thumbnail_filename)
webp_filename = replace_extension(thumbnail_filename, 'webp')
os.replace(thumbnail_filename, webp_filename)
info['thumbnails'][idx]['filepath'] = webp_filename
info['__files_to_move'][webp_filename] = replace_extension(
info['__files_to_move'].pop(thumbnail_filename), 'webp')
@staticmethod
def _options(target_ext):
if target_ext == 'jpg':
return ['-bsf:v', 'mjpeg2jpeg']
return []
def convert_thumbnail(self, thumbnail_filename, target_ext):
thumbnail_conv_filename = replace_extension(thumbnail_filename, target_ext)
self.to_screen('Converting thumbnail "%s" to %s' % (thumbnail_filename, target_ext))
self.real_run_ffmpeg(
[(thumbnail_filename, ['-f', 'image2', '-pattern_type', 'none'])],
[(thumbnail_conv_filename.replace('%', '%%'), self._options(target_ext))])
return thumbnail_conv_filename
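# Editorial note: the '%%' escaping above exists because ffmpeg treats
# '%' in an output name as an image-sequence pattern; doubling it yields
# a literal percent sign. A hypothetical 'thumb 100%.webp' is therefore
# written out via the escaped name 'thumb 100%%.jpg'.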
def run(self, info):
files_to_delete = []
has_thumbnail = False
for idx, thumbnail_dict in enumerate(info.get('thumbnails') or []):
original_thumbnail = thumbnail_dict.get('filepath')
if not original_thumbnail:
continue
has_thumbnail = True
self.fixup_webp(info, idx)
_, thumbnail_ext = os.path.splitext(original_thumbnail)
if thumbnail_ext:
thumbnail_ext = thumbnail_ext[1:].lower()
if thumbnail_ext == 'jpeg':
thumbnail_ext = 'jpg'
if thumbnail_ext == self.format:
self.to_screen('Thumbnail "%s" is already in the requested format' % original_thumbnail)
continue
thumbnail_dict['filepath'] = self.convert_thumbnail(original_thumbnail, self.format)
files_to_delete.append(original_thumbnail)
info['__files_to_move'][thumbnail_dict['filepath']] = replace_extension(
info['__files_to_move'][original_thumbnail], self.format)
if not has_thumbnail:
self.to_screen('There aren\'t any thumbnails to convert')
return files_to_delete, info
class FFmpegConcatPP(FFmpegPostProcessor):
def __init__(self, downloader, only_multi_video=False):
self._only_multi_video = only_multi_video
super().__init__(downloader)
def concat_files(self, in_files, out_file):
if len(in_files) == 1:
if os.path.realpath(in_files[0]) != os.path.realpath(out_file):
self.to_screen(f'Moving "{in_files[0]}" to "{out_file}"')
os.replace(in_files[0], out_file)
return []
codecs = [traverse_obj(self.get_metadata_object(file), ('streams', ..., 'codec_name')) for file in in_files]
if len(set(map(tuple, codecs))) > 1:
raise PostProcessingError(
'The files have different streams/codecs and cannot be concatenated. '
'Either select different formats or --recode-video them to a common format')
self.to_screen(f'Concatenating {len(in_files)} files; Destination: {out_file}')
super().concat_files(in_files, out_file)
return in_files
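# For orientation only: the parent-class concat step is typically driven
# by ffmpeg's concat demuxer, along the lines of
#   ffmpeg -f concat -safe 0 -i filelist.txt -c copy out.mkv
# The exact invocation lives in FFmpegPostProcessor.concat_files; the
# command shown here is an assumption for illustration.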
@PostProcessor._restrict_to(images=False)
def run(self, info):
entries = info.get('entries') or []
if (self.get_param('skip_download') or not any(entries)
or self._only_multi_video and info['_type'] != 'multi_video'):
return [], info
elif any(len(entry) > 1 for entry in traverse_obj(entries, (..., 'requested_downloads')) or []):
raise PostProcessingError('Concatenation is not supported when downloading multiple separate formats')
in_files = traverse_obj(entries, (..., 'requested_downloads', 0, 'filepath'))
if len(in_files) < len(entries):
raise PostProcessingError('Aborting concatenation because some downloads failed')
ie_copy = self._downloader._playlist_infodict(info)
exts = traverse_obj(entries, (..., 'requested_downloads', 0, 'ext'), (..., 'ext'))
ie_copy['ext'] = exts[0] if len(set(exts)) == 1 else 'mkv'
out_file = self._downloader.prepare_filename(ie_copy, 'pl_video')
files_to_delete = self.concat_files(in_files, out_file)
info['requested_downloads'] = [{
'filepath': out_file,
'ext': ie_copy['ext'],
}]
return files_to_delete, info
| 41.325661
| 137
| 0.592532
|
5ec0466b441bfe0aeba6d4ce79954df811d7a927
| 1,472
|
py
|
Python
|
SWIM-Executables/Windows/pyinstaller-2.0 for windows/PyInstaller/hooks/hook-iu.py
|
alexsigaras/SWIM
|
1a35df8acb26bdcb307a1b8f60e9feba68ed1715
|
[
"MIT"
] | 47
|
2020-03-08T08:43:28.000Z
|
2022-03-18T18:51:55.000Z
|
SWIM-Executables/Windows/pyinstaller-2.0 for windows/PyInstaller/hooks/hook-iu.py
|
alexsigaras/SWIM
|
1a35df8acb26bdcb307a1b8f60e9feba68ed1715
|
[
"MIT"
] | null | null | null |
SWIM-Executables/Windows/pyinstaller-2.0 for windows/PyInstaller/hooks/hook-iu.py
|
alexsigaras/SWIM
|
1a35df8acb26bdcb307a1b8f60e9feba68ed1715
|
[
"MIT"
] | 16
|
2020-03-08T08:43:30.000Z
|
2022-01-10T22:05:57.000Z
|
# Copyright (C) 2005, Giovanni Bajo
# Based on previous work under copyright (c) 2001, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import sys
def hook(mod):
names = sys.builtin_module_names
if 'posix' in names:
removes = ['nt', 'dos', 'os2', 'mac', 'win32api']
elif 'nt' in names:
removes = ['dos', 'os2', 'mac']
elif 'os2' in names:
removes = ['nt', 'dos', 'mac', 'win32api']
elif 'dos' in names:
removes = ['nt', 'os2', 'mac', 'win32api']
elif 'mac' in names:
removes = ['nt', 'dos', 'os2', 'win32api']
else:
removes = []  # unknown platform: filter nothing rather than raise NameError
mod.imports = [m
for m in mod.imports
# if first part of module-name not in removes
if m[0].split('.', 1)[0] not in removes
]
return mod
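# Editorial illustration (not part of the original hook): on a POSIX
# build, an import list such as
#   [('os', ...), ('nt', ...), ('posixpath', ...)]
# is filtered down to
#   [('os', ...), ('posixpath', ...)]
# because 'nt' appears in the removes list for that platform.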
| 38.736842
| 82
| 0.648098
|
db80f1222ca48a327a6aceebd3be93cc50db8838
| 1,944
|
py
|
Python
|
benchmarks/Evolution/both/evo_tests/test_cases/test_verify_player_choices.py
|
nuprl/retic_performance
|
621211c2f40251ce5364c33e72e4067e34a32013
|
[
"MIT"
] | 3
|
2018-08-03T02:41:29.000Z
|
2021-03-19T03:18:47.000Z
|
benchmarks/Evolution/both/evo_tests/test_cases/test_verify_player_choices.py
|
nuprl/retic_performance
|
621211c2f40251ce5364c33e72e4067e34a32013
|
[
"MIT"
] | 3
|
2018-02-04T17:53:56.000Z
|
2018-11-10T17:06:57.000Z
|
benchmarks/Evolution/both/evo_tests/test_cases/test_verify_player_choices.py
|
nuprl/retic_performance
|
621211c2f40251ce5364c33e72e4067e34a32013
|
[
"MIT"
] | 1
|
2018-08-04T00:14:12.000Z
|
2018-08-04T00:14:12.000Z
|
import unittest
from evo_tests.examples import ExamplePlayerStates
from cardplay import *
from evolution.player.player_feeding_choice import *
class TestTraitCards(unittest.TestCase):
def setUp(self):
self.ex_player_states = ExamplePlayerStates()
def test_verify_card_choices(self):
ps = self.ex_player_states.step4_2_p2
ps2 = self.ex_player_states.step4_2_p3
assert ps.verify_card_choices((0, [ExchangeForBodySize(1, 0)]))
assert ps2.verify_card_choices((0, [ExchangeForSpecies(1, [2])]))
assert ps2.verify_card_choices((0, [ExchangeForPopulation(1, 0)]))
assert not ps.verify_card_choices((0, [ExchangeForSpecies(0, [0])]))
assert not ps.verify_card_choices((1, [ExchangeForSpecies(10, [1, 1, 1])]))
assert not ps.verify_card_choices((1, [ExchangeForPopulation(0, 1), ExchangeForPopulation(0, 1)]))
assert not ps.verify_card_choices((1, [ExchangeForSpecies(0, [1, 1]), ExchangeForPopulation(0, 1)]))
assert not ps.verify_card_choices((1, [ExchangeForBodySize(0, 1), ExchangeForPopulation(0, 1)]))
def test_verify_feed_choices(self):
ps1 = self.ex_player_states.fat5
ps2 = self.ex_player_states.fat3
ps3 = self.ex_player_states.burr_veg
ps4 = self.ex_player_states.carn
ps5 = self.ex_player_states.norm
assert PlayerFeedVegetarian(0).verify_self([ps4, ps3], ps5)
assert PlayerStoreFat(0, 1).verify_self([], ps1)
assert PlayerAttackWithCarnivore(0, 0, 0).verify_self([ps2], ps4)
assert not PlayerFeedVegetarian(0).verify_self([ps2], ps3)
assert not PlayerStoreFat(0, 100).verify_self([ps3], ps2)
assert not PlayerStoreFat(100, 1).verify_self([ps3], ps2)
assert not PlayerAttackWithCarnivore(0, 0, 1).verify_self([ps2], ps4)
assert not PlayerAttackWithCarnivore(10, 0, 20).verify_self([ps2], ps4)
if __name__ == '__main__':
unittest.main()
| 47.414634
| 108
| 0.699074
|
788a33d842e527991257ea6d58044df5ec402e1c
| 446
|
py
|
Python
|
astr-119-hw-2/exceptions.py
|
emabau/astr-119
|
e73c8598f8022e734fb94ed266caf3e25e7735d9
|
[
"MIT"
] | null | null | null |
astr-119-hw-2/exceptions.py
|
emabau/astr-119
|
e73c8598f8022e734fb94ed266caf3e25e7735d9
|
[
"MIT"
] | 5
|
2021-09-29T17:26:35.000Z
|
2021-12-08T18:25:15.000Z
|
astr-119-hw-2/exceptions.py
|
emabau/astr-119
|
e73c8598f8022e734fb94ed266caf3e25e7735d9
|
[
"MIT"
] | null | null | null |
#Python exceptions let you deal with
#unexpected results
try:
print(a) #this will throw an exception since a is not defined
except:
print("a is not defined!")
#there are specific exception types to handle particular cases
try:
print(a) #this will throw an exception since a is not defined
except NameError:
print("a is still not defined!")
except:
print("Something else went wrong.")
#this will break our program
#since a is not defined
print(a)
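#editor's note (hedged addition): try/except also supports 'else' and
#'finally' clauses, e.g.
#try:
#    b = 1
#except NameError:
#    print("b is not defined!")
#else:
#    print("no exception was raised")   #runs only if nothing was raised
#finally:
#    print("this always runs")          #runs whether or not an exception occurred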
| 23.473684
| 64
| 0.737668
|
ea0392add7a5d3712aa84f13100db6aef404eb04
| 8,206
|
py
|
Python
|
katsdpservices/test/test_argparse.py
|
ska-sa/katsdpservices
|
707799a97e7f09b273644988970afb221afb7851
|
[
"BSD-3-Clause"
] | null | null | null |
katsdpservices/test/test_argparse.py
|
ska-sa/katsdpservices
|
707799a97e7f09b273644988970afb221afb7851
|
[
"BSD-3-Clause"
] | 5
|
2020-01-23T14:06:12.000Z
|
2021-06-28T11:25:32.000Z
|
katsdpservices/test/test_argparse.py
|
ska-sa/katsdpservices
|
707799a97e7f09b273644988970afb221afb7851
|
[
"BSD-3-Clause"
] | null | null | null |
################################################################################
# Copyright (c) 2017-2020, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Tests for :mod:`katsdpservices.argparse`."""
import unittest
from unittest import mock
from katsdptelstate.endpoint import Endpoint
from katsdpservices import ArgumentParser
class MockException(Exception):
"""Exception class used for monkey-patching functions that don't return."""
pass
class TestArgumentParser(unittest.TestCase):
def _stub_get(self, name, default=None):
return self.data.get(name, default)
def setUp(self):
# Set up a mock version of TelescopeState which applies for the whole test
patcher = mock.patch('katsdptelstate.TelescopeState', autospec=True)
self.addCleanup(patcher.stop)
self.TelescopeState = patcher.start()
self.TelescopeState.return_value.get = mock.MagicMock(side_effect=self._stub_get)
# Create a fixture
self.parser = ArgumentParser()
self.parser.add_argument('positional', type=str)
self.parser.add_argument('--int-arg', type=int, default=5)
self.parser.add_argument('--float-arg', type=float, default=3.5)
self.parser.add_argument('--no-default', type=str)
self.parser.add_argument('--bool-arg', action='store_true', default=False)
group = self.parser.add_argument_group('Group options')
group.add_argument('--group-int', type=int, default=6)
mutual = self.parser.add_mutually_exclusive_group()
mutual.add_argument('--mutual-x', default='x')
mutual.add_argument('--mutual-y', default='y')
self.data = {
'config': {
'int_arg': 10,
'float_arg': 4.5,
'no_default': 'telstate',
'bool_arg': True,
'not_arg': 'should not be seen',
'telstate': 'example.org',
'group_int': 11,
'mutual_y': 'z',
'level1': {
'int_arg': 11,
'level2': {
'float_arg': 5.5
}
}
},
'config.level1.level2': {
'float_arg': 12.5
}
}
def test_no_telstate(self):
"""Passing explicit arguments but no telescope model sets the arguments."""
args = self.parser.parse_args(
['hello', '--int-arg=3', '--float-arg=2.5', '--no-default=test', '--bool-arg',
'--group-int=11', '--mutual-y=z'])
self.assertIsNone(args.telstate)
self.assertIsNone(args.telstate_endpoint)
self.assertEqual('', args.name)
self.assertEqual('hello', args.positional)
self.assertEqual(3, args.int_arg)
self.assertEqual(2.5, args.float_arg)
self.assertEqual('test', args.no_default)
self.assertEqual(True, args.bool_arg)
self.assertEqual(11, args.group_int)
self.assertEqual('z', args.mutual_y)
def test_no_telstate_defaults(self):
"""Passing no optional arguments sets those arguments to default"""
args = self.parser.parse_args(['hello'])
self.assertIsNone(args.telstate)
self.assertEqual('', args.name)
self.assertEqual('hello', args.positional)
self.assertEqual(5, args.int_arg)
self.assertEqual(3.5, args.float_arg)
self.assertIsNone(args.no_default)
self.assertEqual(False, args.bool_arg)
self.assertEqual(6, args.group_int)
self.assertEqual('y', args.mutual_y)
def test_telstate_no_name(self):
"""Passing --telstate but not --name loads from root config"""
args = self.parser.parse_args(['hello', '--telstate=example.com'])
self.assertIs(self.TelescopeState.return_value, args.telstate)
self.assertEqual(Endpoint('example.com', 6379), args.telstate_endpoint)
self.assertEqual('', args.name)
self.assertEqual(10, args.int_arg)
self.assertEqual(4.5, args.float_arg)
self.assertEqual('telstate', args.no_default)
self.assertEqual(True, args.bool_arg)
self.assertEqual(11, args.group_int)
self.assertEqual('z', args.mutual_y)
self.assertNotIn('help', vars(args))
self.assertNotIn('not_arg', vars(args))
def test_telstate_nested(self):
"""Passing a nested name loads from all levels of the hierarchy"""
args = self.parser.parse_args(['hello', '--telstate=example.com', '--name=level1.level2'])
self.assertIs(self.TelescopeState.return_value, args.telstate)
self.assertEqual(Endpoint('example.com', 6379), args.telstate_endpoint)
self.assertEqual('level1.level2', args.name)
self.assertEqual('hello', args.positional)
self.assertEqual(11, args.int_arg)
self.assertEqual(12.5, args.float_arg)
self.assertEqual('telstate', args.no_default)
def test_telstate_override(self):
"""Command-line parameters override telescope state"""
args = self.parser.parse_args(
['hello', '--int-arg=0', '--float-arg=0', '--telstate=example.com',
'--name=level1.level2', '--group-int=15'])
self.assertIs(self.TelescopeState.return_value, args.telstate)
self.assertEqual('level1.level2', args.name)
self.assertEqual('hello', args.positional)
self.assertEqual(0, args.int_arg)
self.assertEqual(0.0, args.float_arg)
self.assertEqual('telstate', args.no_default)
self.assertEqual(15, args.group_int)
def test_default_telstate(self):
"""Calling `set_default` with `telstate` keyword works"""
self.parser.set_defaults(telstate='example.com')
args = self.parser.parse_args(['hello'])
self.TelescopeState.assert_called_once_with('example.com')
self.assertIs(self.TelescopeState.return_value, args.telstate)
self.assertEqual(10, args.int_arg)
def test_convert_argument(self):
"""String argument in telescope state that must be converted to the appropriate type."""
self.data['config']['int_arg'] = '50'
args = self.parser.parse_args(['--telstate=example.com', 'hello'])
self.assertEqual(50, args.int_arg)
def test_bad_argument(self):
"""String argument in telescope state that cannot be converted must raise an error."""
self.data['config']['int_arg'] = 'not an int'
# We make the mock raise an exception, since the patched code is not
# expecting the function to return.
with mock.patch.object(
self.parser, 'error', autospec=True, side_effect=MockException) as mock_error:
with self.assertRaises(MockException):
self.parser.parse_args(['--telstate=example.com', 'hello'])
mock_error.assert_called_once_with(mock.ANY)
def test_help(self):
"""Passing --help prints help without trying to construct the telescope state"""
# We make the mock raise an exception, since the patched code is not
# expecting the function to return.
with mock.patch.object(
self.parser, 'exit', autospec=True, side_effect=MockException) as mock_exit:
with self.assertRaises(MockException):
self.parser.parse_args(['--telstate=example.com', '--help'])
mock_exit.assert_called_once_with()
# Make sure we did not try to construct a telescope state
self.assertEqual([], self.TelescopeState.call_args_list)
| 45.588889
| 98
| 0.630636
|
1fc7d4583c7d4494f5c188c597fc62e1c3d36914
| 13,586
|
py
|
Python
|
backend/docs/autoapi.py
|
deti/boss
|
bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869
|
[
"Apache-2.0"
] | 7
|
2018-05-20T08:56:08.000Z
|
2022-03-11T15:50:54.000Z
|
backend/docs/autoapi.py
|
deti/boss
|
bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869
|
[
"Apache-2.0"
] | 2
|
2021-06-08T21:12:51.000Z
|
2022-01-13T01:25:27.000Z
|
backend/docs/autoapi.py
|
deti/boss
|
bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869
|
[
"Apache-2.0"
] | 5
|
2016-10-09T14:52:09.000Z
|
2020-12-25T01:04:35.000Z
|
import inspect
import re
from operator import attrgetter
import sphinx.ext.autodoc as autodoc
from docutils import nodes
from sphinx.domains.python import PythonDomain, PyObject, TypedField, GroupedField, l_, _
from sphinx.ext.autosummary import Autosummary, autosummary_table, ViewList, \
get_import_prefixes_from_env, import_by_name, get_documenter, mangle_signature
from sphinx import addnodes
from sphinx.util.docstrings import prepare_docstring
from sphinx.util import force_decode
class AutoAPI(autodoc.ModuleDocumenter):
objtype = 'api'
priority = 11
directivetype = 'module'
domain = "pyapi"
def resolve_name(self, modname, parents, path, base):
return modname, parents + [base]
def get_object_members(self, want_all):
routes = self.object().routes
ret = {}
for r in routes:
callback = r.callback
rule = r.rule
method = r.method
try:
clas = r.callback.__self__.__class__
except AttributeError:
continue # skip functions
else:
ret.setdefault(clas, []).append(("%s %s" % (method, rule), callback))
return False, ret
def document_members(self, all_members=False):
want_all = all_members or self.options.inherited_members or self.options.members is autodoc.ALL
# find out which members are documentable
members_check_module, members_set = self.get_object_members(want_all)
resources_already_met = set()
for clas in sorted(members_set.keys(), key=attrgetter("__name__")):
members = members_set[clas]
processed_members = []
for member in members:
if isinstance(member, (tuple, list)):
resource, func, rest = member[0], member[1], member[2:]
if hasattr(func, '_mapped_to') and resource.rstrip("/") not in resources_already_met:
resources_already_met.add(resource)
processed_members.append((resource, func) + rest)
else:
processed_members.append(member)
# document non-skipped members
memberdocumenters = []
for (mname, member, isattr) in self.filter_members(processed_members, want_all):
classes = [cls for cls in autodoc.AutoDirective._registry.values()
if cls.can_document_member(member, mname, isattr, self)]
if not classes:
# don't know how to document this member
continue
# prefer the documenter with the highest priority
classes.sort(key=lambda cls: cls.priority)
documenter = classes[-1](self.directive, mname, self.indent)
memberdocumenters.append((documenter, isattr, member))
member_order = self.options.member_order or self.env.config.autodoc_member_order
if member_order == 'groupwise':
# sort by group; relies on stable sort to keep items in the
# same group sorted alphabetically
memberdocumenters.sort(key=lambda e: e[0].member_order)
elif member_order == 'bysource' and self.analyzer:
# sort by source order, by virtue of the module analyzer
tagorder = self.analyzer.tagorder
def keyfunc(entry):
fullname = entry[0].name.split('::')[1]
return tagorder.get(fullname, len(tagorder))
memberdocumenters.sort(key=keyfunc)
else:
# sort by name
memberdocumenters.sort(key=lambda e: sorted(e[2]._mapped_to.keys()))
self.add_line(u'', '<autodoc>')
self.add_line(clas.__name__, '<autodoc>')
self.add_line("*" * len(clas.__name__), '<autodoc>')
docstring = self.get_attr(clas, '__doc__', None)
if docstring:
if isinstance(docstring, str):
docstring = prepare_docstring(docstring)
elif docstring:
docstring = prepare_docstring(force_decode(docstring, None))
for s in docstring:
self.add_line(s, '<autodoc>')
for documenter, isattr, member in memberdocumenters:
documenter.object = member
documenter.generate_api(
all_members=True, real_modname=self.real_modname,
check_module=members_check_module and not isattr)
self.add_line("-----", '<autodoc>')
class AutoAPIMethod(autodoc.MethodDocumenter):
objtype = 'function_api'
directivetype = 'function'
priority = 11
member_order = 40
domain = "pyapi"
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return inspect.isroutine(member) and ' ' in membername
def format_name(self):
# normally the name doesn't contain the module (except for module
# directives of course)
self.fullname = self.name
return self.name
def generate_api(self, more_content=None, real_modname=None, check_module=False, all_members=False):
self.real_modname = None
self.objpath = []
self.analyzer = None
# TODO added analyzer and file name
# make sure that the result starts with an empty line. This is
# necessary for some situations where another directive preprocesses
# reST and no starting newline is present
self.add_line(u'', '<autodoc>')
# format the object's signature, if any
sig = self.format_signature()
# generate the directive header and options, if applicable
self.add_directive_header(sig)
self.add_line(u'', '<autodoc>')
# e.g. the module directive doesn't have content
self.indent += self.content_indent
# add all content (from docstrings, attribute docs etc.)
self.add_content(more_content)
# document members, if possible
self.document_members(all_members)
class PythonAPIDomain(PythonDomain):
name = "pyapi"
def __init__(self, *arg, **kwargs):
super(PythonAPIDomain, self).__init__(*arg, **kwargs)
self.directives['function'] = PyAPILevel
class PyAPILevel(PyObject):
"""
Description of an object on module level (functions, data).
"""
def needs_arglist(self):
return True
def get_index_text(self, modname, name_cls):
if not modname:
return _('%s() (built-in function)') % name_cls[0]
return _('%s() (in module %s)') % (name_cls[0], modname)
doc_field_types = \
[
TypedField('parameter', label=l_('Parameters'),
names=('param', 'parameter', 'arg', 'argument',
'keyword', 'kwarg', 'kwparam'),
typerolename='obj', typenames=('paramtype', 'type'),
can_collapse=True),
TypedField('variable', label=l_('Variables'), rolename='obj',
names=('var', 'ivar', 'cvar'),
typerolename='obj', typenames=('vartype',),
can_collapse=True),
GroupedField('exceptions', label=l_('Raises'), rolename='exc',
names=('raises', 'raise', 'exception', 'except'),
can_collapse=True),
TypedField('return', label=l_('Returns'), # has_arg=False,
names=('returns', 'return', 'rv'), typenames=('returntype', ),
typerolename='obj', can_collapse=True),
# TypedField('returntype', label=l_('Return type'), # has_arg=False,
# names=('rtype',)),
]
def handle_signature(self, sig, signode):
"""Transform a Python signature into RST nodes.
Return (fully qualified name of the thing, classname if any).
If inside a class, the current class name is handled intelligently:
* it is stripped from the displayed name if present
* it is added to the full name (return value) if not present
"""
# determine module and class name (if applicable), as well as full name
r = re.compile(r"(\w+)\s+([^\(]+)\s*\((.*)\)")
m = r.match(sig)
if m is None:
raise ValueError
method, name, arglist = m.groups()
modname = self.options.get(
'module', self.env.temp_data.get('py:module'))
classname = ''
fullname = "%s %s" % (method, name)
signode['module'] = modname
signode['class'] = classname
signode['fullname'] = fullname
signode += addnodes.desc_name(fullname, fullname)
if not arglist:
return fullname, ''
from sphinx.domains.python import _pseudo_parse_arglist
_pseudo_parse_arglist(signode, arglist)
return fullname, ''
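# Illustrative example with an assumed input (editorial, not from the
# original file): the regex above splits an HTTP-style signature such as
#   'GET /customer/<id> (customer_id)'
# into method='GET', name='/customer/<id>' (plus trailing whitespace)
# and arglist='customer_id', which then renders roughly as
# 'GET /customer/<id>(customer_id)'.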
def process_signature(app, what, name, obj, options, signature, return_annotation):
if what != 'function_api':
return None
doc = obj.__doc__
if not doc:
return None
r = re.compile(r":param\s+(\w+\s)*(\w+):")
matches = r.findall(doc)
sig = [m[1] for m in matches]
return "(%s)" % (", ".join(sig), ), ""
class ApiList(Autosummary):
def get_table(self, items):
"""Generate a proper list of table nodes for autosummary:: directive.
*items* is a list produced by :meth:`get_items`.
"""
table_spec = addnodes.tabular_col_spec()
table_spec['spec'] = 'll'
table = autosummary_table('')
real_table = nodes.table('', classes=['longtable'])
table.append(real_table)
group = nodes.tgroup('', cols=2)
real_table.append(group)
group.append(nodes.colspec('', colwidth=10))
group.append(nodes.colspec('', colwidth=90))
body = nodes.tbody('')
group.append(body)
def append_row(*column_texts):
row = nodes.row('')
for text in column_texts:
node = nodes.paragraph('')
vl = ViewList()
vl.append(text, '<autosummary>')
self.state.nested_parse(vl, 0, node)
try:
if isinstance(node[0], nodes.paragraph):
node = node[0]
except IndexError:
pass
row.append(nodes.entry('', node))
body.append(row)
for name, sig, summary, real_name in items:
qualifier = 'obj'
if 'nosignatures' not in self.options:
col1 = ':%s:`%s <%s>`\ %s' % (qualifier, name, name, sig)
else:
col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name)
col2 = summary
append_row(col1, col2)
return [table_spec, table]
def get_items(self, names):
"""Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
"""
env = self.state.document.settings.env
prefixes = get_import_prefixes_from_env(env)
items = []
max_item_chars = 50
for name in names:
display_name = name
if name.startswith('~'):
name = name[1:]
display_name = name.split('.')[-1]
try:
real_name, obj, parent = import_by_name(name, prefixes=prefixes)
except ImportError:
self.warn('failed to import %s' % name)
items.append((name, '', '', name))
continue
# NB. using real_name here is important, since Documenters
# handle module prefixes slightly differently
documenter = get_documenter(obj, parent)(self, real_name)
if not documenter.parse_name():
self.warn('failed to parse name %s' % real_name)
items.append((display_name, '', '', real_name))
continue
if not documenter.import_object():
self.warn('failed to import object %s' % real_name)
items.append((display_name, '', '', real_name))
continue
display_name = documenter.format_name()
# -- Grab the signature
sig = documenter.format_signature()
if not sig:
sig = ''
else:
max_chars = max(10, max_item_chars - len(display_name))
sig = mangle_signature(sig, max_chars=max_chars)
sig = sig.replace('*', r'\*')
# -- Grab the summary
doc = list(documenter.process_doc(documenter.get_doc()))
while doc and not doc[0].strip():
doc.pop(0)
m = re.search(r"^([A-Z][^A-Z]*?\.\s)", " ".join(doc).strip())
if m:
summary = m.group(1).strip()
elif doc:
summary = doc[0].strip()
else:
summary = ''
items.append((display_name, sig, summary, real_name))
return items
def setup(app):
app.add_domain(PythonAPIDomain)
app.add_autodocumenter(AutoAPI)
app.add_autodocumenter(AutoAPIMethod)
app.connect('autodoc-process-signature', process_signature)
# app.add_directive('apilist', ApiList)
| 36.818428
| 105
| 0.569042
|
d7eeccab3564ac790fe5cbe14661b01dd7915de6
| 8,684
|
py
|
Python
|
docs/conf.py
|
leniartek/trino-admin
|
05104a0b35bbc4aeca9469b2fc63a21c814a7855
|
[
"Apache-2.0"
] | 34
|
2016-01-08T21:02:13.000Z
|
2017-03-10T02:01:03.000Z
|
docs/conf.py
|
starburstdata/presto-admin
|
1bb652debefe1e26e9105f8ffb08a8793790967a
|
[
"Apache-2.0"
] | 19
|
2019-05-16T13:09:25.000Z
|
2020-12-04T18:01:39.000Z
|
docs/conf.py
|
starburstdata/presto-admin
|
1bb652debefe1e26e9105f8ffb08a8793790967a
|
[
"Apache-2.0"
] | 15
|
2019-03-07T16:37:06.000Z
|
2020-11-12T12:07:46.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# presto-admin documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import prestoadmin
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'presto-admin'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = prestoadmin.__version__
# The full version, including alpha/beta/rc tags.
release = prestoadmin.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'prestoadmindoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'prestoadmin.tex',
u'presto-admin Documentation',
'', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'prestoadmin',
u'presto-admin Documentation',
[u''], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'prestoadmin',
u'presto-admin Documentation',
u'',
'prestoadmin',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 30.685512
| 81
| 0.717066
|
3f693a64e7ea3613dcaea9a9aa5bf7d38150c25d
| 11,504
|
py
|
Python
|
intersight/model/hyperflex_health_check_execution_list.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/model/hyperflex_health_check_execution_list.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/model/hyperflex_health_check_execution_list.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.hyperflex_health_check_execution import HyperflexHealthCheckExecution
from intersight.model.hyperflex_health_check_execution_list_all_of import HyperflexHealthCheckExecutionListAllOf
from intersight.model.mo_base_response import MoBaseResponse
globals()['HyperflexHealthCheckExecution'] = HyperflexHealthCheckExecution
globals()['HyperflexHealthCheckExecutionListAllOf'] = HyperflexHealthCheckExecutionListAllOf
globals()['MoBaseResponse'] = MoBaseResponse
class HyperflexHealthCheckExecutionList(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'object_type': (str,), # noqa: E501
'count': (int,), # noqa: E501
'results': ([HyperflexHealthCheckExecution], none_type,), # noqa: E501
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'object_type': val}
attribute_map = {
'object_type': 'ObjectType', # noqa: E501
'count': 'Count', # noqa: E501
'results': 'Results', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, object_type, *args, **kwargs): # noqa: E501
"""HyperflexHealthCheckExecutionList - a model defined in OpenAPI
Args:
object_type (str): A discriminator value to disambiguate the schema of a HTTP GET response body.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
            count (int): The total number of 'hyperflex.HealthCheckExecution' resources matching the request, across all pages. The 'Count' attribute is included when the HTTP GET request includes the '$inlinecount' parameter. [optional]  # noqa: E501
            results ([HyperflexHealthCheckExecution], none_type): The array of 'hyperflex.HealthCheckExecution' resources matching the request. [optional]  # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'object_type': object_type,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
HyperflexHealthCheckExecutionListAllOf,
MoBaseResponse,
],
'oneOf': [
],
}
| 48.133891
| 1,678
| 0.64282
|
bab143d65a07f24f2884046ca9526176a47e18da
| 2,801
|
py
|
Python
|
examples/modules/extractors/plot_2_working_with_unscaled_traces.py
|
khl02007/spikeinterface
|
a29739a2ecc1d27af0fdb7adfed895aa9fdd0be4
|
[
"MIT"
] | 116
|
2019-07-12T14:33:43.000Z
|
2022-03-29T01:10:00.000Z
|
examples/modules/extractors/plot_2_working_with_unscaled_traces.py
|
khl02007/spikeinterface
|
a29739a2ecc1d27af0fdb7adfed895aa9fdd0be4
|
[
"MIT"
] | 424
|
2019-07-15T13:29:34.000Z
|
2022-03-30T13:30:45.000Z
|
examples/modules/extractors/plot_2_working_with_unscaled_traces.py
|
khl02007/spikeinterface
|
a29739a2ecc1d27af0fdb7adfed895aa9fdd0be4
|
[
"MIT"
] | 60
|
2019-08-26T11:59:07.000Z
|
2022-03-24T20:05:38.000Z
|
'''
Working with unscaled traces
============================
Some file formats store data in convenient types that require offsetting and scaling in order to convert the
traces to uV. This example shows how to work with unscaled and scaled traces in the :code:`spikeinterface.extractors`
module.
'''
import numpy as np
import matplotlib.pyplot as plt
import spikeinterface.extractors as se
##############################################################################
# First, let's create some traces in unsigned int16 type. Assuming the ADC output of our recording system has 10 bits,
# the values will be between 0 and 1024. Let's assume our signal is centered at 512 and it has a standard deviation
# of 50 bits
sampling_frequency = 30000
traces = 512 + 50 * np.random.randn(10 * sampling_frequency, 4)
traces = traces.astype("uint16")
###############################################################################
# Let's now instantiate a :code:`NumpyRecording` with the traces we just created
recording = se.NumpyRecording([traces], sampling_frequency=sampling_frequency)
print(f"Traces dtype: {recording.get_dtype()}")
###############################################################################
# Since our ADC samples between 0 and 1024, we need to convert to uV. To do so, we need to transform the traces as:
# traces_uV = traces_raw * gains + offset
#
# Let's assume that our gain (i.e. the value of each bit) is 0.1, so that our voltage range is between 0 and 1024*0.1.
# We also need an offset to center the traces around 0. The offset will be: - 2^(10-1) * gain = -512 * gain
# (where 10 is the number of bits of our ADC)
gain = 0.1
offset = -2 ** (10 - 1) * gain
###############################################################################
# We are now ready to set the gains and offsets on our extractor:
recording.set_channel_gains(gain)
recording.set_channel_offsets(offset)
###############################################################################
# Internally, these gains and offsets are handled as channel properties,
# so the gain can also be set "by channel".
print(recording.get_property('gain_to_uV'))
print(recording.get_property('offset_to_uV'))
###############################################################################
# With gains and offset information, we can retrieve traces both in their unscaled (raw) type, and in their scaled
# type:
traces_unscaled = recording.get_traces(return_scaled=False)
traces_scaled = recording.get_traces(return_scaled=True) # return_scaled is True by default
print(f"Traces dtype after scaling: {traces_scaled.dtype}")
plt.plot(traces_unscaled[:, 0], label="unscaled")
plt.plot(traces_scaled[:, 0], label="scaled")
plt.legend()
plt.show()
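###############################################################################
# As a final sanity check (an illustrative addition, not part of the original
# example), the scaled traces should match the manual conversion
# traces_uV = traces_raw * gain + offset:

manual_uV = traces_unscaled.astype("float64") * gain + offset
print(f"Max deviation from manual scaling: {np.max(np.abs(traces_scaled - manual_uV)):.6f} uV")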
| 40.594203
| 118
| 0.620136
|
209e545dd7db42da11520f247d9d49c6cb8308c9
| 293
|
py
|
Python
|
students/k3342/laboratory_works/Reybandt Alexandr/laboratiry_work_1/hwboard/hwboard/urls.py
|
TonikX/ITMO_ICT_-WebProgramming_2020
|
ba566c1b3ab04585665c69860b713741906935a0
|
[
"MIT"
] | 10
|
2020-03-20T09:06:12.000Z
|
2021-07-27T13:06:02.000Z
|
students/k3342/laboratory_works/Reybandt Alexandr/laboratiry_work_1/hwboard/hwboard/urls.py
|
TonikX/ITMO_ICT_-WebProgramming_2020
|
ba566c1b3ab04585665c69860b713741906935a0
|
[
"MIT"
] | 134
|
2020-03-23T09:47:48.000Z
|
2022-03-12T01:05:19.000Z
|
students/k3342/laboratory_works/Reybandt Alexandr/laboratiry_work_1/hwboard/hwboard/urls.py
|
TonikX/ITMO_ICT_-WebProgramming_2020
|
ba566c1b3ab04585665c69860b713741906935a0
|
[
"MIT"
] | 71
|
2020-03-20T12:45:56.000Z
|
2021-10-31T19:22:25.000Z
|
from django.contrib import admin
from django.urls import path
from django.urls import include
from .views import redirect_hw
urlpatterns = [
path('', redirect_hw),
path('admin/', admin.site.urls),
path('hw/', include('hw.urls')),
path('accounts/', include('allauth.urls'))
]
| 22.538462
| 46
| 0.68942
|
1d45d969892e710615dd26f82a78e4c5e476eef0
| 8,181
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations_async/_load_balancer_probes_operations_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2019-05-17T21:24:53.000Z
|
2020-02-12T11:13:42.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations_async/_load_balancer_probes_operations_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 15
|
2019-07-12T18:18:04.000Z
|
2019-07-25T20:55:51.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations_async/_load_balancer_probes_operations_async.py
|
LianwMS/azure-sdk-for-python
|
612d7bca9de86ee1bd1fa59291d7bf897ba9213f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerProbesOperations:
"""LoadBalancerProbesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs
) -> AsyncIterable["models.LoadBalancerProbeListResult"]:
"""Gets all the load balancer probes.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerProbeListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.LoadBalancerProbeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LoadBalancerProbeListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerProbeListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
probe_name: str,
**kwargs
) -> "models.Probe":
"""Gets load balancer probe.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param probe_name: The name of the probe.
:type probe_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Probe, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_11_01.models.Probe
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Probe"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'probeName': self._serialize.url("probe_name", probe_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Probe', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'} # type: ignore
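# A hedged usage sketch (not part of the generated SDK): consuming the pager
# returned by ``list`` with ``async for``. The ``client`` below is assumed to
# be an already-configured management client exposing this operation group as
# ``load_balancer_probes``.
async def print_probe_names(client, resource_group_name: str, load_balancer_name: str) -> None:
    # Each iteration transparently follows next_link until the pages run out.
    async for probe in client.load_balancer_probes.list(resource_group_name, load_balancer_name):
        print(probe.name)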
| 47.017241
| 192
| 0.665689
|
af3c279246ef6d3fd9fd84dc1cdac62f4a38b33c
| 3,607
|
py
|
Python
|
jarviscli/plugins/trivia.py
|
snehkhajanchi/Jarvis
|
3d00fbeca8bb7cd05cf6e69169dbd18290a8ae07
|
[
"MIT"
] | 1
|
2020-08-07T06:08:18.000Z
|
2020-08-07T06:08:18.000Z
|
jarviscli/plugins/trivia.py
|
snehkhajanchi/Jarvis
|
3d00fbeca8bb7cd05cf6e69169dbd18290a8ae07
|
[
"MIT"
] | null | null | null |
jarviscli/plugins/trivia.py
|
snehkhajanchi/Jarvis
|
3d00fbeca8bb7cd05cf6e69169dbd18290a8ae07
|
[
"MIT"
] | null | null | null |
from plugin import plugin, require
import requests
@require(network=True)
@plugin('trivia')
class trivia:
errCode = "An error occurred. Please try again later."
"""
Usage: Type trivia and follow the instructions.
This plugin gives you trivia questions (mcq or true/false)
for you to test your trivia knowledge
"""
def __call__(self, jarvis, s):
        trivia_fetch = self.get_trivia(jarvis)
        if trivia_fetch is not None:
            question_type = trivia_fetch["results"][0]["type"]
            if question_type == "multiple":
                self.mcq_question(jarvis, trivia_fetch)
            else:
                self.true_false_question(jarvis, trivia_fetch)
def get_trivia(self, jarvis):
"""
function creates request to api and fetches the corresponding data
"""
url = "https://opentdb.com/api.php?amount=1"
r = requests.get(url)
return r.json()
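    # Illustrative response shape (an assumption based on the fields this
    # plugin reads; the live API may include additional keys):
    # {"response_code": 0,
    #  "results": [{"type": "multiple", "question": "...",
    #               "correct_answer": "...", "incorrect_answers": ["...", "..."]}]}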
def true_false_question(self, jarvis, trivia_fetch):
response_code = trivia_fetch["response_code"]
if (response_code != 0):
            jarvis.say(self.errCode)
return
else:
question = trivia_fetch["results"][0]["question"]
            question = question.replace("&quot;", "\"")
jarvis.say("True/False: " + question)
options = ["true", "false"]
correct = trivia_fetch["results"][0]["correct_answer"]
correct = correct.lower()
self.true_false_answer(jarvis, options, correct)
def true_false_answer(self, jarvis, options, correctAnswer):
answerPrompt = "Please enter either \'true\' or \'false\'"
answer = (jarvis.input(answerPrompt + "\n")).lower()
while answer not in options:
jarvis.say("Invalid option")
answer = (jarvis.input(answerPrompt + "\n")).lower()
if (answer == correctAnswer):
jarvis.say("Correct!!")
else:
jarvis.say("Sorry, that's incorrect")
def mcq_question(self, jarvis, trivia_fetch):
response_code = trivia_fetch["response_code"]
if (response_code != 0):
            jarvis.say(self.errCode)
return
else:
question = trivia_fetch["results"][0]["question"]
            question = question.replace("&quot;", "\"")
            question = question.replace("&#039;", "'")
jarvis.say("Multiple Choice: " + question)
options = trivia_fetch["results"][0]["incorrect_answers"]
correct_answer = trivia_fetch["results"][0]["correct_answer"]
options.append(correct_answer)
options.sort()
option_count = 0
answersDict = {}
for option in options:
option_count = option_count + 1
answersDict[str(option_count)] = option
jarvis.say(str(option_count) + ". " + option)
self.mcq_answer(jarvis, answersDict, correct_answer, option_count)
return
def mcq_answer(self, jarvis, answersDict, correctAnswer, maxCount):
answerPrompt = "Please enter an integer 1-" + str(maxCount)
answer = jarvis.input(answerPrompt + "\n")
while answer not in answersDict.keys():
jarvis.say("Invalid option")
answer = jarvis.input(answerPrompt + "\n")
userAnswer = answersDict[answer]
if (userAnswer == correctAnswer):
jarvis.say("Correct!!")
else:
jarvis.say("Sorry, the correct answer was " + correctAnswer)
| 39.206522
| 78
| 0.591073
|
50ffa5ae605ec2d4d707bfdf705ffaec9a872170
| 8,076
|
py
|
Python
|
contrib/devtools/update-translations.py
|
FARGOCASHPAY/FARGOCASHCOIN
|
a5ec8cfc31c5a8c897d63b4f3fff110d2defe38b
|
[
"MIT"
] | null | null | null |
contrib/devtools/update-translations.py
|
FARGOCASHPAY/FARGOCASHCOIN
|
a5ec8cfc31c5a8c897d63b4f3fff110d2defe38b
|
[
"MIT"
] | null | null | null |
contrib/devtools/update-translations.py
|
FARGOCASHPAY/FARGOCASHCOIN
|
a5ec8cfc31c5a8c897d63b4f3fff110d2defe38b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'fargocash_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
specifiers.append(s[percent+1])
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# If both numeric format specifiers and "others" are used, assume we're dealing
# with a Qt-formatted message. In the case of Qt formatting (see https://doc.qt.io/qt-5/qstring.html#arg)
# only numeric formats are replaced at all. This means "(percentage: %1%)" is valid, without needing
# any kind of escaping that would be necessary for strprintf. Without this, this function
# would wrongly detect '%)' as a printf format specifier.
if numeric:
other = []
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
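# Illustrative example (documentation only, not used by the script):
#   find_format_specifiers('Sent %1 of %2 (%s)')  -> ['1', '2', 's']
#   split_format_specifiers(['1', '2', 's'])      -> ({'1', '2'}, [])
# because any numeric (Qt) specifier causes the strprintf ones to be dropped.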
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
    text = text.replace("'", '&apos;')
    text = text.replace('"', '&quot;')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8')  # need to override encoding because 'utf8' is not understood, only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
fetch_all_translations()
postprocess_translations()
| 38.826923
| 124
| 0.635958
|
92c6cd309a876769c7060a1cc5df12b881a58f35
| 138
|
py
|
Python
|
wsgi.py
|
vmasten/stock_portfolio
|
75a7f17e4891b1ca1374b5e1e5d83dd891b9fddd
|
[
"MIT"
] | null | null | null |
wsgi.py
|
vmasten/stock_portfolio
|
75a7f17e4891b1ca1374b5e1e5d83dd891b9fddd
|
[
"MIT"
] | 1
|
2018-12-07T03:57:51.000Z
|
2018-12-07T03:57:51.000Z
|
wsgi.py
|
vmasten/stock_portfolio
|
75a7f17e4891b1ca1374b5e1e5d83dd891b9fddd
|
[
"MIT"
] | null | null | null |
"""Start the app."""
try:
from app import app
except ImportError:
from .app import app
if __name__ == '__main__':
app.run()
| 13.8
| 26
| 0.630435
|
17947a104d09375ac2f88370929545e1d2c0193b
| 261
|
py
|
Python
|
python/sorting_distinct.py
|
ahmadhmirza/CPPractice
|
03e8bd50de29cb631a1a316e0da149560a54c1ca
|
[
"MIT"
] | null | null | null |
python/sorting_distinct.py
|
ahmadhmirza/CPPractice
|
03e8bd50de29cb631a1a316e0da149560a54c1ca
|
[
"MIT"
] | null | null | null |
python/sorting_distinct.py
|
ahmadhmirza/CPPractice
|
03e8bd50de29cb631a1a316e0da149560a54c1ca
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 15:44:00 2020
@author: ahmad
"""
def solution(A):
setA = set(A)
return setA
def main():
A = [2,1,1,2,3,1]
result = solution(A)
print(result)
if __name__ == '__main__':
main()
| 13.736842
| 35
| 0.536398
|
51e3932086fa4930a535aab2bb8c55ba1010add2
| 3,719
|
py
|
Python
|
utils/replay_buffer.py
|
StepNeverStop/RLwithUnity
|
f82be902e327b25c9884d8afe8b892f013197c22
|
[
"MIT"
] | 11
|
2019-04-12T13:17:11.000Z
|
2021-01-26T16:19:32.000Z
|
utils/replay_buffer.py
|
StepNeverStop/RLwithUnity
|
f82be902e327b25c9884d8afe8b892f013197c22
|
[
"MIT"
] | null | null | null |
utils/replay_buffer.py
|
StepNeverStop/RLwithUnity
|
f82be902e327b25c9884d8afe8b892f013197c22
|
[
"MIT"
] | null | null | null |
import numpy as np
class ReplayBuffer(object):
def __init__(self, s_dim, a_counts, buffer_size, use_priority=False):
self.use_priority = use_priority
self.state = np.zeros([buffer_size, s_dim], dtype=np.float32)
self.next_state = np.zeros([buffer_size, s_dim], dtype=np.float32)
self.action = np.zeros([buffer_size, a_counts], dtype=np.float32)
self.prob = np.zeros([buffer_size, a_counts], dtype=np.float32)
self.reward = np.zeros(buffer_size, dtype=np.float32)
self.discounted_reward = np.zeros(buffer_size, dtype=np.float32)
self.sum_tree_n = np.int32(np.ceil(np.log2(buffer_size))) + 1
if self.use_priority:
self.td_error = [np.zeros(np.int32(
np.ceil(buffer_size / np.power(2, i)))) for i in range(self.sum_tree_n)]
else:
self.td_error = np.zeros(buffer_size, dtype=np.float32)
self.advantage = np.zeros(buffer_size, dtype=np.float32)
self.done = np.zeros(buffer_size, dtype=np.float32)
self.now, self.buffer_size, self.max_size = 0, 0, buffer_size
def store(self, state, action, prob, reward, discounted_reward, td_error, advantage, next_state, done):
self.state[self.now] = state
self.next_state[self.now] = next_state
self.action[self.now] = action
self.prob[self.now] = prob
self.reward[self.now] = reward
self.discounted_reward[self.now] = discounted_reward
if self.use_priority:
diff = (np.abs(td_error) if np.abs(td_error) > np.max(
self.td_error[0]) else np.max(self.td_error[0]) + 1) - self.td_error[0][self.now]
for i in range(self.sum_tree_n):
self.td_error[i][self.now // np.power(2, i)] += diff
else:
self.td_error[self.now] = td_error
self.advantage[self.now] = advantage
self.done[self.now] = done
self.now = (self.now + 1) % self.max_size
self.buffer_size = min(self.buffer_size + 1, self.max_size)
def sample_batch(self, batch_size=32):
if self.use_priority:
temp_indexs = np.random.random_sample(
batch_size) * self.td_error[-1][0]
indexs = np.zeros(batch_size, dtype=np.int32)
for index, i in enumerate(temp_indexs):
k = 0
for j in reversed(range(self.sum_tree_n - 1)):
k *= 2
if self.td_error[j][k] < i:
i -= self.td_error[j][k]
k += 1
indexs[index] = k
else:
indexs = np.random.randint(0, self.buffer_size, size=batch_size)
return indexs, dict(
state=self.state[indexs],
next_state=self.next_state[indexs],
action=self.action[indexs],
old_prob=self.prob[indexs],
reward=self.reward[indexs],
discounted_reward=self.discounted_reward[indexs],
td_error=self.td_error[0][indexs] if self.use_priority else self.td_error[indexs],
weights=np.power(self.td_error[-1][0] / (self.buffer_size * self.td_error[0][indexs]), 0.04) / np.max(
self.td_error[0]) if self.use_priority else np.zeros(batch_size),
advantage=self.advantage[indexs],
done=self.done[indexs]
)
def update(self, indexs, td_error):
for i, j in zip(indexs, td_error):
if self.use_priority:
diff = np.abs(j) - self.td_error[0][i]
for k in range(self.sum_tree_n):
self.td_error[k][i // np.power(2, k)] += diff
else:
                self.td_error[i] = j
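# A minimal usage sketch (illustrative, not part of the original module); the
# state/action sizes below are assumptions chosen for the demo.
if __name__ == '__main__':
    buf = ReplayBuffer(s_dim=4, a_counts=2, buffer_size=8, use_priority=True)
    s = np.zeros(4, dtype=np.float32)
    a = np.zeros(2, dtype=np.float32)
    buf.store(s, a, a, reward=1.0, discounted_reward=1.0, td_error=0.5,
              advantage=0.0, next_state=s, done=0.0)
    indexs, batch = buf.sample_batch(batch_size=2)
    print(indexs, batch['reward'])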
| 47.679487
| 114
| 0.588061
|
bb1715daadbd7c1b52a36d69a202566c5b1b5c4a
| 634
|
py
|
Python
|
demo_game/player.py
|
daftspaniel/text_adventure_parser
|
21d0ded71585c31f9055c6e1370530c4dafb40f1
|
[
"BSD-3-Clause"
] | null | null | null |
demo_game/player.py
|
daftspaniel/text_adventure_parser
|
21d0ded71585c31f9055c6e1370530c4dafb40f1
|
[
"BSD-3-Clause"
] | null | null | null |
demo_game/player.py
|
daftspaniel/text_adventure_parser
|
21d0ded71585c31f9055c6e1370530c4dafb40f1
|
[
"BSD-3-Clause"
] | null | null | null |
""" Adventure game player module."""
class Player:
"""Demo game player object"""
def __init__(self):
self._inventory = []
self.map_x = 0
self.map_y = 0
def __str__(self):
return str(self._inventory)
@property
def inventory(self):
"""Returns a copy of the player's list of items."""
return self._inventory.copy()
def collect_item(self, item):
"""Adds an item to the player's inventory."""
self._inventory.append(item)
def drop_item(self, item):
"""Adds an item to the player's inventory."""
self._inventory.remove(item)
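# A minimal usage sketch (illustrative, not part of the demo game):
if __name__ == "__main__":
    player = Player()
    player.collect_item("lamp")
    print(player.inventory)  # ['lamp']
    player.drop_item("lamp")
    print(player)  # []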
| 23.481481
| 59
| 0.596215
|
02a34b5b00e2145105584b2abff3cc63c2ce75b8
| 1,032
|
py
|
Python
|
cldice_loss/pytorch/soft_skeleton.py
|
mmmmimic/clDice
|
05a8a61cdc1674ccdd4b162c2e0f051c144eaf54
|
[
"MIT"
] | 88
|
2021-03-30T07:01:45.000Z
|
2022-03-20T21:16:09.000Z
|
cldice_loss/pytorch/soft_skeleton.py
|
mmmmimic/clDice
|
05a8a61cdc1674ccdd4b162c2e0f051c144eaf54
|
[
"MIT"
] | 16
|
2021-06-04T13:56:47.000Z
|
2022-02-25T03:12:28.000Z
|
cldice_loss/pytorch/soft_skeleton.py
|
mmmmimic/clDice
|
05a8a61cdc1674ccdd4b162c2e0f051c144eaf54
|
[
"MIT"
] | 17
|
2021-06-23T01:50:38.000Z
|
2022-02-14T12:05:49.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def soft_erode(img):
if len(img.shape)==4:
p1 = -F.max_pool2d(-img, (3,1), (1,1), (1,0))
p2 = -F.max_pool2d(-img, (1,3), (1,1), (0,1))
return torch.min(p1,p2)
elif len(img.shape)==5:
p1 = -F.max_pool3d(-img,(3,1,1),(1,1,1),(1,0,0))
p2 = -F.max_pool3d(-img,(1,3,1),(1,1,1),(0,1,0))
p3 = -F.max_pool3d(-img,(1,1,3),(1,1,1),(0,0,1))
return torch.min(torch.min(p1, p2), p3)
def soft_dilate(img):
if len(img.shape)==4:
return F.max_pool2d(img, (3,3), (1,1), (1,1))
elif len(img.shape)==5:
return F.max_pool3d(img,(3,3,3),(1,1,1),(1,1,1))
def soft_open(img):
return soft_dilate(soft_erode(img))
def soft_skel(img, iter_):
img1 = soft_open(img)
skel = F.relu(img-img1)
for j in range(iter_):
img = soft_erode(img)
img1 = soft_open(img)
delta = F.relu(img-img1)
skel = skel + F.relu(delta-skel*delta)
return skel
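# A small usage sketch (illustrative assumption): extract a soft skeleton from
# a random 2D probability map shaped (batch, channel, height, width).
if __name__ == "__main__":
    img = torch.rand(1, 1, 64, 64)
    skel = soft_skel(img, iter_=5)
    print(skel.shape)  # torch.Size([1, 1, 64, 64])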
| 27.891892
| 56
| 0.54845
|
0799cb5dd712bd9ebdd683003e5c09cc07d1094d
| 5,448
|
py
|
Python
|
montecarlo/tests/test_montecarlo.py
|
y-pleim/MonteCarlo
|
a3e328630c3e0866878f4d25749a562a8be56a37
|
[
"MIT"
] | null | null | null |
montecarlo/tests/test_montecarlo.py
|
y-pleim/MonteCarlo
|
a3e328630c3e0866878f4d25749a562a8be56a37
|
[
"MIT"
] | null | null | null |
montecarlo/tests/test_montecarlo.py
|
y-pleim/MonteCarlo
|
a3e328630c3e0866878f4d25749a562a8be56a37
|
[
"MIT"
] | null | null | null |
"""
Unit and regression test for the montecarlo package.
"""
# Import package, test suite, and other packages as needed
import sys
import pytest
import montecarlo
import random
def test_1():
assert 1 == 1
def test_montecarlo_imported():
"""Sample test, will always pass so long as import statement worked."""
assert "montecarlo" in sys.modules
def test_SpinConfiguration():
conf = montecarlo.SpinConfiguration()
conf2 = montecarlo.SpinConfiguration()
# Initialize spin configurations
random.seed(2)
conf2.randomize(8)
conf.initialize([1, 1, 1, 1, 1, 1, 1, 1])
assert conf[0] == 1 # checks __getitem__ method
assert str(conf) == "1, 1, 1, 1, 1, 1, 1, 1." # checks __str__ method
assert conf.get_spins() == [1, 1, 1, 1, 1, 1, 1, 1] # checks get_spins
assert conf.n_sites() == 8 # checks n_sites
assert conf.compute_magnetization() == 8 # checks magnetization calculation
assert conf2.get_spins() == [0, 0, 0, 1, 0, 1, 1, 0] # checks randomize output
conf.set_site(2, 0) # sets site at index 2 to 0
assert conf.get_spins() == [1, 1, 0, 1, 1, 1, 1, 1] # checks set_site method
# testing that an error is raised by set_site when a bad input is inserted
# test function based on Python error-handling docs
    def test():
        try:
            conf.set_site(2, "test")
        except Exception:
            return 1
        return 0

    assert test() == 1  # checks that an error has occurred
def test_Hamiltonian():
ham = montecarlo.Hamiltonian()
conf = montecarlo.SpinConfiguration()
conf_sys = montecarlo.SpinConfigurationSystem()
conf.initialize([1, 1, 1, 1, 1, 1, 1, 1])
ham.initialize(-2, 1.1, False)
conf_sys.initialize(2)
assert (
ham.compute_energy(conf) == 22.8
) # checks compute_energy for no periodic boundary conditions
assert (
str(ham) == "J = -2, mu = 1.1, Periodic boundary conditions? False"
) # checks __str__ method
ham.initialize(-2, 1.1, True) # turns on PBC
assert ham.compute_energy(conf) == 24.8 # checks compute_energy for PBC
assert (
round(ham.compute_average_energy(1, conf_sys), 3) == -3.991
) # checks compute_average_energy
assert (
round(ham.compute_average_mag(1, conf_sys), 3) == -0.003
) # checks compute_average_mag
assert (
round(ham.compute_heat_capacity(1, conf_sys), 3) == 0.053
) # checks compute_heat_capacity
assert (
round(ham.compute_mag_susceptibility(1, conf_sys), 3) == 0.006
) # checks compute_mag_susceptibility
(
temps,
energies,
magnetizations,
heat_caps,
mag_suscept,
) = ham.generate_thermal_quantities(conf_sys)
# checks generate_thermal_quantities method:
assert round(temps[9], 1) == 1.0
assert round(energies[9], 3) == -3.991
assert round(magnetizations[9], 3) == -0.003
assert round(heat_caps[9], 3) == 0.053
assert round(mag_suscept[9], 3) == 0.006
# checks metropolis_sweep for PBC and no PBC:
random.seed(2)
conf.randomize(8)
conf2 = ham.metropolis_sweep(conf, 1)
assert conf2.get_spins() == [1, 0, 0, 1, 0, 0, 1, 0]
ham.initialize(-2, 1.1, False)
conf2 = ham.metropolis_sweep(conf, 1)
assert conf2.get_spins() == [1, 0, 0, 1, 0, 0, 1, 0]
conf.initialize([1, 1, 0, 1, 0, 0, 0, 1])
random.seed(2)
conf2 = ham.metropolis_sweep(conf, 1)
assert conf2.get_spins() == [0, 1, 0, 1, 0, 1, 0, 1]
conf.initialize([0, 1, 0, 1, 0, 0, 0, 0])
random.seed(2)
conf2 = ham.metropolis_sweep(conf, 1)
assert conf2.get_spins() == [0, 1, 0, 1, 0, 1, 0, 1]
# checks the average thermal quantity calculations for no PBC
conf_sys.initialize(8)
assert round(ham.compute_average_energy(10, conf_sys), 1) == -3.3
assert round(ham.compute_average_mag(10, conf_sys), 1) == -0.6
assert round(ham.compute_heat_capacity(10, conf_sys), 1) == 0.3
assert round(ham.compute_mag_susceptibility(10, conf_sys), 1) == 0.6
def test_SpinConfigSys():
conf_sys = montecarlo.SpinConfigurationSystem()
conf = montecarlo.SpinConfiguration()
conf_sys.initialize(8)
conf.initialize([0, 0, 0, 0, 0, 0, 0, 1])
assert conf_sys[1] == conf.get_spins() # checks __getitem__ method
# checks that each str(configuration) appears in str(configuration_system)
for i in range(len(conf_sys.collection)):
assert str(conf_sys).count(str(conf_sys.collection[i])) == 1
def test_montecarlo_metropolis():
ham = montecarlo.Hamiltonian()
ham.initialize(-2, 1.1, True)
random.seed(2)
# checks montecarlo_metropolis for 10000 montecarlo steps, 1000 burned steps
energy, mag, heat_cap, mag_sust = montecarlo.montecarlo_metropolis(
8, ham, 10, 10000, 1000
)
assert round(energy, 2) == -3.90
assert round(mag, 2) == -0.57
assert round(heat_cap, 2) == 0.32
assert round(mag_sust, 2) == 0.51
random.seed(2)
# checks the generate_montecarlo_thermal_quantities method
(
temps,
energies,
magnetizations,
heat_caps,
mag_susts,
) = montecarlo.generate_montecarlo_thermal_quantities(8, ham, 9)
index = len(temps) - 1
assert round(temps[index], 1) == 10
assert round(energies[index], 0) == -4
assert round(magnetizations[index], 0) == -1
assert round(heat_caps[index], 0) == 0
assert round(mag_susts[index], 0) == 1
| 32.047059
| 83
| 0.64409
|
5299dbd07a4ed102f193e211f96c9a9e8cc7acf2
| 1,368
|
py
|
Python
|
bin/bin_MD/DF_plot_density_field_residuals.py
|
JohanComparat/nbody-npt-functions
|
a034db4e5a9b2f87dc42eeb6059c4dd280589e4a
|
[
"CC0-1.0"
] | 4
|
2017-11-07T02:15:46.000Z
|
2022-03-03T01:35:53.000Z
|
bin/bin_MD/DF_plot_density_field_residuals.py
|
JohanComparat/nbody-npt-functions
|
a034db4e5a9b2f87dc42eeb6059c4dd280589e4a
|
[
"CC0-1.0"
] | null | null | null |
bin/bin_MD/DF_plot_density_field_residuals.py
|
JohanComparat/nbody-npt-functions
|
a034db4e5a9b2f87dc42eeb6059c4dd280589e4a
|
[
"CC0-1.0"
] | 2
|
2020-08-12T14:26:38.000Z
|
2021-09-14T06:08:58.000Z
|
# cd pySU/pyMultidark/trunk/bin/fortranfile-0.2.1/
import numpy as n
import os
from os.path import join
from astropy.io import fits
import time
import fortranfile
import matplotlib.pyplot as p
box_path=join("..", "MD_1Gpc", "density_field", "Box_HAM_z0.701838_nbar1.350000e-05_QSO.DF.fits.gz")
def plotResidual(box_path, name="residual-position-qso-z07.png"):
hd = fits.open(box_path)
L = 1000.
grid = 2048.
dx = L / grid
xr = hd[1].data['x']%dx
yr = hd[1].data['y']%dx
zr = hd[1].data['z']%dx
db=dx/10.
bins= n.arange(0,dx+db,db)
p.hist(xr,bins = bins, label='x', histtype='step')
p.hist(yr,bins = bins, label='y', histtype='step')
p.hist(zr,bins = bins, label='z', histtype='step')
p.legend(loc=3)
p.xlabel('rest(position / 0.48)')
p.ylabel('count')
p.xlim((0-db, dx-db))
p.grid()
    p.savefig(join("..", "MD_1Gpc", "density_field", "plots", name))
p.clf()
box_path=join("..", "MD_1Gpc", "density_field", "Box_HAM_z0.701838_nbar1.350000e-05_QSO.DF.fits.gz")
plotResidual(box_path, name="residual-position-qso-z07.png")
box_path=join("..", "MD_1Gpc", "density_field", "Box_HAM_z0.701838_nbar2.400000e-04_ELG.DF.fits.gz")
plotResidual(box_path, name="residual-position-elg-z07.png")
box_path=join("..", "MD_1Gpc", "density_field", "Box_HAM_z0.701838_nbar1.000000e-04_LRG.DF.fits.gz")
plotResidual(box_path, name="residual-position-lrg-z07.png")
| 31.813953
| 100
| 0.701023
|
2a25430c434ebe7e1c42eebe64d6bef07eedd38f
| 5,390
|
py
|
Python
|
tiny_imagenet_custom_resnet/model/main.py
|
bkemmer/GSoC-TensorFlow-2019
|
9bde2939ee073504630e2810496aae3618b5afa2
|
[
"Apache-2.0"
] | 5
|
2019-07-01T09:31:22.000Z
|
2020-03-05T08:19:04.000Z
|
tiny_imagenet_custom_resnet/model/main.py
|
bkemmer/GSoC-TensorFlow-2019
|
9bde2939ee073504630e2810496aae3618b5afa2
|
[
"Apache-2.0"
] | 13
|
2021-04-25T03:32:53.000Z
|
2022-03-11T23:53:16.000Z
|
tiny_imagenet_custom_resnet/model/main.py
|
bkemmer/GSoC-TensorFlow-2019
|
9bde2939ee073504630e2810496aae3618b5afa2
|
[
"Apache-2.0"
] | 8
|
2021-03-08T17:20:43.000Z
|
2022-03-15T11:24:03.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Runner Script.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from resnet import ResNet
from load_data import TinyImageNet
import tensorflow as tf
assert tf.__version__.startswith('2.')
from tensorflow.keras.callbacks import Callback, LearningRateScheduler
class EpochCheckpoint(Callback):
def __init__(self, outputPath, every=5, startAt=0):
super(EpochCheckpoint, self).__init__()
self.outputPath = outputPath
self.every = every
self.intEpoch = startAt
def on_epoch_end(self, epoch, log={}):
if (self.intEpoch+1) % self.every == 0:
path = os.path.sep.join([self.outputPath,
"custom_resnet.hdf5".format(self.intEpoch+1)])
self.model.save(path, overwrite=True)
self.intEpoch += 1
def poly_decay(epoch):
maxEpochs = NUM_EPOCHS
baseLR = INIT_LR
power = 1.0
alpha = baseLR * (1 - (epoch / float(maxEpochs))) ** power
return alpha
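# Illustrative schedule values (assuming the INIT_LR = 0.01 and NUM_EPOCHS = 30
# set under __main__ below); with power = 1.0 the decay is linear:
#   poly_decay(0)  -> 0.01
#   poly_decay(15) -> 0.005
#   poly_decay(30) -> 0.0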
def run_main(argv):
del argv
kwargs = {}
main(**kwargs)
def main():
ti = TinyImageNet()
model = ResNet.build(None, None, 3, 200, (3, 4, 6), (64, 128, 256, 512), reg=0.0005)
print(f'Custom ResNet model built.')
callbacks = [EpochCheckpoint("./checkpoints/", every=5),
LearningRateScheduler(poly_decay)]
opt = tf.keras.optimizers.Adam(learning_rate=0.1, beta_1=0.9, beta_2=0.999, epsilon=0.1, amsgrad=False)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
print(f'Model compiled. Training on 64x64 sized images upcoming.')
train_gen, val_gen = ti.train_val_gen(train_target=64, train_batch=64, val_target=64, val_batch=64)
model.fit_generator(
train_gen,
steps_per_epoch=100000 // 64,
validation_data=val_gen,
validation_steps=10000 // 64,
epochs=20,
max_queue_size=64 * 2,
callbacks=callbacks,
verbose=1
)
# Save the model
filepath = "./checkpoints/epoch_20_64.hdf5"
model.save(
filepath,
overwrite=True,
include_optimizer=True
)
print(f'Training for 20 epochs on 64x64 sized images has completed. Total Epochs: 20')
# Continue training with 32x32 sized images.
train_gen, val_gen = ti.train_val_gen(train_target=32, train_batch=64, val_target=64, val_batch=64)
model.fit_generator(
train_gen,
steps_per_epoch=100000 // 64,
validation_data=val_gen,
validation_steps=10000 // 64,
epochs=20,
max_queue_size=128,
callbacks=callbacks,
verbose=1
)
filepath = "./checkpoints/epoch_40_32.hdf5"
model.save(
filepath,
overwrite=True,
include_optimizer=True
)
print(f'Training for 20 epochs on 32x32 sized images has completed. Total Epochs: 40')
# Continue training with 16x16 sized images.
train_gen, val_gen = ti.train_val_gen(train_target=16, train_batch=64, val_target=64, val_batch=64)
model.fit_generator(
train_gen,
steps_per_epoch=100000 // 64,
validation_data=val_gen,
validation_steps=10000 // 64,
epochs=20,
max_queue_size=64,
callbacks=callbacks,
verbose=1
)
# Save the model
filepath = "./checkpoints/epoch_60_16.hdf5"
model.save(
filepath,
overwrite=True,
include_optimizer=True
)
print(f'Training for 20 epochs on 16x16 sized images has completed. Total Epochs: 60')
# Continue training with 32x32 sized images.
train_gen, val_gen = ti.train_val_gen(train_target=32, train_batch=64, val_target=64, val_batch=64)
model.fit_generator(
train_gen,
steps_per_epoch=100000 // 64,
validation_data=val_gen,
validation_steps=10000 // 64,
epochs=20,
max_queue_size=64,
verbose=1
)
# Save the model
filepath = "./checkpoints/epoch_80_32.hdf5"
model.save(
filepath,
overwrite=True,
include_optimizer=True
)
print(f'Training for another 20 epochs on 32x32 sized images has completed. Total Epochs: 80')
# Continue training with 64x64 sized images.
train_gen, val_gen = ti.train_val_gen(train_target=64, train_batch=64, val_target=64, val_batch=64)
model.fit_generator(
train_gen,
steps_per_epoch=100000 // 64,
validation_data=val_gen,
validation_steps=10000 // 64,
epochs=20,
max_queue_size=64,
verbose=1
)
# Save the model
filepath = "./checkpoints/epoch_100_64.hdf5"
model.save(
filepath,
overwrite=True,
include_optimizer=True
)
print(f'Training for another 20 epochs on 64x64 sized images has completed. Total Epochs: 100')
if __name__ == '__main__':
NUM_EPOCHS = 30
INIT_LR = 0.01
app.run(run_main)
| 28.072917
| 105
| 0.689796
|
a03e2fea620bb56207da839bdf318827d45dcb0c
| 8,239
|
py
|
Python
|
vyper/compiler/phases.py
|
qasz/vyper
|
cb4f0186bd9d3def098416c8caa20099e775e81d
|
[
"Apache-2.0"
] | 1
|
2021-09-30T09:00:56.000Z
|
2021-09-30T09:00:56.000Z
|
vyper/compiler/phases.py
|
qasz/vyper
|
cb4f0186bd9d3def098416c8caa20099e775e81d
|
[
"Apache-2.0"
] | null | null | null |
vyper/compiler/phases.py
|
qasz/vyper
|
cb4f0186bd9d3def098416c8caa20099e775e81d
|
[
"Apache-2.0"
] | null | null | null |
import copy
import warnings
from typing import Optional, Tuple
from vyper import ast as vy_ast
from vyper import compile_lll, optimizer
from vyper.context import validate_semantics
from vyper.parser import parser
from vyper.parser.global_context import GlobalContext
from vyper.typing import InterfaceImports
class CompilerData:
"""
Object for fetching and storing compiler data for a Vyper contract.
This object acts as a wrapper over the pure compiler functions, triggering
compilation phases as needed and providing the data for use when generating
the final compiler outputs.
Attributes
----------
vyper_module : vy_ast.Module
Top-level Vyper AST node
vyper_module_folded : vy_ast.Module
Folded Vyper AST
global_ctx : GlobalContext
Sorted, contextualized representation of the Vyper AST
lll_nodes : LLLnode
LLL used to generate deployment bytecode
lll_runtime : LLLnode
LLL used to generate runtime bytecode
assembly : list
Assembly instructions for deployment bytecode
assembly_runtime : list
Assembly instructions for runtime bytecode
bytecode : bytes
Deployment bytecode
bytecode_runtime : bytes
Runtime bytecode
"""
def __init__(
self,
source_code: str,
contract_name: str = "VyperContract",
interface_codes: Optional[InterfaceImports] = None,
source_id: int = 0,
) -> None:
"""
Initialization method.
Arguments
---------
source_code : str
Vyper source code.
contract_name : str, optional
The name of the contract being compiled.
interface_codes: Dict, optional
Interfaces that may be imported by the contracts during compilation.
            * Formatted as `{'interface name': {'type': "json/vyper", 'code': "interface code"}}`
* JSON interfaces are given as lists, vyper interfaces as strings
source_id : int, optional
ID number used to identify this contract in the source map.
"""
self.contract_name = contract_name
self.source_code = source_code
self.interface_codes = interface_codes
self.source_id = source_id
@property
def vyper_module(self) -> vy_ast.Module:
if not hasattr(self, "_vyper_module"):
self._vyper_module = generate_ast(self.source_code, self.source_id, self.contract_name)
return self._vyper_module
@property
def vyper_module_folded(self) -> vy_ast.Module:
if not hasattr(self, "_vyper_module_folded"):
self._vyper_module_folded = generate_folded_ast(self.vyper_module, self.interface_codes)
return self._vyper_module_folded
@property
def global_ctx(self) -> GlobalContext:
if not hasattr(self, "_global_ctx"):
self._global_ctx = generate_global_context(
self.vyper_module_folded, self.interface_codes
)
return self._global_ctx
def _gen_lll(self) -> None:
# fetch both deployment and runtime LLL
self._lll_nodes, self._lll_runtime = generate_lll_nodes(self.global_ctx)
@property
def lll_nodes(self) -> parser.LLLnode:
if not hasattr(self, "_lll_nodes"):
self._gen_lll()
return self._lll_nodes
@property
def lll_runtime(self) -> parser.LLLnode:
if not hasattr(self, "_lll_runtime"):
self._gen_lll()
return self._lll_runtime
@property
def assembly(self) -> list:
if not hasattr(self, "_assembly"):
self._assembly = generate_assembly(self.lll_nodes)
return self._assembly
@property
def assembly_runtime(self) -> list:
if not hasattr(self, "_assembly_runtime"):
self._assembly_runtime = generate_assembly(self.lll_runtime)
return self._assembly_runtime
@property
def bytecode(self) -> bytes:
if not hasattr(self, "_bytecode"):
self._bytecode = generate_bytecode(self.assembly)
return self._bytecode
@property
def bytecode_runtime(self) -> bytes:
if not hasattr(self, "_bytecode_runtime"):
self._bytecode_runtime = generate_bytecode(self.assembly_runtime)
return self._bytecode_runtime
def generate_ast(source_code: str, source_id: int, contract_name: str) -> vy_ast.Module:
"""
Generate a Vyper AST from source code.
Arguments
---------
source_code : str
Vyper source code.
source_id : int
ID number used to identify this contract in the source map.
contract_name : str
Name of the contract.
Returns
-------
vy_ast.Module
Top-level Vyper AST node
"""
return vy_ast.parse_to_ast(source_code, source_id, contract_name)
def generate_folded_ast(
vyper_module: vy_ast.Module, interface_codes: Optional[InterfaceImports]
) -> vy_ast.Module:
"""
Perform constant folding operations on the Vyper AST.
Arguments
---------
vyper_module : vy_ast.Module
Top-level Vyper AST node
Returns
-------
vy_ast.Module
Folded Vyper AST
"""
vy_ast.validation.validate_literal_nodes(vyper_module)
vyper_module_folded = copy.deepcopy(vyper_module)
vy_ast.folding.fold(vyper_module_folded)
validate_semantics(vyper_module_folded, interface_codes)
vy_ast.expansion.expand_annotated_ast(vyper_module_folded)
return vyper_module_folded
def generate_global_context(
vyper_module: vy_ast.Module, interface_codes: Optional[InterfaceImports],
) -> GlobalContext:
"""
Generate a contextualized AST from the Vyper AST.
Arguments
---------
vyper_module : vy_ast.Module
Top-level Vyper AST node
interface_codes: Dict, optional
Interfaces that may be imported by the contracts.
Returns
-------
GlobalContext
Sorted, contextualized representation of the Vyper AST
"""
return GlobalContext.get_global_context(vyper_module, interface_codes=interface_codes)
def generate_lll_nodes(global_ctx: GlobalContext) -> Tuple[parser.LLLnode, parser.LLLnode]:
"""
Generate the intermediate representation (LLL) from the contextualized AST.
This phase also includes LLL-level optimizations.
This function returns two values, one for generating deployment bytecode and
the other for generating runtime bytecode. The remaining compilation phases
may be called with either value, depending on the desired final output.
Arguments
---------
global_ctx : GlobalContext
Contextualized Vyper AST
Returns
-------
(LLLnode, LLLnode)
LLL to generate deployment bytecode
LLL to generate runtime bytecode
"""
lll_nodes, lll_runtime = parser.parse_tree_to_lll(global_ctx)
lll_nodes = optimizer.optimize(lll_nodes)
lll_runtime = optimizer.optimize(lll_runtime)
return lll_nodes, lll_runtime
def generate_assembly(lll_nodes: parser.LLLnode) -> list:
"""
Generate assembly instructions from LLL.
Arguments
---------
lll_nodes : str
Top-level LLL nodes. Can be deployment or runtime LLL.
Returns
-------
list
List of assembly instructions.
"""
assembly = compile_lll.compile_to_assembly(lll_nodes)
if _find_nested_opcode(assembly, "DEBUG"):
warnings.warn(
"This code contains DEBUG opcodes! The DEBUG opcode will only work in "
"a supported EVM! It will FAIL on all other nodes!"
)
return assembly
def _find_nested_opcode(assembly, key):
if key in assembly:
return True
else:
sublists = [sub for sub in assembly if isinstance(sub, list)]
return any(_find_nested_opcode(x, key) for x in sublists)
def generate_bytecode(assembly: list) -> bytes:
"""
Generate bytecode from assembly instructions.
Arguments
---------
assembly : list
Assembly instructions. Can be deployment or runtime assembly.
Returns
-------
bytes
Final compiled bytecode.
"""
return compile_lll.assembly_to_evm(assembly)[0]
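# A hedged usage sketch (not part of the module): CompilerData computes each
# phase lazily, so touching ``bytecode`` pulls the whole pipeline. The contract
# source below is an illustrative assumption.
if __name__ == "__main__":
    demo_source = """
@external
def add_one(x: uint256) -> uint256:
    return x + 1
"""
    data = CompilerData(demo_source, contract_name="Demo")
    # AST -> folded AST -> global context -> LLL -> assembly -> bytecode
    print(len(data.bytecode), len(data.bytecode_runtime))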
| 29.851449
| 100
| 0.673019
|
2bc3fad0ad9a74a4e5555b6dc7b4dd56665002cf
| 229
|
py
|
Python
|
tests/context.py
|
ChickenProp/gragrapy
|
9c24719c6fc843df2c506388aa21e64617cccc8d
|
[
"MIT"
] | 1
|
2017-04-30T18:26:19.000Z
|
2017-04-30T18:26:19.000Z
|
tests/context.py
|
ChickenProp/gragrapy
|
9c24719c6fc843df2c506388aa21e64617cccc8d
|
[
"MIT"
] | 4
|
2017-06-19T09:44:59.000Z
|
2017-06-19T09:58:57.000Z
|
tests/context.py
|
ChickenProp/gragrapy
|
9c24719c6fc843df2c506388aa21e64617cccc8d
|
[
"MIT"
] | null | null | null |
from __future__ import (absolute_import, print_function,
unicode_literals, division)
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import gragrapy
| 22.9
| 79
| 0.720524
|
18472f1e04316b3ba3d530038ecd7d87eff104b1
| 1,360
|
py
|
Python
|
napari/_vispy/vispy_vectors_layer.py
|
ddawsari/napari
|
bf0c7d081b644c1c19488fc69df5f03460275f3e
|
[
"BSD-3-Clause"
] | 1
|
2021-04-04T21:25:04.000Z
|
2021-04-04T21:25:04.000Z
|
napari/_vispy/vispy_vectors_layer.py
|
ddawsari/napari
|
bf0c7d081b644c1c19488fc69df5f03460275f3e
|
[
"BSD-3-Clause"
] | null | null | null |
napari/_vispy/vispy_vectors_layer.py
|
ddawsari/napari
|
bf0c7d081b644c1c19488fc69df5f03460275f3e
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from vispy.scene.visuals import Mesh
from .vispy_base_layer import VispyBaseLayer
class VispyVectorsLayer(VispyBaseLayer):
def __init__(self, layer):
node = Mesh()
super().__init__(layer, node)
self.layer.events.edge_color.connect(self._on_data_change)
self._reset_base()
self._on_data_change()
def _on_data_change(self, event=None):
if (
len(self.layer._view_vertices) == 0
or len(self.layer._view_faces) == 0
):
vertices = np.zeros((3, self.layer._dims.ndisplay))
faces = np.array([[0, 1, 2]])
face_color = np.array([[0, 0, 0, 0]])
else:
vertices = self.layer._view_vertices[:, ::-1]
faces = self.layer._view_faces
face_color = self.layer._view_face_color
if self.layer._dims.ndisplay == 3 and self.layer._dims.ndim == 2:
vertices = np.pad(vertices, ((0, 0), (0, 1)), mode='constant')
# self.node.set_data(
# vertices=vertices, faces=faces, color=self.layer.current_edge_color
# )
self.node.set_data(
vertices=vertices, faces=faces, face_colors=face_color,
)
self.node.update()
# Call to update order of translation values with new dims:
self._on_matrix_change()
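# For reference, a standalone sketch of the np.pad call used above, which
# appends a zero z-coordinate to every 2D vertex when the display switches
# to 3D:
#
#   >>> import numpy as np
#   >>> v = np.array([[1.0, 2.0], [3.0, 4.0]])
#   >>> np.pad(v, ((0, 0), (0, 1)), mode='constant')
#   array([[1., 2., 0.],
#          [3., 4., 0.]])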
| 32.380952
| 81
| 0.6
|
cf9d5ba828b9108c2594c4de9a4b1834b4fd694c
| 1,290
|
py
|
Python
|
papermerge/core/utils.py
|
defaultroute-eu/papermerge
|
f0ba7f59f2da68f8d2f8a0d05ceef5eca6746467
|
[
"Apache-2.0"
] | 1
|
2021-01-24T14:59:52.000Z
|
2021-01-24T14:59:52.000Z
|
papermerge/core/utils.py
|
defaultroute-eu/papermerge
|
f0ba7f59f2da68f8d2f8a0d05ceef5eca6746467
|
[
"Apache-2.0"
] | null | null | null |
papermerge/core/utils.py
|
defaultroute-eu/papermerge
|
f0ba7f59f2da68f8d2f8a0d05ceef5eca6746467
|
[
"Apache-2.0"
] | null | null | null |
import logging
import re
from datetime import datetime
logger = logging.getLogger(__name__)
def date_2int(kv_format, str_value):
# maps PAPERMERGE_METADATA_DATE_FORMATS to
# https://docs.python.org/3.8/library/datetime.html#strftime-and-strptime-format-codes
if not str_value:
return 0
format_map = {
'dd.mm.yy': '%d.%m.%y',
'dd.mm.yyyy': '%d.%m.%Y',
'dd.M.yyyy': '%d.%B.%Y',
'month': '%B'
}
try:
_date_instance = datetime.strptime(
str_value, format_map[kv_format]
)
    except Exception as e:
        # This is expected: automated extraction
        # of metadata may fail.
        logger.debug(
            f"While converting date with user format: {e}"
        )
return 0
    # Cast to int to match the function name and the documented contract.
    return int(_date_instance.timestamp())
def money_2int(kv_format, str_value):
return number_2int(kv_format, str_value)
def number_2int(kv_format, str_value):
"""
kv_format for number is usually something like this:
dddd
d,ddd
d.ddd
So converting to an integer means just remove from string
non-numeric characters and cast remaining str to integer.
"""
    if str_value:
        line = re.sub(r'[,.]', '', str_value)
        # Cast the remaining digits to an integer, as documented above.
        return int(line)
return 0
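# Hedged usage sketch for the converters above (values illustrative; the
# date result is epoch seconds and depends on the local timezone):
#
#   >>> date_2int('dd.mm.yyyy', '24.01.2021')  # doctest: +SKIP
#   1611446400
#   >>> number_2int('d,ddd', '1,250')
#   1250
#   >>> number_2int('dddd', '')
#   0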
| 23.035714
| 90
| 0.615504
|
49da15f9ef77f9d8b3371f08454df3da541f9c5a
| 338
|
py
|
Python
|
main.py
|
geraldzm/ShipWarGeneticAlgorithm
|
03f87e66eb5eab7a6441162aa089439981fd327c
|
[
"MIT"
] | null | null | null |
main.py
|
geraldzm/ShipWarGeneticAlgorithm
|
03f87e66eb5eab7a6441162aa089439981fd327c
|
[
"MIT"
] | null | null | null |
main.py
|
geraldzm/ShipWarGeneticAlgorithm
|
03f87e66eb5eab7a6441162aa089439981fd327c
|
[
"MIT"
] | null | null | null |
import pygame
from game import Game
if __name__ == '__main__':
pygame.init()
screen = pygame.display.set_mode((800, 600))
clock = pygame.time.Clock()
font = pygame.font.SysFont("Ubuntu", 25)
pygame.display.set_caption("Ships battle")
game = Game(clock, screen, font)
game.run() # run
pygame.quit()
| 17.789474
| 48
| 0.64497
|
19e4188bc9d30c82e23e8f90c0da11078644ef46
| 537
|
py
|
Python
|
Mini Projects/AlternatingCaps/AlternatingCaps.py
|
Snowystar122/Python-Projects
|
faf05ec388030b8b40ad7a8ca5c2760fb62cf5a3
|
[
"MIT"
] | null | null | null |
Mini Projects/AlternatingCaps/AlternatingCaps.py
|
Snowystar122/Python-Projects
|
faf05ec388030b8b40ad7a8ca5c2760fb62cf5a3
|
[
"MIT"
] | null | null | null |
Mini Projects/AlternatingCaps/AlternatingCaps.py
|
Snowystar122/Python-Projects
|
faf05ec388030b8b40ad7a8ca5c2760fb62cf5a3
|
[
"MIT"
] | null | null | null |
def changing_caps(s):
# Creates string with alternating caps, letters only
count = 0
output_str = ""
for i in s:
        if i.isalpha() and count % 2 == 0:
count += 1
output_str += i.upper()
elif i.isalpha() and count % 2 != 0:
count += 1
output_str += i.lower()
else:
output_str += i
return output_str
# Asks for input and returns output here
input_str = input("Input your string here:\n")
print(changing_caps(input_str))
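# Example run (illustrative):
#
#   >>> changing_caps("hello, world!")
#   'HeLlO, wOrLd!'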
| 26.85
| 58
| 0.560521
|
9f7ca19d332ff42568b897c56bf0fdfac54f26f7
| 2,627
|
py
|
Python
|
sdk/validate.py
|
kineticadb/container-kml-blackbox-sdk
|
7b988f685814d744b4b1dbc9195e63352b2d55f1
|
[
"MIT"
] | 2
|
2019-06-19T19:18:42.000Z
|
2019-12-05T16:46:42.000Z
|
sdk/validate.py
|
kineticadb/container-kml-blackbox-sdk
|
7b988f685814d744b4b1dbc9195e63352b2d55f1
|
[
"MIT"
] | 1
|
2019-03-15T10:53:16.000Z
|
2020-12-04T19:05:48.000Z
|
sdk/validate.py
|
kineticadb/container-kml-blackbox-sdk
|
7b988f685814d744b4b1dbc9195e63352b2d55f1
|
[
"MIT"
] | null | null | null |
import os
from os import path
import sys
import json
import argparse
# TODO: What is the right thing to do here -- raise exceptions or just print nicely and exit?
# TODO: Implement this later
def is_valid_spec(in_file_path):
with open(in_file_path) as json_file_in:
spec = json.load(json_file_in)
for findex, afunc in enumerate(spec['functions']):
print(f"Found function {afunc['bb_module']}:{afunc['bb_function']}")
if "feature_memory_file" in afunc:
print(f"\tFunction {afunc['bb_function']} has feature memory on file {afunc['feature_memory_file']}")
if not path.exists(afunc['feature_memory_file']):
print(f"Fatal Error: Referenced Feature Memory File {afunc['feature_memory_file']} does not exist")
sys.exit(1)
if not path.isfile(afunc['feature_memory_file']):
print(f"Fatal Error: Referenced Feature Memory File {afunc['feature_memory_file']} is not a file")
sys.exit(1)
if not is_valid_featuremem(in_file_path = afunc['feature_memory_file']):
print(f"Fatal Error: Referenced Feature Memory File {afunc['feature_memory_file']} is not a valid feature memory file")
sys.exit(1)
                # OK, we have a valid feature memory file, let's fuse it into the mainline
with open(afunc['feature_memory_file']) as featmem_json_file:
spec["functions"][findex]["feature_memory"] = json.load(featmem_json_file)
return True
# TODO: Implement this later
def is_valid_featuremem(in_file_path):
return True
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--spec-in', type=str, help='Path to introspectable specification file')
args = parser.parse_args()
    # Input file checks
    if not path.exists(args.spec_in):
        raise Exception(f"Specified input specification [{args.spec_in}] does not exist")
    if not path.isfile(args.spec_in):
        raise Exception(f"Specified input specification [{args.spec_in}] is not a file")
# Validate only function
if is_valid_spec(in_file_path=args.spec_in):
print("Input specification is valid")
else:
print("Input specification is NOT valid")
sys.exit(0)
if __name__ == "__main__":
print("")
print(r" __ ___ ")
print(r"/ _\_ __ ___ ___ / _ \___ _ __ ")
print(r"\ \| '_ \ / _ \/ __|____ / /_\/ _ \ '_ \ ")
print(r"_\ \ |_) | __/ (_|_____/ /_\\ __/ | | |")
print(r"\__/ .__/ \___|\___| \____/\___|_| |_|")
print(r" |_| ")
print("")
print("Introspectable Spec Validator")
print("(c) 2020 Kinetica DB, Inc.")
print("For support, reach support@kinetica.com")
print("")
main()
print("")
| 34.565789
| 124
| 0.681766
|
f7bb4a6002268ce72182110afb039fd92c9197b7
| 46,936
|
py
|
Python
|
core-plugins/flow/4/dss/drop-boxes/BDLSRFortessaDropbox/Processor.py
|
aarpon/obit_flow_core_technology
|
4b66729672232b10beb3509fd5f5bf83ee89d329
|
[
"Apache-2.0"
] | 2
|
2019-08-06T15:11:14.000Z
|
2019-11-05T18:40:54.000Z
|
core-plugins/flow/4/dss/drop-boxes/BDLSRFortessaDropbox/Processor.py
|
aarpon/obit_flow_core_technology
|
4b66729672232b10beb3509fd5f5bf83ee89d329
|
[
"Apache-2.0"
] | 3
|
2017-09-25T13:01:07.000Z
|
2019-10-24T06:43:39.000Z
|
core-plugins/flow/4/dss/drop-boxes/BDLSRFortessaDropbox/Processor.py
|
aarpon/obit_flow_core_technology
|
4b66729672232b10beb3509fd5f5bf83ee89d329
|
[
"Apache-2.0"
] | 1
|
2017-09-25T07:48:38.000Z
|
2017-09-25T07:48:38.000Z
|
import re
import os
import logging
from datetime import datetime
import xml.etree.ElementTree as xml
class Processor:
"""The Processor class performs all steps required for registering datasets
from the assigned dropbox folder."""
# Constructor
def __init__(self, transaction, prefix, version, logDir):
# Store arguments
self._transaction = transaction
self._prefix = prefix
self._version = version
self._incoming = transaction.getIncoming()
# Set up logging
self._logger = self._setup_logger(logDir, prefix)
# Store the transaction time stamp
self._transactionTimeStamp = self._getCurrentTimeStampMS()
# Keep track of the total number of samples in the transaction
self._transactionSampleCount = 0
# Keep track of the collection objects created/accessed in the transaction
self._collectionObjects = {}
def _supportIndexSorting(self, tubeSampleType):
"""Return true if the experiment with given prefix supports index sorting.
@param tubeSampleType: Type of the sample Tube.
"""
prefix = tubeSampleType[:-5]
if prefix in ["FACS_ARIA", "INFLUX", "MOFLO_XDP", "S3E", "SONY_MA900", "SONY_SH800S"]:
return True
elif prefix in ["LSR_FORTESSA", "CYTOFLEX_S"]:
return False
else:
raise Exception("Unknown prefix!")
def _collectionNameFromIdentifier(self, openBISCollectionIdentifier):
"""Converts the collection identifier to a human-friendly string for
the NAME property.
@param openBISCollectionIdentifier Identifier of the collection object.
@return string Human-friendly collection name.
"""
try:
collectionName = openBISCollectionIdentifier[
openBISCollectionIdentifier.rfind('/') + 1:].replace(
"_", " ").capitalize()
except:
collectionName = "COLLECTION"
return collectionName
def _createSampleWithManagedCode(self,
spaceCode,
openBISCollection,
sampleType,
setExperiment=True):
"""Create a sample with automatically generated code.
Depending on whether project samples are enabled in openBIS, the sample
code will be created accordingly.
If project samples are enabled, the code will be in the form: /SPACE/PROJECT/CODE
If project samples are not enabled, the code will be in the form: /SPACE/CODE
@param spaceCode The code of space (the space must exist).
@param openBISCollection The openBIS Collection object (must exist).
@param sampleType Sample type.
@param setExperiment (optional, default = True) Set to true, to assign the
newly created sample to the openBISCollection collection.
@return sample Created ISample
"""
if self._transaction.serverInformation.get('project-samples-enabled') == 'true':
# Build sample identifier
identifier = openBISCollection.getExperimentIdentifier()
project_identifier = identifier[:identifier.rfind('/')]
identifier = project_identifier + "/" + self._getUniqueSampleCode(sampleType)
else:
# Make sure there are not slashes in the spaceCode
spaceCode = spaceCode.replace("/", "")
# Build sample identifier
identifier = "/" + spaceCode + "/" + self._getUniqueSampleCode(sampleType)
# Inform
self._logger.info("Creating sample of type " + sampleType + " with (unique) identifier " + identifier)
# Create the sample
sample = self._transaction.createNewSample(identifier, sampleType)
# Set the experiment (collection)?
if setExperiment:
sample.setExperiment(openBISCollection)
return sample
def _dictToXML(self, d):
"""Converts a dictionary into an XML string."""
# Create an XML node
node = xml.Element("Parameters")
# Add all attributes to the XML node
for k, v in d.iteritems():
node.set(k, v)
# Convert to XML string
xmlString = xml.tostring(node, encoding="UTF-8")
# Return the XML string
return xmlString
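    # Illustration (hedged): _dictToXML({"PnN": "FSC-A"}) serializes to
    # something like '<?xml version=...?><Parameters PnN="FSC-A" />'; the
    # exact declaration depends on the ElementTree/Jython version in use.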
def _formatExpDateForPostgreSQL(self, expDate):
"""Format the experiment date to be compatible with postgreSQL's
'timestamp' data type.
@param Date stored in the FCS file, in the form 01-JAN-2013
@return Date in the form 2013-01-01
"""
monthMapper = {'JAN': '01', 'FEB': '02', 'MAR': '03', 'APR': '04',
'MAY': '05', 'JUN': '06', 'JUL': '07', 'AUG': '08',
'SEP': '09', 'OCT': '10', 'NOV': '11', 'DEC': '12'}
# Separate the date into day, month, and year
tryOtherFormat = False
try:
day, month, year = expDate.split("-")
except ValueError:
tryOtherFormat = True
        if tryOtherFormat:
            try:
                day, month, year = expDate.split(" ")
            except ValueError:
                # Neither separator matched: fall back to the epoch date
                # directly (month would otherwise be left as an int and
                # crash on .upper() below).
                return "1970-01-01"
# Try mapping the month to digits (e.g. "06"). If the mapping does
# not work, return "NOT_FOUND"
month = monthMapper.get(month.upper(), "NOT_FOUND")
# Build the date in the correct format. If the month was not found,
# return 01-01-1970
if month == "NOT_FOUND":
return "1970-01-01"
else:
return year + "-" + month + "-" + day
def _getCurrentTimeStampMS(self):
"""Create an univocal time stamp based on the current date and time
(works around incomplete API of Jython 2.5).
"""
t = datetime.now()
return unicode(t.strftime("%y%d%m%H%M%S%f"))
def _getUniqueSampleCode(self, sampleType):
"""Return a unique sample code based on the sample type,
the transaction time stamp, and the global transaction
sample count."""
self._transactionSampleCount += 1
return sampleType + "_" + self._transactionTimeStamp + "_" + str(self._transactionSampleCount)
def _getOrCreateCollection(self,
openBISCollectionIdentifier):
"""Retrieve or register an openBIS Collection with given identifier.
@param openBISCollectionIdentifier The collection openBIS identifier.
@return IExperiment collection
"""
# First, check if the openBISCollectionIdentifier is already known
if openBISCollectionIdentifier in self._collectionObjects:
# Retrieve it from the dictionary
collection = self._collectionObjects[openBISCollectionIdentifier]
else:
# Try retrieving the collection
collection = self._transaction.getExperiment(openBISCollectionIdentifier)
# If the collection does not exist, create it
if collection is None:
# Create a new collection of type "COLLECTION"
collection = self._transaction.createNewExperiment(
openBISCollectionIdentifier, "COLLECTION")
if collection is not None:
# Set the collection name
collectionName = self._collectionNameFromIdentifier(openBISCollectionIdentifier)
collection.setPropertyValue("$NAME", collectionName)
# Add the collection to the local dictionary
self._collectionObjects[openBISCollectionIdentifier] = collection
return collection
def _getSubFolders(self, incoming):
"""Return a list of subfolders of the passed incoming directory.
@param incoming Incoming folder.
@return list of subfolders (String)
"""
incomingStr = incoming.getAbsolutePath()
return [name for name in os.listdir(incomingStr)
if os.path.isdir(os.path.join(incomingStr, name))]
def _processExperimentNode(self,
experimentNode,
openBISExperimentSampleType,
machineName):
"""Process an experiment node.
The ExperimentNode maps to an openBIS Experiment Sample.
The {...}_EXPERIMENT SAMPLE object has following structure:
PARENTS : samples of type ORGANIZATION_UNIT (tags)
CHILDREN : samples of types {...}_TUBESET and {...}_TUBE
and, depending on the acquisition station,
also {...}_PLATE and {...}_WELL
CONTAINED: none
DATASETS: datasets of type ATTACHMENT (several file extensions)
@param experimentNode An XML node corresponding to {...}_EXPERIMENT (sample).
@param openBISExperimentSampleType Type of the experiment sample.
@param machineName Human-friendly name of the acquisition machine.
@return tuple with a Sample of specified type {...}_EXPERIMENT and the
corresponding Collection.
"""
#
# Extract attributes
#
# Get the openBIS openBISCollection identifier
openBISCollectionIdentifier = experimentNode.attrib.get("openBISCollectionIdentifier")
# Get the openBIS identifier
openBISIdentifier = experimentNode.attrib.get("openBISIdentifier")
# Make sure to keep the code length within the limits imposed by
# openBIS for codes
if len(openBISIdentifier) > 80:
openBISIdentifier = openBISIdentifier[0:80]
# Create univocal ID
openBISIdentifier = openBISIdentifier + "_" + self._getCurrentTimeStampMS()
# Get the experiment name
expName = experimentNode.attrib.get("name")
# Get the experiment date and reformat it to be compatible
# with postgreSQL
expDate = self._formatExpDateForPostgreSQL(experimentNode.attrib.get("date"))
if expDate == "1970/01/01":
self._logger.info("Invalid experiment date %s found. " \
"Reverting to 1970/01/01." % expDate)
# Get the description
description = experimentNode.attrib.get("description")
# Get the acquisition hardware
acqHardware = experimentNode.attrib.get("acq_hardware")
# Get the acquisition software
acqSoftware = experimentNode.attrib.get("acq_software")
# Get the owner name
owner = experimentNode.attrib.get("owner_name")
# Get attachments
attachments = experimentNode.attrib.get("attachments")
# Get comma-separated tag list
tagList = experimentNode.attrib.get("tags")
#
# Create needed/requested openBIS objects
#
# Get or create the openBISCollection with given identifier
openBISCollection = self._getOrCreateCollection(openBISCollectionIdentifier)
if openBISCollection is None:
msg = "Failed creating openBISCollection with ID " + \
openBISCollectionIdentifier + "."
self._logger.error(msg)
raise Exception(msg)
# Make sure to create a new sample of type openBISExperimentSampleType
openBISExperimentSample = self._transaction.createNewSample(openBISIdentifier,
openBISExperimentSampleType)
if openBISExperimentSample is None:
msg = "Could not create " + openBISExperimentSampleType + \
" sample wit id " + openBISIdentifier
self._logger.error(msg)
raise Exception(msg)
# Set the openBISCollection
openBISExperimentSample.setExperiment(openBISCollection)
# Add tags (create them if needed)
if tagList != None and tagList != "":
openBISExperimentSample = self._registerTags(openBISExperimentSample,
tagList)
# Add the attachments
if attachments is not None:
if not self._registerAttachmentsToCollection(attachments,
openBISCollection,
openBISExperimentSample):
# Error
msg = "Adding attachments failed!"
self._logger.error(msg)
raise Exception(msg)
#
# Store properties
#
# Store the name
openBISExperimentSample.setPropertyValue(
"$NAME", expName)
# Set the experiment version
openBISExperimentSample.setPropertyValue(
openBISExperimentSampleType + "_VERSION", str(self._version))
# Set the date
openBISExperimentSample.setPropertyValue(
openBISExperimentSampleType + "_DATE", expDate)
# Set the description
openBISExperimentSample.setPropertyValue(
openBISExperimentSampleType + "_DESCRIPTION", description)
# Set the acquisition hardware
openBISExperimentSample.setPropertyValue(
openBISExperimentSampleType + "_ACQ_HARDWARE", acqHardware)
# Set the acquisition hardware friendly name
openBISExperimentSample.setPropertyValue(
openBISExperimentSampleType + "_ACQ_HARDWARE_FRIENDLY_NAME", machineName)
# Set the acquisition software
openBISExperimentSample.setPropertyValue(
openBISExperimentSampleType + "_ACQ_SOFTWARE", acqSoftware)
# Set the experiment owner
openBISExperimentSample.setPropertyValue(
openBISExperimentSampleType + "_OWNER", owner)
#
# Return
#
# Return the openBIS Experiment Sample object and the openBISCollection
return openBISExperimentSample, openBISCollection
def _processSpecimenNode(self,
specimenNode,
specimens,
openBISCollection,
openBISSpecimenSampleType,
specimenName):
"""Register a TubeSet (virtual tube container).
The SpecimenNode maps to an openBIS {...}_SPECIMEN sample.
The {...}_SPECIMEN SAMPLE object has following structure:
PARENTS : none
CHILDREN : samples of type {...}_WELL or {...}_TUBE
CONTAINED: none
DATASETS: none
@param specimenNode An XML node corresponding to a Specimen.
@param openBISCollection A Collection Sample object
@param openBISSpecimenSampleType The Specimen sample type
@param openBISExperimentSampleIdentifier The identifier of the
{...}_EXPERIMENT sample.
@param specimenName The name of the Specimen.
@return ISample sample, or null
"""
# Get the identifier of the space all relevant attributes
openBISSpaceIdentifier = specimenNode.attrib.get("openBISSpaceIdentifier")
# If the Specimen object already exists, return it; otherwise,
# create a new one.
if specimenName in specimens:
self._logger.info("Reusing Specimen " + specimenName)
return specimens[specimenName]
# Create the sample. The Specimen is configured in openBIS to
# auto-generate its own identifier.
openBISSpecimen = self._createSampleWithManagedCode(openBISSpaceIdentifier,
openBISCollection,
openBISSpecimenSampleType,
setExperiment=True)
# Confirm creation
if not openBISSpecimen:
msg = "Could not get or create Specimen"
self._logger.error(msg)
raise Exception(msg)
# Inform
self._logger.info("Created new Specimen " \
"with identifier %s, sample type %s" \
% (openBISSpecimen.getSampleIdentifier(),
openBISSpecimenSampleType))
# Set the name of the Specimen
openBISSpecimen.setPropertyValue("$NAME", specimenName)
# Return the openBIS ISample object
return openBISSpecimen
def _processTrayNode(self,
trayNode,
openBISCollection,
openBISExperimentSampleIdentifier,
openBISTraySampleType):
"""Register a Tray (Plate) based on the Tray XML node.
The {...}_SPECIMEN SAMPLE object has following structure:
PARENTS : {...}_EXPERIMENT
CHILDREN : samples of type {...}_WELL
CONTAINED: none
DATASETS: none
@param trayNode An XML node corresponding to a Tray (Plate).
@param openBISCollection An IExperimentUpdatable object.
@param openBISExperimentSampleIdentifier The identifier of the {...}_EXPERIMENT sample.
@param openBISTraySampleType Tray sample type.
@return ISample sample, or None.
"""
# Get the identifier of the space all relevant attributes
openBISSpaceIdentifier = trayNode.attrib.get("openBISSpaceIdentifier")
# Get the tray name
name = trayNode.attrib.get("name")
# Get the tray geometry
trayGeometry = trayNode.attrib.get("trayGeometry")
# Create the sample. The Plate is configured in openBIS to
# auto-generate its own identifier.
openBISTray = self._createSampleWithManagedCode(openBISSpaceIdentifier,
openBISCollection,
openBISTraySampleType,
setExperiment=True)
if not openBISTray:
msg = "Could not create plate sample."
self._logger.error(msg)
raise Exception(msg)
# Set the parent sample of type {...}_EXPERIMENT
openBISTray.setParentSampleIdentifiers([openBISExperimentSampleIdentifier])
# Set the $NAME property
openBISTray.setPropertyValue("$NAME", name)
# Set the tray geometry
openBISTray.setPropertyValue(openBISTraySampleType + "_GEOMETRY", trayGeometry)
# Return the openBIS ISample object
return openBISTray
def _processTube(self,
tubeNode,
openBISCollection,
openBISTubeSampleType,
openBISExperimentSample,
openBISSpecimenSample,
openBISTubeSetSample):
"""Register a Tube (as a child of a Specimen) based on the Tube XML node.
The {...}_TUBE SAMPLE object has following structure:
PARENTS : {...}_EXPERIMENT, {...}_SPECIMEN, {...}_TUBESET
CHILDREN : none
CONTAINED: none
DATASETS: {...}_FCSFILE (with corresponding .FCS files)
@param tubeNode An XML node corresponding to a Tube.
@param openBISCollection The IExperiment to which the Tube belongs
@param openBISTubeSampleType The Tube sample type.
@param openBISExperimentSample The openBIS Experiment sample (parent).
@param openBISSpecimenSample The openBIS Specimen sample (parent).
@param openBISTubeSetSample The openBIS TubeSet sample (parent).
@return ISample sample, or null
"""
# Get the name
name = tubeNode.attrib.get("name")
# Build the openBIS Identifier
openBISSpaceIdentifier = \
tubeNode.attrib.get("openBISSpaceIdentifier")
# Create the sample. The Tube/Well is configured in openBIS to
# auto-generate its own identifier.
openBISTube = self._createSampleWithManagedCode(openBISSpaceIdentifier,
openBISCollection,
openBISTubeSampleType,
setExperiment=True)
if not openBISTube:
msg = "Could not create TUBE sample with auto-generated identifier"
self._logger.error(msg)
raise Exception(msg)
# Set the $NAME property
openBISTube.setPropertyValue("$NAME", name)
# Does the tube have an "indexSort" attribute?
if self._supportIndexSorting(openBISTubeSampleType):
indexSort = tubeNode.attrib.get("indexSort")
if indexSort is not None:
openBISTube.setPropertyValue(openBISTubeSampleType + "_ISINDEXSORT", indexSort)
# Set the parents
openBISTube.setParentSampleIdentifiers([
openBISExperimentSample.getSampleIdentifier(),
openBISSpecimenSample.getSampleIdentifier(),
openBISTubeSetSample.getSampleIdentifier()
])
# Return the openBIS Tube sample
return openBISTube
def _processTubeSetNode(self,
experimentNode,
openBISCollection,
openBISTubeSetSampleType,
openBISExperimentSampleIdentifier):
"""Register a TubeSet (virtual tube container).
The TubeSetNode maps to an openBIS {...}_TUBESET sample.
The {...}_TUBESET SAMPLE object has following structure:
PARENTS : sample of type {...}_EXPERIMENT.
CHILDREN : samples of type {...}_TUBE
CONTAINED: none
DATASETS: none
@param experimentNode An XML node corresponding to a (virtual) TubeSet.
@param openBISCollection A Collection Sample object
@param openBISTubeSetSampleType The TubeSet sample type
@param openBISExperimentSampleIdentifier The identifier of the
{...}_EXPERIMENT sample.
@return ISample sample, or null
"""
# Get the identifier of the space all relevant attributes
openBISSpaceIdentifier = \
experimentNode.attrib.get("openBISSpaceIdentifier")
# Create the sample. The Tubeset is configured in openBIS to
# auto-generate its own identifier.
openBISTubeSet = self._createSampleWithManagedCode(openBISSpaceIdentifier,
openBISCollection,
openBISTubeSetSampleType,
setExperiment=True)
# Confirm creation
if not openBISTubeSet:
msg = "Could not get or create TubeSet"
self._logger.error(msg)
raise Exception(msg)
# Inform
self._logger.info("Created new TubeSet " \
"with identifier %s, sample type %s" \
% (openBISTubeSet.getSampleIdentifier(),
openBISTubeSetSampleType))
# Set the parent sample of type {...}_EXPERIMENT
openBISTubeSet.setParentSampleIdentifiers([openBISExperimentSampleIdentifier])
# Return the openBIS ISample object
return openBISTubeSet
def _processWell(self,
wellNode,
openBISCollection,
openBISWellSampleType,
openBISExperimentSample,
openBISSpecimenSample,
openBISPlateSample):
"""Register a Well based on the Well XML node.
The {...}_WELL SAMPLE object has following structure:
PARENTS : {...}_EXPERIMENT, {...}_SPECIMEN, {...}_PLATE
CHILDREN : none
CONTAINED: none
DATASETS: {...}_FCSFILE (with corresponding .FCS files)
@param wellNode An XML node corresponding to a Well.
@param openBISCollection The IExperiment to which the Tube belongs
@param openBISWellSampleType The Well sample type.
@param openBISExperimentSample The openBIS Experiment sample (parent).
@param openBISSpecimenSample The openBIS Specimen sample (parent).
@param openBISPlateSample The openBIS Plate sample (parent).
@return ISample sample, or null
"""
# Get the name
name = wellNode.attrib.get("name")
# Build the openBIS Identifier
openBISSpaceIdentifier = wellNode.attrib.get("openBISSpaceIdentifier")
# Create the sample. The Tube/Well is configured in openBIS to
# auto-generate its own identifier.
openBISWell = self._createSampleWithManagedCode(openBISSpaceIdentifier,
openBISCollection,
openBISWellSampleType,
setExperiment=True)
if not openBISWell:
msg = "Could not create WELL sample with auto-generated identifier"
self._logger.error(msg)
raise Exception(msg)
# Set the $NAME property
openBISWell.setPropertyValue("$NAME", name)
# Set the parents
openBISWell.setParentSampleIdentifiers([
openBISExperimentSample.getSampleIdentifier(),
openBISSpecimenSample.getSampleIdentifier(),
openBISPlateSample.getSampleIdentifier()
])
# Return the openBIS Tube sample
return openBISWell
def _processFCSFile(self,
fcsFileNode,
openBISDataSetType,
openBISSample,
openBISCollection):
"""Register the FCS File using the parsed properties file.
@param fcsFileNode An XML node corresponding to an FCS file (dataset).
@param openBISDataSetType The type of the DataSet.
@param openBISSample An ISample object representing a Tube or Well.
@param openBISCollection The openBIS Collection.
"""
# Create a new dataset
dataset = self._transaction.createNewDataSet()
if not dataset:
msg = "Could not get or create dataset"
self._logger.error(msg)
raise Exception(msg)
# Set the dataset type
dataset.setDataSetType(openBISDataSetType)
# Assign the dataset to the sample
dataset.setSample(openBISSample)
# Set the file type
dataset.setFileFormatType("FCS")
# Get the parameter node
for parameterNode in fcsFileNode:
if parameterNode.tag != "FCSFileParamList":
msg = "Expected FSC File Parameter List node!"
self._logger.error(msg)
raise Exception(msg)
parametersXML = self._dictToXML(parameterNode.attrib)
# Store the parameters in the LSR_FORTESSA_FCSFILE_PARAMETERS property
dataset.setPropertyValue(openBISDataSetType + "_PARAMETERS", parametersXML)
# Log the parameters
self._logger.info("FCS file parameters (XML): " + str(parametersXML))
# Assign the file to the dataset (we will use the absolute path)
fileName = fcsFileNode.attrib.get("relativeFileName")
fileName = os.path.join(self._transaction.getIncoming().getAbsolutePath(), fileName)
# Add the file name to the $NAME property
dataset.setPropertyValue("$NAME", os.path.basename(fileName))
# Log
self._logger.info("Registering file: " + fileName)
# Move the file
self._transaction.moveFile(fileName, dataset)
def registerAccessoryFilesAsDatasets(self,
relativePath,
openBISExperimentSampleType,
openBISAccessoryFileDataSetType,
openBISExperimentSample):
"""Scan the given path for files at the root levels that are of the expected format
and associates them to the _EXPERIMENT sample.
Please notice that currently only samples of type CYTOFLEX_S_EXPERIMENT support registering
accessory files as datasets.
"""
# Accepted file formats
file_formats = []
if openBISExperimentSampleType == "CYTOFLEX_S_EXPERIMENT":
# The expected format is .xml
file_formats = [".xml"]
else:
# CYTOFLEX_S_EXPERIMENT is currently the only supported sample type.
# If we find another one, we return (success)
return True
# Also check the dataset type. Currently, this can only be CYTOFLEX_S_ACCESSORY_FILE
if openBISAccessoryFileDataSetType != "CYTOFLEX_S_ACCESSORY_FILE":
# This is an error!
return False
# Report
self._logger.info("Processing accessory files for experiment of type: " + openBISExperimentSampleType)
# Path to be scanned for files
fullpath = os.path.join(self._transaction.getIncoming().getAbsolutePath(), relativePath)
# Report
self._logger.info("All remaining files in folder: " + str(os.listdir(fullpath)))
# Get the list of files at the root of full path that are of the expected format
files = [f for f in os.listdir(fullpath) if
os.path.isfile(os.path.join(fullpath, f)) and
os.path.splitext(f.lower())[-1] in file_formats]
# Report
self._logger.info("Accessory files to process: " + str(files))
# Register them as datasets
for f in files:
# Log
self._logger.info("Registering accessory file: " + f)
# Create a new dataset
dataset = self._transaction.createNewDataSet()
if not dataset:
msg = "Could not get or create dataset"
self._logger.error(msg)
raise Exception(msg)
# Set the dataset type
dataset.setDataSetType(openBISAccessoryFileDataSetType)
# Set the $NAME property
dataset.setPropertyValue("$NAME", f)
# Assign the dataset to the experiment sample
dataset.setSample(openBISExperimentSample)
# Move to a custom destination
dstPath = os.path.join("original", f)
self._transaction.moveFile(os.path.join(fullpath, f), dataset, dstPath)
return True
def run(self):
"""Run the registration."""
# Make sure that incoming is a folder
if not self._transaction.getIncoming().isDirectory():
msg = "Incoming MUST be a folder!"
self._logger.error(msg)
raise Exception(msg)
# Log
self._logger.info("Incoming folder: " + self._transaction.getIncoming().getAbsolutePath())
# There must be just one subfolder: the user subfolder
subFolders = self._getSubFolders(self._transaction.getIncoming())
if len(subFolders) != 1:
msg = "Expected user subfolder!"
self._logger.error(msg)
raise Exception(msg)
# Set the user folder
userFolder = os.path.join(self._transaction.getIncoming().getAbsolutePath(), subFolders[0])
# In the user subfolder we must find the data_structure.ois file
dataFileName = os.path.join(userFolder, "data_structure.ois")
if not os.path.exists(dataFileName):
msg = "File data_structure.ois not found!"
self._logger.error(msg)
raise Exception(msg)
# Now read the data structure file and store all the pointers to
# the properties files. The paths are stored relative to self._incoming,
# so we can easily build the full file paths.
propertiesFileList = []
f = open(dataFileName)
try:
for line in f:
line = re.sub('[\r\n]', '', line)
propertiesFile = os.path.join(self._transaction.getIncoming().getAbsolutePath(), line)
propertiesFileList.append(propertiesFile)
finally:
f.close()
# Process (and ultimately register) all experiments
for propertiesFile in propertiesFileList:
# Log
self._logger.info("* * * Processing: " + propertiesFile + " * * *")
# Read the properties file into an ElementTree
tree = xml.parse(propertiesFile)
# Now register the experiment
self._register(tree)
def _register(self, tree):
"""Register the Experiment using the parsed properties file.
@param tree ElementTree parsed from the properties XML file.
"""
# Keep track of the Specimens already created since they can be
# common to different plates and across plates and tubes
specimens = {}
# Some sample types we will need
openBISDataSetType = self._prefix + "_FCSFILE"
openBISExperimentSampleType = self._prefix + "_EXPERIMENT"
openBISTraySampleType = self._prefix + "_PLATE"
openBISTubeSampleType = self._prefix + "_TUBE"
openBISTubeSetSampleType = self._prefix + "_TUBESET"
openBISSpecimenSampleType = self._prefix + "_SPECIMEN"
openBISWellSampleType = self._prefix + "_WELL"
openBISAccessoryFileDataSetType = self._prefix + "_ACCESSORY_FILE"
# Get the root node (obitXML)
rootNode = tree.getroot()
# Check the tag
if rootNode.tag != "obitXML":
msg = "Unexpected properties root node tag '" + \
rootNode.tag + "'. Invalid file. Cannot process."
self._logger.error(msg)
raise Exception(msg)
# Make sure that we have the expected version of the properties file
file_version = rootNode.attrib.get("version")
if file_version is None or file_version < self._version:
msg = "PROCESSOR::_register(): Expected properties file version " + \
str(self._version) + ". This file is obsolete. Cannot process."
self._logger.error(msg)
raise Exception(msg)
# Store the machine name
machineName = rootNode.attrib.get("machineName")
if machineName is None:
machineName = ""
# Create a virtual TubeSet: an experiment only has 0 or 1 TubeSets.
openBISTubeSetSample = None
# Iterate over the children (Experiment nodes that map to {...}_EXPERIMENT samples)
for experimentNode in rootNode:
# The tag of the immediate children of the root experimentNode
# must be Experiment
if experimentNode.tag != "Experiment":
msg = "Expected Experiment node, found " + experimentNode.tag
self._logger.error(msg)
raise Exception(msg)
# Process an Experiment XML node and get/create an IExperimentUpdatable
openBISExperimentSample, openBISCollection = \
self._processExperimentNode(
experimentNode,
openBISExperimentSampleType,
machineName)
# Process children of the Experiment
for experimentChildNode in experimentNode:
# The child of an Experiment can be a Tray or a Specimen
experimentChildNodeType = experimentChildNode.tag
if experimentChildNodeType == "Specimen":
# A specimen is a direct child of an experiment if there
# is no plate, and the FCS files are therefore associated
# to tubes. In this case, we create a virtual TubeSet
# sample container (one for all Tubes in the experiment).
if openBISTubeSetSample is None:
openBISTubeSetSample = self._processTubeSetNode(experimentNode,
openBISCollection,
openBISTubeSetSampleType,
openBISExperimentSample.getSampleIdentifier())
# Now we process the Specimen node
specimenNameProperty = experimentChildNode.attrib.get("name")
openBISSpecimenSample = self._processSpecimenNode(experimentChildNode,
specimens,
openBISCollection,
openBISSpecimenSampleType,
specimenNameProperty)
# If this is a new Specimen, add it to the specimens dictionary
if specimenNameProperty not in specimens:
specimens[specimenNameProperty] = openBISSpecimenSample
# Now iterate over the children of the Specimen
for tubeNode in experimentChildNode:
# The child of a Specimen is a Tube
if tubeNode.tag != "Tube":
msg = "Expected Tube node!"
self._logger.error(msg)
raise Exception(msg)
# Process the tube node and get the openBIS object
openBISTubeSample = self._processTube(tubeNode,
openBISCollection,
openBISTubeSampleType,
openBISExperimentSample,
openBISSpecimenSample,
openBISTubeSetSample)
# Now process the FCS file
for fcsFileNode in tubeNode:
# The child of a Tube is an FCSFile
if fcsFileNode.tag != "FCSFile":
msg = "Expected FSC File node!"
self._logger.error(msg)
raise Exception(msg)
# Process the FCS file node
self._processFCSFile(fcsFileNode,
openBISDataSetType,
openBISTubeSample,
openBISCollection)
elif experimentChildNodeType == "Tray":
# Process the tray node and get the openBIS object
openBISTraySample = self._processTrayNode(experimentChildNode,
openBISCollection,
openBISExperimentSample.getSampleIdentifier(),
openBISTraySampleType)
# Now iterate over the children of the Tray
for specimenNode in experimentChildNode:
# The child of a Tray is a Specimen
if specimenNode.tag != "Specimen":
msg = "Expected Specimen node!"
self._logger.error(msg)
raise Exception(msg)
                        # Now we process the Specimen node itself (not its
                        # parent Tray node)
                        specimenNameProperty = specimenNode.attrib.get("name")
                        openBISSpecimenSample = self._processSpecimenNode(specimenNode,
                                                                          specimens,
                                                                          openBISCollection,
                                                                          openBISSpecimenSampleType,
                                                                          specimenNameProperty)
# If this is a new Specimen, add it to the specimens dictionary
if specimenNameProperty not in specimens:
specimens[specimenNameProperty] = openBISSpecimenSample
for wellNode in specimenNode:
# The child of a Specimen is a Tube
if wellNode.tag != "Well":
msg = "Expected Well node!"
self._logger.error(msg)
raise Exception(msg)
# Process the tube node and get the openBIS object
openBISWellSample = self._processWell(wellNode,
openBISCollection,
openBISWellSampleType,
openBISExperimentSample,
openBISSpecimenSample,
openBISTraySample)
# Now process the FCS file
for fcsFileNode in wellNode:
# The child of a Tube is an FCSFile
if fcsFileNode.tag != "FCSFile":
msg = "Expected FSC File node!"
self._logger.error(msg)
raise Exception(msg)
# Process the FCS file node
self._processFCSFile(fcsFileNode,
openBISDataSetType,
openBISWellSample,
openBISCollection)
else:
msg = "The Node must be either a Specimen or a Tray"
self._logger.error(msg)
raise Exception(msg)
# Register the accessory files (for each Experiment Node)
expRelativePath = experimentNode.attrib.get("relativePath")
self.registerAccessoryFilesAsDatasets(expRelativePath,
openBISExperimentSampleType,
openBISAccessoryFileDataSetType,
openBISExperimentSample)
# Log that we are finished with the registration
self._logger.info("Registration completed")
def _registerAttachmentsToCollection(self,
attachments,
openBISCollection,
openBISExperimentSample):
"""Register a list of files to the collection.
@param attachments Comma-separated list of file names.
@param openBISCollection openBIS Collection object.
@param openBISExperimentSample openBIS Experiment Sample object.
"""
# Extract all relative file names
if type(attachments) is str:
attachmentFiles = attachments.split(";")
elif type(attachments) is list:
attachmentFiles = attachments
else:
return False
for f in attachmentFiles:
# This is an additional security step
if f == '':
continue
# Build the full path
attachmentFilePath = os.path.join(self._transaction.getIncoming().getAbsolutePath(), f)
# Extract the file name
attachmentFileName = os.path.basename(attachmentFilePath)
# Create a dataset of type ATTACHMENT and add it to the
# {...}_EXPERIMENT sample and the containing COLLECTION
attachmentDataSet = self._transaction.createNewDataSet("ATTACHMENT")
self._transaction.moveFile(attachmentFilePath, attachmentDataSet)
attachmentDataSet.setPropertyValue("$NAME", attachmentFileName)
attachmentDataSet.setSample(openBISExperimentSample)
return True
def _registerTags(self,
openBISExperimentSample,
tagList):
"""Register the tags as parent samples of type ORGANIZATION_UNIT.
@param openBISExperimentSample openBIS Experiment Sample object.
@param tagList Comma-separated list of tag names.
"""
# Make sure tagList is not None
if tagList is None:
return openBISExperimentSample
# Collect the parent sample identifiers
tagSampleIdentifiers = []
# Get the individual tag names (with no blank spaces)
tags = ["".join(t.strip()) for t in tagList.split(",")]
# Process all tags
for tag in tags:
if len(tag) == 0:
continue
# The tag (a sample of type "ORGANIZATION_UNIT") is expected to exist.
# If it does not exist, we skip creation, since we do not have NAME
# and DESCRIPTION to create a meaningful one.
sample = self._transaction.getSample(tag)
if sample is not None:
tagSampleIdentifiers.append(tag)
# Add tag samples as parent
openBISExperimentSample.setParentSampleIdentifiers(tagSampleIdentifiers)
return openBISExperimentSample
def _setup_logger(self, log_dir_path, logger_name, level=logging.DEBUG):
"""
Sets up the logger.
@param log_dir_path: Full path to the log folder.
@param logger_name: Name of the logger.
@param level: Debug level (optional, default = logging.DEBUG)
@return Logger object.
"""
        # Make sure the logs subfolder exists
if not os.path.exists(log_dir_path):
os.makedirs(log_dir_path)
# Path for the log file
log_filename = os.path.join(log_dir_path, "log.txt")
# Set up logging
logging.basicConfig(filename=log_filename, level=level,
format='%(asctime)-15s %(levelname)s: %(message)s')
logger = logging.getLogger(logger_name)
return logger
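# A hedged sketch of how an openBIS dropbox script might drive this class.
# The `transaction` object is provided by the DSS runtime, not built by hand;
# the prefix, version and log directory values here are illustrative only:
#
#   def process(transaction):
#       processor = Processor(transaction, "LSR_FORTESSA", 2, "../logs")
#       processor.run()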
| 40.047782
| 112
| 0.570735
|
a4b7a17e5af421c7ece5a0168657a576ae4d2317
| 1,037
|
py
|
Python
|
main.py
|
ghnam-ken/PoST
|
19da76d30c828ec4de57a1964c270dfdd13a3b09
|
[
"MIT"
] | 9
|
2021-06-20T16:11:08.000Z
|
2022-03-14T05:50:47.000Z
|
main.py
|
ghnam-ken/PoST
|
19da76d30c828ec4de57a1964c270dfdd13a3b09
|
[
"MIT"
] | 1
|
2022-01-09T18:43:22.000Z
|
2022-01-09T18:43:22.000Z
|
main.py
|
ghnam-ken/PoST
|
19da76d30c828ec4de57a1964c270dfdd13a3b09
|
[
"MIT"
] | null | null | null |
import argparse
def get_options():
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='val')
# data
parser.add_argument('--data', type=str, required=True, help='name of data to load')
parser.add_argument('--data_root', type=str, required=True, help='path/to/data/root')
parser.add_argument('--num_worker', type=int, default=16)
    parser.add_argument('--num_cp', type=int, default=128, help='the number of control points to sample')
parser.add_argument('--img_size', type=int, default=480)
# model
parser.add_argument('--num_iter', type=int, default=5)
parser.add_argument('--net_path', type=str, default=None)
# others
parser.add_argument('--save_root', type=str, default='./results')
opt = parser.parse_args()
return opt
if __name__ == "__main__":
opt = get_options()
if opt.mode == 'val':
from val import main
else:
raise NotImplementedError(f'{opt.mode} is not implemented yet')
main(opt)
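# Example invocation (paths and dataset name are placeholders):
#
#   python main.py --mode val --data my_dataset --data_root /path/to/data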
| 29.628571
| 106
| 0.659595
|
66b62627299ad40fc2e0d244d596ee8fc082a7ad
| 5,260
|
py
|
Python
|
Manager/modules/afk.py
|
prince301102/selinia
|
4fd0fe3fbe622539c7557ebecde11976fa0a1d39
|
[
"MIT"
] | 1
|
2021-02-25T14:01:50.000Z
|
2021-02-25T14:01:50.000Z
|
Manager/modules/afk.py
|
prince301102/selinia
|
4fd0fe3fbe622539c7557ebecde11976fa0a1d39
|
[
"MIT"
] | null | null | null |
Manager/modules/afk.py
|
prince301102/selinia
|
4fd0fe3fbe622539c7557ebecde11976fa0a1d39
|
[
"MIT"
] | 2
|
2021-01-08T16:35:10.000Z
|
2021-04-07T16:59:20.000Z
|
import random
from typing import Optional
from telegram import Message, Update, Bot, User, Chat
from telegram import MessageEntity, ParseMode
from telegram.error import BadRequest
from telegram.ext import Filters, MessageHandler, run_async
from Manager import dispatcher
from Manager.modules.disable import DisableAbleCommandHandler, DisableAbleRegexHandler
from Manager.modules.sql import afk_sql as sql
from Manager.modules.users import get_user_id
AFK_GROUP = 7
AFK_REPLY_GROUP = 8
@run_async
def afk(bot: Bot, update: Update):
chat = update.effective_chat # type: Optional[Chat]
args = update.effective_message.text.split(None, 1)
if len(args) >= 2:
reason = args[1]
else:
reason = ""
sql.set_afk(update.effective_user.id, reason)
fname = update.effective_user.first_name
update.effective_message.reply_text("{} is now away!😄".format(fname))
@run_async
def no_longer_afk(bot: Bot, update: Update):
user = update.effective_user # type: Optional[User]
chat = update.effective_chat # type: Optional[Chat]
message = update.effective_message # type: Optional[Message]
if not user: # ignore channels
return
res = sql.rm_afk(user.id)
if res:
        if message.new_chat_members:  # don't reply to service messages
return
firstname = update.effective_user.first_name
try:
options = [
'{} is here!',
'{} is back!',
'{} is now in the chat!',
'{} is awake!',
'{} is back online!',
'{} is finally here!',
'Welcome back! {}',
'Where is {}?\nIn the chat!'
]
chosen_option = random.choice(options)
update.effective_message.reply_text(chosen_option.format(firstname))
        except Exception:
            return
@run_async
def reply_afk(bot: Bot, update: Update):
message = update.effective_message # type: Optional[Message]
userc = update.effective_user # type: Optional[User]
userc_id = userc.id
if message.entities and message.parse_entities(
[MessageEntity.TEXT_MENTION, MessageEntity.MENTION]):
entities = message.parse_entities(
[MessageEntity.TEXT_MENTION, MessageEntity.MENTION])
chk_users = []
for ent in entities:
if ent.type == MessageEntity.TEXT_MENTION:
user_id = ent.user.id
fst_name = ent.user.first_name
if user_id in chk_users:
return
chk_users.append(user_id)
if ent.type == MessageEntity.MENTION:
user_id = get_user_id(message.text[ent.offset:ent.offset +
ent.length])
if not user_id:
# Should never happen, since for a user to become AFK they must have spoken. Maybe changed username?
return
if user_id in chk_users:
return
chk_users.append(user_id)
try:
chat = bot.get_chat(user_id)
except BadRequest:
print("Error: Could not fetch userid {} for AFK module".
format(user_id))
return
fst_name = chat.first_name
else:
return
check_afk(bot, update, user_id, fst_name, userc_id)
elif message.reply_to_message:
user_id = message.reply_to_message.from_user.id
fst_name = message.reply_to_message.from_user.first_name
check_afk(bot, update, user_id, fst_name, userc_id)
def check_afk(bot, update, user_id, fst_name, userc_id):
chat = update.effective_chat # type: Optional[Chat]
if sql.is_afk(user_id):
user = sql.check_afk_status(user_id)
if not user.reason:
if int(userc_id) == int(user_id):
return
res = "{} is afk".format(fst_name)
update.effective_message.reply_text(res)
else:
if int(userc_id) == int(user_id):
return
res = "{} is afk.\nReason: {}".format(fst_name, user.reason)
update.effective_message.reply_text(res)
__help__ = """
• `/afk <reason>`*:* mark yourself as AFK (away from keyboard).
• `brb <reason>`*:* same as the /afk command, but without the slash.
When marked as AFK, any mentions will be replied to with a message to say you're not available!
"""
AFK_HANDLER = DisableAbleCommandHandler("afk", afk)
AFK_REGEX_HANDLER = DisableAbleRegexHandler("(?i)brb", afk, friendly="afk")
NO_AFK_HANDLER = MessageHandler(Filters.all & Filters.group, no_longer_afk)
AFK_REPLY_HANDLER = MessageHandler(Filters.all & Filters.group, reply_afk)
dispatcher.add_handler(AFK_HANDLER, AFK_GROUP)
dispatcher.add_handler(AFK_REGEX_HANDLER, AFK_GROUP)
dispatcher.add_handler(NO_AFK_HANDLER, AFK_GROUP)
dispatcher.add_handler(AFK_REPLY_HANDLER, AFK_REPLY_GROUP)
__mod_name__ = "AFK"
__command_list__ = ["afk"]
__handlers__ = [(AFK_HANDLER, AFK_GROUP), (AFK_REGEX_HANDLER, AFK_GROUP), (NO_AFK_HANDLER, AFK_GROUP),
(AFK_REPLY_HANDLER, AFK_REPLY_GROUP)]
| 35.066667
| 120
| 0.620913
|
1de03d6183ddb74eb352696cc33633edcd5fad53
| 131,518
|
py
|
Python
|
src/sagemaker/estimator.py
|
eugeneteoh/sagemaker-python-sdk
|
814dd3df85ab613f97ae7f31572e08bd23b4137c
|
[
"Apache-2.0"
] | null | null | null |
src/sagemaker/estimator.py
|
eugeneteoh/sagemaker-python-sdk
|
814dd3df85ab613f97ae7f31572e08bd23b4137c
|
[
"Apache-2.0"
] | null | null | null |
src/sagemaker/estimator.py
|
eugeneteoh/sagemaker-python-sdk
|
814dd3df85ab613f97ae7f31572e08bd23b4137c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Placeholder docstring"""
from __future__ import absolute_import, print_function
import json
import logging
import os
import uuid
from abc import ABCMeta, abstractmethod
from six import string_types, with_metaclass
from six.moves.urllib.parse import urlparse
import sagemaker
from sagemaker import git_utils, image_uris, vpc_utils
from sagemaker.analytics import TrainingJobAnalytics
from sagemaker.debugger import ( # noqa: F401 # pylint: disable=unused-import
DEBUGGER_FLAG,
DebuggerHookConfig,
FrameworkProfile,
ProfilerConfig,
ProfilerRule,
Rule,
TensorBoardOutputConfig,
get_default_profiler_rule,
get_rule_container_image_uri,
)
from sagemaker.deprecations import removed_function, removed_kwargs, renamed_kwargs
from sagemaker.fw_utils import (
UploadedCode,
_region_supports_debugger,
_region_supports_profiler,
get_mp_parameters,
tar_and_upload_dir,
validate_source_dir,
)
from sagemaker.inputs import TrainingInput
from sagemaker.job import _Job
from sagemaker.local import LocalSession
from sagemaker.model import (
CONTAINER_LOG_LEVEL_PARAM_NAME,
DIR_PARAM_NAME,
JOB_NAME_PARAM_NAME,
NEO_ALLOWED_FRAMEWORKS,
SAGEMAKER_REGION_PARAM_NAME,
SCRIPT_PARAM_NAME,
Model,
)
from sagemaker.predictor import Predictor
from sagemaker.s3 import S3Uploader, parse_s3_url
from sagemaker.session import Session
from sagemaker.transformer import Transformer
from sagemaker.utils import (
base_from_name,
base_name_from_image,
build_dict,
get_config_value,
name_from_base,
)
from sagemaker.workflow.entities import Expression
from sagemaker.workflow.parameters import Parameter
from sagemaker.workflow.properties import Properties
logger = logging.getLogger(__name__)
class EstimatorBase(with_metaclass(ABCMeta, object)): # pylint: disable=too-many-public-methods
"""Handle end-to-end Amazon SageMaker training and deployment tasks.
For introduction to model training and deployment, see
http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html
Subclasses must define a way to determine what image to use for training,
what hyperparameters to use, and how to create an appropriate predictor
instance.
"""
def __init__(
self,
role,
instance_count=None,
instance_type=None,
volume_size=30,
volume_kms_key=None,
max_run=24 * 60 * 60,
input_mode="File",
output_path=None,
output_kms_key=None,
base_job_name=None,
sagemaker_session=None,
tags=None,
subnets=None,
security_group_ids=None,
model_uri=None,
model_channel_name="model",
metric_definitions=None,
encrypt_inter_container_traffic=False,
use_spot_instances=False,
max_wait=None,
checkpoint_s3_uri=None,
checkpoint_local_path=None,
rules=None,
debugger_hook_config=None,
tensorboard_output_config=None,
enable_sagemaker_metrics=None,
enable_network_isolation=False,
profiler_config=None,
disable_profiler=False,
environment=None,
max_retry_attempts=None,
**kwargs,
):
"""Initialize an ``EstimatorBase`` instance.
Args:
role (str): An AWS IAM role (either name or full ARN). The Amazon
SageMaker training jobs and APIs that create Amazon SageMaker
endpoints use this role to access training data and model
artifacts. After the endpoint is created, the inference code
might use the IAM role, if it needs to access an AWS resource.
instance_count (int): Number of Amazon EC2 instances to use
for training.
instance_type (str): Type of EC2 instance to use for training,
for example, 'ml.c4.xlarge'.
volume_size (int): Size in GB of the EBS volume to use for
storing input data during training (default: 30). Must be large
enough to store training data if File Mode is used (which is the
default).
volume_kms_key (str): Optional. KMS key ID for encrypting EBS
volume attached to the training instance (default: None).
max_run (int): Timeout in seconds for training (default: 24 *
60 * 60). After this amount of time Amazon SageMaker terminates
the job regardless of its current status.
input_mode (str): The input mode that the algorithm supports
(default: 'File'). Valid modes:
                'File' - Amazon SageMaker copies the training dataset from the
S3 location to a local directory.
'Pipe' - Amazon SageMaker streams data directly from S3 to the
container via a Unix-named pipe.
'FastFile' - Amazon SageMaker streams data from S3 on demand instead of
downloading the entire dataset before training begins. This argument can
                    be overridden on a per-channel basis using
``sagemaker.inputs.TrainingInput.input_mode``.
output_path (str): S3 location for saving the training result (model
artifacts and output files). If not specified, results are
stored to a default bucket. If the bucket with the specific name
does not exist, the estimator creates the bucket during the
:meth:`~sagemaker.estimator.EstimatorBase.fit` method execution.
file:// urls are used for local mode. For example: 'file://model/'
will save to the model folder in the current directory.
output_kms_key (str): Optional. KMS key ID for encrypting the
training output (default: Your IAM role's KMS key for Amazon S3).
If you don't provide a KMS key ID, Amazon SageMaker uses the
default KMS key for Amazon S3 of the account linked to your
IAM role.
base_job_name (str): Prefix for training job name when the
:meth:`~sagemaker.estimator.EstimatorBase.fit` method launches.
If not specified, the estimator generates a default job name
based on the training image name and current timestamp.
sagemaker_session (sagemaker.session.Session): Session object which
manages interactions with Amazon SageMaker APIs and any other
AWS services needed. If not specified, the estimator creates one
using the default AWS configuration chain.
tags (list[dict]): List of tags for labeling a training job. For
more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
subnets (list[str]): List of subnet ids. If not specified training
job will be created without VPC config.
security_group_ids (list[str]): List of security group ids. If not
specified training job will be created without VPC config.
model_uri (str): URI where a pre-trained model is stored, either
locally or in S3 (default: None). If specified, the estimator
will create a channel pointing to the model so the training job
can download it. This model can be a 'model.tar.gz' from a
previous training job, or other artifacts coming from a
different source.
In local mode, this should point to the path in which the model
is located and not the file itself, as local Docker containers
will try to mount the URI as a volume.
More information:
https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-training.html#td-deserialization
model_channel_name (str): Name of the channel where 'model_uri' will
be downloaded (default: 'model').
metric_definitions (list[dict]): A list of dictionaries that defines
the metric(s) used to evaluate the training jobs. Each
dictionary contains two keys: 'Name' for the name of the metric,
and 'Regex' for the regular expression used to extract the
metric from the logs. This should be defined only for jobs that
don't use an Amazon algorithm.
encrypt_inter_container_traffic (bool): Specifies whether traffic
between training containers is encrypted for the training job
(default: ``False``).
use_spot_instances (bool): Specifies whether to use SageMaker
Managed Spot instances for training. If enabled then the
``max_wait`` arg should also be set.
More information:
https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html
(default: ``False``).
max_wait (int): Timeout in seconds waiting for spot training
job (default: None). After this amount of time Amazon
SageMaker will stop waiting for managed spot training job to
complete (default: ``None``).
checkpoint_s3_uri (str): The S3 URI in which to persist checkpoints
that the algorithm persists (if any) during training. (default:
``None``).
checkpoint_local_path (str): The local path that the algorithm
writes its checkpoints to. SageMaker will persist all files
under this path to `checkpoint_s3_uri` continually during
training. On job startup the reverse happens - data from the
s3 location is downloaded to this path before the algorithm is
started. If the path is unset then SageMaker assumes the
checkpoints will be provided under `/opt/ml/checkpoints/`.
(default: ``None``).
rules (list[:class:`~sagemaker.debugger.RuleBase`]): A list of
:class:`~sagemaker.debugger.RuleBase` objects used to define
SageMaker Debugger rules for real-time analysis
(default: ``None``). For more information,
see `Continuous analyses through rules
<https://sagemaker.readthedocs.io/en/stable/amazon_sagemaker_debugger.html
#continuous-analyses-through-rules)>`_.
debugger_hook_config (:class:`~sagemaker.debugger.DebuggerHookConfig` or bool):
Configuration for how debugging information is emitted with
SageMaker Debugger. If not specified, a default one is created using
the estimator's ``output_path``, unless the region does not
support SageMaker Debugger. To disable SageMaker Debugger,
set this parameter to ``False``. For more information, see
`Capture real-time debugging data during model training in Amazon SageMaker
<https://sagemaker.readthedocs.io/en/stable/amazon_sagemaker_debugger.html#
capture-real-time-debugging-data-during-model-training-in-amazon-sagemaker>`_.
tensorboard_output_config (:class:`~sagemaker.debugger.TensorBoardOutputConfig`):
Configuration for customizing debugging visualization using TensorBoard
(default: ``None``). For more information,
see `Capture real time tensorboard data
<https://sagemaker.readthedocs.io/en/stable/amazon_sagemaker_debugger.html#
capture-real-time-tensorboard-data-from-the-debugging-hook>`_.
enable_sagemaker_metrics (bool): Enables SageMaker Metrics Time
Series. For more information, see `AlgorithmSpecification API
<https://docs.aws.amazon.com/sagemaker/latest/dg/
API_AlgorithmSpecification.html#SageMaker-Type-AlgorithmSpecification-
EnableSageMakerMetricsTimeSeries>`_.
(default: ``None``).
enable_network_isolation (bool): Specifies whether container will
run in network isolation mode (default: ``False``). Network
isolation mode restricts the container access to outside networks
(such as the Internet). The container does not make any inbound or
outbound network calls. Also known as Internet-free mode.
profiler_config (:class:`~sagemaker.debugger.ProfilerConfig`):
Configuration for how SageMaker Debugger collects
monitoring and profiling information from your training job.
If not specified, a default configuration is created using
the estimator's ``output_path``, unless the region does not
support SageMaker Debugger. To disable SageMaker Debugger
monitoring and profiling, set the
``disable_profiler`` parameter to ``True``.
disable_profiler (bool): Specifies whether Debugger monitoring and profiling
will be disabled (default: ``False``).
environment (dict[str, str]) : Environment variables to be set for
use during training job (default: ``None``)
max_retry_attempts (int): The number of times to retry the job by moving it
back to the STARTING status. You can specify between 1 and 30 attempts.
If the value is greater than zero, the job is retried on
``InternalServerFailure`` errors up to that many times.
You can cap the total duration for your job by setting ``max_wait`` and ``max_run``
(default: ``None``)
"""
instance_count = renamed_kwargs(
"train_instance_count", "instance_count", instance_count, kwargs
)
instance_type = renamed_kwargs(
"train_instance_type", "instance_type", instance_type, kwargs
)
max_run = renamed_kwargs("train_max_run", "max_run", max_run, kwargs)
use_spot_instances = renamed_kwargs(
"train_use_spot_instances", "use_spot_instances", use_spot_instances, kwargs
)
max_wait = renamed_kwargs("train_max_wait", "max_wait", max_wait, kwargs)
volume_size = renamed_kwargs("train_volume_size", "volume_size", volume_size, kwargs)
volume_kms_key = renamed_kwargs(
"train_volume_kms_key", "volume_kms_key", volume_kms_key, kwargs
)
if instance_count is None or instance_type is None:
raise ValueError("Both instance_count and instance_type are required.")
self.role = role
self.instance_count = instance_count
self.instance_type = instance_type
self.volume_size = volume_size
self.volume_kms_key = volume_kms_key
self.max_run = max_run
self.input_mode = input_mode
self.tags = tags
self.metric_definitions = metric_definitions
self.model_uri = model_uri
self.model_channel_name = model_channel_name
self.code_uri = None
self.code_channel_name = "code"
if self.instance_type in ("local", "local_gpu"):
if self.instance_type == "local_gpu" and self.instance_count > 1:
raise RuntimeError("Distributed Training in Local GPU is not supported")
self.sagemaker_session = sagemaker_session or LocalSession()
if not isinstance(self.sagemaker_session, sagemaker.local.LocalSession):
raise RuntimeError(
"instance_type local or local_gpu is only supported with an"
"instance of LocalSession"
)
else:
self.sagemaker_session = sagemaker_session or Session()
self.base_job_name = base_job_name
self._current_job_name = None
if (
not self.sagemaker_session.local_mode
and output_path
and output_path.startswith("file://")
):
raise RuntimeError("file:// output paths are only supported in Local Mode")
self.output_path = output_path
self.output_kms_key = output_kms_key
self.latest_training_job = None
self.jobs = []
self.deploy_instance_type = None
self._compiled_models = {}
# VPC configurations
self.subnets = subnets
self.security_group_ids = security_group_ids
self.encrypt_inter_container_traffic = encrypt_inter_container_traffic
self.use_spot_instances = use_spot_instances
self.max_wait = max_wait
self.checkpoint_s3_uri = checkpoint_s3_uri
self.checkpoint_local_path = checkpoint_local_path
self.rules = rules
self.debugger_hook_config = debugger_hook_config
self.tensorboard_output_config = tensorboard_output_config
self.debugger_rule_configs = None
self.collection_configs = None
self.enable_sagemaker_metrics = enable_sagemaker_metrics
self._enable_network_isolation = enable_network_isolation
self.profiler_config = profiler_config
self.disable_profiler = disable_profiler
self.environment = environment
self.max_retry_attempts = max_retry_attempts
if not _region_supports_profiler(self.sagemaker_session.boto_region_name):
self.disable_profiler = True
self.profiler_rule_configs = None
self.profiler_rules = None
self.debugger_rules = None
@abstractmethod
def training_image_uri(self):
"""Return the Docker image to use for training.
The :meth:`~sagemaker.estimator.EstimatorBase.fit` method, which does
the model training, calls this method to find the image to use for model
training.
Returns:
str: The URI of the Docker image.
"""
@abstractmethod
def hyperparameters(self):
"""Return the hyperparameters as a dictionary to use for training.
The :meth:`~sagemaker.estimator.EstimatorBase.fit` method, which
trains the model, calls this method to find the hyperparameters.
Returns:
dict[str, str]: The hyperparameters.
"""
def enable_network_isolation(self):
"""Return True if this Estimator will need network isolation to run.
Returns:
bool: Whether this Estimator needs network isolation or not.
"""
return self._enable_network_isolation
def prepare_workflow_for_training(self, job_name=None):
"""Calls _prepare_for_training. Used when setting up a workflow.
Args:
job_name (str): Name of the training job to be created. If not
specified, one is generated, using the base name given to the
constructor if applicable.
"""
self._prepare_for_training(job_name=job_name)
def _ensure_base_job_name(self):
"""Set ``self.base_job_name`` if it is not set already."""
# honor supplied base_job_name or generate it
if self.base_job_name is None:
self.base_job_name = base_name_from_image(self.training_image_uri())
def _get_or_create_name(self, name=None):
"""Generate a name based on the base job name or training image if needed.
Args:
name (str): User-supplied name. If not specified, a name is generated from
the base job name or training image.
Returns:
str: Either the user-supplied name or a generated name.
"""
if name:
return name
self._ensure_base_job_name()
return name_from_base(self.base_job_name)
def _prepare_for_training(self, job_name=None):
"""Set any values in the estimator that need to be set before training.
Args:
job_name (str): Name of the training job to be created. If not
specified, one is generated, using the base name given to the
constructor if applicable.
"""
self._current_job_name = self._get_or_create_name(job_name)
# if output_path was specified we use it otherwise initialize here.
# For Local Mode with local_code=True we don't need an explicit output_path
if self.output_path is None:
local_code = get_config_value("local.local_code", self.sagemaker_session.config)
if self.sagemaker_session.local_mode and local_code:
self.output_path = ""
else:
self.output_path = "s3://{}/".format(self.sagemaker_session.default_bucket())
self._prepare_rules()
self._prepare_debugger_for_training()
self._prepare_profiler_for_training()
def _prepare_rules(self):
"""Rules list includes both debugger and profiler rules.
Customer can explicitly disable any rule by setting rules to an empty list.
"""
self.debugger_rules = []
self.profiler_rules = []
if self.rules is not None:
for rule in self.rules:
if isinstance(rule, Rule):
self.debugger_rules.append(rule)
elif isinstance(rule, ProfilerRule):
self.profiler_rules.append(rule)
else:
raise RuntimeError(
"Rules list can only contain sagemaker.debugger.Rule "
+ "and sagemaker.debugger.ProfilerRule"
)
def _prepare_debugger_for_training(self):
"""Prepare debugger rules and debugger configs for training."""
if self.debugger_rules and self.debugger_hook_config is None:
self.debugger_hook_config = DebuggerHookConfig(s3_output_path=self.output_path)
# If debugger_hook_config was provided without an S3 URI, default it for the customer.
if self.debugger_hook_config and not self.debugger_hook_config.s3_output_path:
self.debugger_hook_config.s3_output_path = self.output_path
self.debugger_rule_configs = self._prepare_debugger_rules()
self._prepare_collection_configs()
def _prepare_debugger_rules(self):
"""Set any necessary values in debugger rules, if they are provided."""
debugger_rule_configs = []
if self.debugger_rules:
for rule in self.debugger_rules:
self._set_default_rule_config(rule)
self._set_source_s3_uri(rule)
rule.prepare_actions(self._current_job_name)
debugger_rule_configs.append(rule.to_debugger_rule_config_dict())
return debugger_rule_configs
def _prepare_collection_configs(self):
"""De-duplicate configurations and save them in the debugger hook configuration."""
# Create a set to de-duplicate CollectionConfigs.
self.collection_configs = set()
# Iterate through the debugger rules and add their respective CollectionConfigs to the set.
if self.debugger_rules:
for rule in self.debugger_rules:
self.collection_configs.update(rule.collection_configs)
# Add the CollectionConfigs from DebuggerHookConfig to the set.
if self.debugger_hook_config:
self.collection_configs.update(self.debugger_hook_config.collection_configs or [])
def _prepare_profiler_for_training(self):
"""Set necessary values and do basic validations in profiler config and profiler rules.
When the user explicitly sets ``rules`` to an empty list, the default profiler rule is not enabled.
The default profiler rule is enabled in supported regions when either:
1. the user doesn't specify any rules, i.e., ``rules=None``; or
2. the user specifies only debugger rules, i.e., ``rules=[Rule.sagemaker(...)]``
"""
if self.disable_profiler:
if self.profiler_config:
raise RuntimeError("profiler_config cannot be set when disable_profiler is True.")
if self.profiler_rules:
raise RuntimeError("ProfilerRule cannot be set when disable_profiler is True.")
elif _region_supports_profiler(self.sagemaker_session.boto_region_name):
if self.profiler_config is None:
self.profiler_config = ProfilerConfig(s3_output_path=self.output_path)
if self.rules is None or (self.rules and not self.profiler_rules):
self.profiler_rules = [get_default_profiler_rule()]
if self.profiler_config and not self.profiler_config.s3_output_path:
self.profiler_config.s3_output_path = self.output_path
self.profiler_rule_configs = self._prepare_profiler_rules()
def _prepare_profiler_rules(self):
"""Set any necessary values in profiler rules, if they are provided."""
profiler_rule_configs = []
if self.profiler_rules:
for rule in self.profiler_rules:
self._set_default_rule_config(rule)
self._set_source_s3_uri(rule)
profiler_rule_configs.append(rule.to_profiler_rule_config_dict())
return profiler_rule_configs
def _set_default_rule_config(self, rule):
"""Set default rule configurations.
Args:
rule (:class:`~sagemaker.debugger.RuleBase`): Any rule object that derives from RuleBase
"""
if rule.image_uri == "DEFAULT_RULE_EVALUATOR_IMAGE":
rule.image_uri = get_rule_container_image_uri(self.sagemaker_session.boto_region_name)
rule.instance_type = None
rule.volume_size_in_gb = None
def _set_source_s3_uri(self, rule):
"""Set updated source S3 uri when specified.
Args:
rule (:class:`~sagemaker.debugger.RuleBase`): Any rule object that derives from RuleBase
"""
if "source_s3_uri" in (rule.rule_parameters or {}):
parse_result = urlparse(rule.rule_parameters["source_s3_uri"])
if parse_result.scheme != "s3":
desired_s3_uri = os.path.join(
"s3://",
self.sagemaker_session.default_bucket(),
rule.name,
str(uuid.uuid4()),
)
s3_uri = S3Uploader.upload(
local_path=rule.rule_parameters["source_s3_uri"],
desired_s3_uri=desired_s3_uri,
sagemaker_session=self.sagemaker_session,
)
rule.rule_parameters["source_s3_uri"] = s3_uri
def latest_job_debugger_artifacts_path(self):
"""Gets the path to the DebuggerHookConfig output artifacts.
Returns:
str: An S3 path to the output artifacts.
"""
self._ensure_latest_training_job(
error_message="""Cannot get the Debugger artifacts path.
The Estimator is not associated with a training job."""
)
if self.debugger_hook_config is not None:
return os.path.join(
self.debugger_hook_config.s3_output_path,
self.latest_training_job.name,
"debug-output",
)
return None
def latest_job_tensorboard_artifacts_path(self):
"""Gets the path to the TensorBoardOutputConfig output artifacts.
Returns:
str: An S3 path to the output artifacts.
"""
self._ensure_latest_training_job(
error_message="""Cannot get the TensorBoard artifacts path.
The Estimator is not associated with a training job."""
)
if self.tensorboard_output_config is not None:
return os.path.join(
self.tensorboard_output_config.s3_output_path,
self.latest_training_job.name,
"tensorboard-output",
)
return None
def latest_job_profiler_artifacts_path(self):
"""Gets the path to the profiling output artifacts.
Returns:
str: An S3 path to the output artifacts.
"""
self._ensure_latest_training_job(
error_message="""Cannot get the profiling output artifacts path.
The Estimator is not associated with a training job."""
)
if self.profiler_config is not None:
return os.path.join(
self.profiler_config.s3_output_path,
self.latest_training_job.name,
"profiler-output",
)
return None
def fit(self, inputs=None, wait=True, logs="All", job_name=None, experiment_config=None):
"""Train a model using the input training dataset.
The API calls the Amazon SageMaker CreateTrainingJob API to start
model training. The API uses the configuration you provided to create the
estimator and the specified input training data to send the
CreateTrainingJob request to Amazon SageMaker.
This is a synchronous operation. After the model training
successfully completes, you can call the ``deploy()`` method to host the
model using the Amazon SageMaker hosting services.
Args:
inputs (str or dict or sagemaker.inputs.TrainingInput or
sagemaker.inputs.FileSystemInput): Information about the training data.
This can be one of four types:
* (str) the S3 location where training data is saved, or a file:// path in
local mode.
* (dict[str, str] or dict[str, sagemaker.inputs.TrainingInput] or
dict[str, sagemaker.inputs.FileSystemInput]) If using multiple channels for
training data, you can specify a dict mapping channel names to strings or
:func:`~sagemaker.inputs.TrainingInput` objects or
:func:`~sagemaker.inputs.FileSystemInput` objects.
* (sagemaker.inputs.TrainingInput) - channel configuration for S3 data sources
that can provide additional information as well as the path to the training
dataset.
See :func:`sagemaker.inputs.TrainingInput` for full details.
* (sagemaker.inputs.FileSystemInput) - channel configuration for
a file system data source that can provide additional information as well as
the path to the training dataset.
wait (bool): Whether the call should wait until the job completes (default: True).
logs ([str]): A list of strings specifying which logs to print. Acceptable
strings are "All", "None", "Training", or "Rules". To maintain backwards
compatibility, boolean values are also accepted and converted to strings.
Only meaningful when wait is True.
job_name (str): Training job name. If not specified, the estimator generates
a default job name based on the training image name and current timestamp.
experiment_config (dict[str, str]): Experiment management configuration.
Optionally, the dict can contain three keys:
'ExperimentName', 'TrialName', and 'TrialComponentDisplayName'.
The behavior of setting these keys is as follows:
* If `ExperimentName` is supplied but `TrialName` is not a Trial will be
automatically created and the job's Trial Component associated with the Trial.
* If `TrialName` is supplied and the Trial already exists the job's Trial Component
will be associated with the Trial.
* If both `ExperimentName` and `TrialName` are not supplied the trial component
will be unassociated.
* `TrialComponentDisplayName` is used for display in Studio.
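Examples:
A minimal sketch of single- and multi-channel training input; the
bucket name and channel names below are hypothetical:
>>> # single channel given as an S3 prefix (hypothetical bucket)
>>> estimator.fit("s3://my-bucket/training-data/")
>>> # multiple named channels, mixing strings and TrainingInput objects
>>> from sagemaker.inputs import TrainingInput
>>> estimator.fit({
...     "train": TrainingInput("s3://my-bucket/train/", content_type="text/csv"),
...     "validation": "s3://my-bucket/validation/",
... })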
"""
self._prepare_for_training(job_name=job_name)
self.latest_training_job = _TrainingJob.start_new(self, inputs, experiment_config)
self.jobs.append(self.latest_training_job)
if wait:
self.latest_training_job.wait(logs=logs)
def _compilation_job_name(self):
"""Placeholder docstring"""
base_name = self.base_job_name or base_name_from_image(self.training_image_uri())
return name_from_base("compilation-" + base_name)
def compile_model(
self,
target_instance_family,
input_shape,
output_path,
framework=None,
framework_version=None,
compile_max_run=15 * 60,
tags=None,
target_platform_os=None,
target_platform_arch=None,
target_platform_accelerator=None,
compiler_options=None,
**kwargs,
):
"""Compile a Neo model using the input model.
Args:
target_instance_family (str): Identifies the device that you want to
run your model after compilation, for example: ml_c5. For allowed
strings see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_OutputConfig.html.
input_shape (dict): Specifies the name and shape of the expected
inputs for your trained model in json dictionary form, for
example: {'data':[1,3,1024,1024]}, or {'var1': [1,1,28,28],
'var2':[1,1,28,28]}
output_path (str): Specifies where to store the compiled model
framework (str): The framework that is used to train the original
model. Allowed values: 'mxnet', 'tensorflow', 'keras', 'pytorch',
'onnx', 'xgboost'
framework_version (str): The version of the framework
compile_max_run (int): Timeout in seconds for compilation (default:
15 * 60). After this amount of time Amazon SageMaker Neo
terminates the compilation job regardless of its current status.
tags (list[dict]): List of tags for labeling a compilation job. For
more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
target_platform_os (str): Target Platform OS, for example: 'LINUX'.
For allowed strings see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_OutputConfig.html.
It can be used instead of target_instance_family.
target_platform_arch (str): Target Platform Architecture, for example: 'X86_64'.
For allowed strings see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_OutputConfig.html.
It can be used instead of target_instance_family.
target_platform_accelerator (str, optional): Target Platform Accelerator,
for example: 'NVIDIA'. For allowed strings see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_OutputConfig.html.
It can be used instead of target_instance_family.
compiler_options (dict, optional): Additional parameters for compiler.
Compiler Options are TargetPlatform / target_instance_family specific. See
https://docs.aws.amazon.com/sagemaker/latest/dg/API_OutputConfig.html for details.
**kwargs: Passed to invocation of ``create_model()``.
Implementations may customize ``create_model()`` to accept
``**kwargs`` to customize model creation during deploy. For
more, see the implementation docs.
Returns:
sagemaker.model.Model: A SageMaker ``Model`` object. See
:func:`~sagemaker.model.Model` for full details.
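Examples:
A minimal sketch of compiling for an ``ml_c5`` target after training;
the output path, input shape, and framework version below are illustrative:
>>> compiled_model = estimator.compile_model(
...     target_instance_family="ml_c5",
...     input_shape={"data": [1, 3, 224, 224]},
...     output_path="s3://my-bucket/compiled/",  # hypothetical bucket
...     framework="tensorflow",
...     framework_version="1.15",
... )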
"""
if framework and framework not in NEO_ALLOWED_FRAMEWORKS:
raise ValueError(
"Please use valid framework, allowed values: {}".format(NEO_ALLOWED_FRAMEWORKS)
)
if (framework is None) != (framework_version is None):
raise ValueError("You should provide framework and framework_version at the same time.")
model = self.create_model(**kwargs)
self._compiled_models[target_instance_family] = model.compile(
target_instance_family,
input_shape,
output_path,
self.role,
tags,
self._compilation_job_name(),
compile_max_run,
framework=framework,
framework_version=framework_version,
target_platform_os=target_platform_os,
target_platform_arch=target_platform_arch,
target_platform_accelerator=target_platform_accelerator,
compiler_options=compiler_options,
)
return self._compiled_models[target_instance_family]
@classmethod
def attach(cls, training_job_name, sagemaker_session=None, model_channel_name="model"):
"""Attach to an existing training job.
Create an Estimator bound to an existing training job. Each subclass
is responsible for implementing
``_prepare_init_params_from_job_description()``, as this method delegates
the actual conversion of a training job description to the arguments
that the class constructor expects. After attaching, if the training job
has a Complete status, it can be ``deploy()`` ed to create a SageMaker
Endpoint and return a ``Predictor``.
If the training job is in progress, attach will block until the training job
completes, but logs of the training job will not display. To see the log
content, call ``logs()``.
Examples:
>>> my_estimator.fit(wait=False)
>>> training_job_name = my_estimator.latest_training_job.name
Later on:
>>> attached_estimator = Estimator.attach(training_job_name)
>>> attached_estimator.logs()
>>> attached_estimator.deploy()
Args:
training_job_name (str): The name of the training job to attach to.
sagemaker_session (sagemaker.session.Session): Session object which
manages interactions with Amazon SageMaker APIs and any other
AWS services needed. If not specified, the estimator creates one
using the default AWS configuration chain.
model_channel_name (str): Name of the channel where pre-trained
model data will be downloaded (default: 'model'). If no channel
with the same name exists in the training job, this option will
be ignored.
Returns:
Instance of the calling ``Estimator`` Class with the attached
training job.
"""
sagemaker_session = sagemaker_session or Session()
job_details = sagemaker_session.sagemaker_client.describe_training_job(
TrainingJobName=training_job_name
)
init_params = cls._prepare_init_params_from_job_description(job_details, model_channel_name)
tags = sagemaker_session.sagemaker_client.list_tags(
ResourceArn=job_details["TrainingJobArn"]
)["Tags"]
init_params.update(tags=tags)
estimator = cls(sagemaker_session=sagemaker_session, **init_params)
estimator.latest_training_job = _TrainingJob(
sagemaker_session=sagemaker_session, job_name=training_job_name
)
estimator._current_job_name = estimator.latest_training_job.name
estimator.latest_training_job.wait(logs="None")
return estimator
def logs(self):
"""Display the logs for Estimator's training job.
If the output is a tty or a Jupyter cell, it will be color-coded based
on which instance the log entry is from.
"""
self.sagemaker_session.logs_for_job(self.latest_training_job.name, wait=True)
def deploy(
self,
initial_instance_count=None,
instance_type=None,
serializer=None,
deserializer=None,
accelerator_type=None,
endpoint_name=None,
use_compiled_model=False,
wait=True,
model_name=None,
kms_key=None,
data_capture_config=None,
tags=None,
serverless_inference_config=None,
**kwargs,
):
"""Deploy the trained model to an Amazon SageMaker endpoint.
And then return ``sagemaker.Predictor`` object.
More information:
http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html
Args:
initial_instance_count (int): The initial number of instances to run
in the ``Endpoint`` created from this ``Model``. If not using
serverless inference, then it needs to be a number larger than or
equal to 1 (default: None).
instance_type (str): The EC2 instance type to deploy this Model to.
For example, 'ml.p2.xlarge', or 'local' for local mode. If not using
serverless inference, then it is required to deploy a model.
(default: None)
serializer (:class:`~sagemaker.serializers.BaseSerializer`): A
serializer object, used to encode data for an inference endpoint
(default: None). If ``serializer`` is not None, then
``serializer`` will override the default serializer. The
default serializer is set by the ``predictor_cls``.
deserializer (:class:`~sagemaker.deserializers.BaseDeserializer`): A
deserializer object, used to decode data from an inference
endpoint (default: None). If ``deserializer`` is not None, then
``deserializer`` will override the default deserializer. The
default deserializer is set by the ``predictor_cls``.
accelerator_type (str): Type of Elastic Inference accelerator to
attach to an endpoint for model loading and inference, for
example, 'ml.eia1.medium'. If not specified, no Elastic
Inference accelerator will be attached to the endpoint. For more
information:
https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html
endpoint_name (str): Name to use for creating an Amazon SageMaker
endpoint. If not specified, the name of the training job is
used.
use_compiled_model (bool): Flag to select whether to use compiled
(optimized) model. Default: False.
wait (bool): Whether the call should wait until the deployment of
model completes (default: True).
model_name (str): Name to use for creating an Amazon SageMaker
model. If not specified, the estimator generates a default job name
based on the training image name and current timestamp.
kms_key (str): The ARN of the KMS key that is used to encrypt the
data on the storage volume attached to the instance hosting the
endpoint.
data_capture_config (sagemaker.model_monitor.DataCaptureConfig): Specifies
configuration related to Endpoint data capture for use with
Amazon SageMaker Model Monitoring. Default: None.
serverless_inference_config (sagemaker.serverless.ServerlessInferenceConfig):
Specifies configuration related to serverless endpoint. Use this configuration
when trying to create serverless endpoint and make serverless inference. If
empty object passed through, we will use pre-defined values in
``ServerlessInferenceConfig`` class to deploy serverless endpoint (default: None)
tags (list[dict[str, str]]): Optional. The list of tags to attach to this specific
endpoint. Example:
>>> tags = [{'Key': 'tagname', 'Value': 'tagvalue'}]
For more information about tags, see
https://boto3.amazonaws.com/v1/documentation\
/api/latest/reference/services/sagemaker.html#SageMaker.Client.add_tags
**kwargs: Passed to invocation of ``create_model()``.
Implementations may customize ``create_model()`` to accept
``**kwargs`` to customize model creation during deploy.
For more, see the implementation docs.
Returns:
sagemaker.predictor.Predictor: A predictor that provides a ``predict()`` method,
which can be used to send requests to the Amazon SageMaker
endpoint and obtain inferences.
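Examples:
A minimal sketch of a real-time deployment after ``fit()`` completes;
the instance type and endpoint name below are illustrative:
>>> predictor = estimator.deploy(
...     initial_instance_count=1,
...     instance_type="ml.m5.xlarge",
...     endpoint_name="my-endpoint",  # hypothetical name
... )
>>> response = predictor.predict(data)  # `data` supplied by the caller
>>> predictor.delete_endpoint()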
"""
removed_kwargs("update_endpoint", kwargs)
is_serverless = serverless_inference_config is not None
self._ensure_latest_training_job()
self._ensure_base_job_name()
default_name = name_from_base(self.base_job_name)
endpoint_name = endpoint_name or default_name
model_name = model_name or default_name
self.deploy_instance_type = instance_type
if use_compiled_model and not is_serverless:
family = "_".join(instance_type.split(".")[:-1])
if family not in self._compiled_models:
raise ValueError(
"No compiled model for {}. "
"Please compile one with compile_model before deploying.".format(family)
)
model = self._compiled_models[family]
else:
kwargs["model_kms_key"] = self.output_kms_key
model = self.create_model(**kwargs)
model.name = model_name
return model.deploy(
instance_type=instance_type,
initial_instance_count=initial_instance_count,
serializer=serializer,
deserializer=deserializer,
accelerator_type=accelerator_type,
endpoint_name=endpoint_name,
tags=tags or self.tags,
wait=wait,
kms_key=kms_key,
data_capture_config=data_capture_config,
serverless_inference_config=serverless_inference_config,
)
def register(
self,
content_types,
response_types,
inference_instances,
transform_instances,
image_uri=None,
model_package_name=None,
model_package_group_name=None,
model_metrics=None,
metadata_properties=None,
marketplace_cert=False,
approval_status=None,
description=None,
compile_model_family=None,
model_name=None,
drift_check_baselines=None,
**kwargs,
):
"""Creates a model package for creating SageMaker models or listing on Marketplace.
Args:
content_types (list): The supported MIME types for the input data.
response_types (list): The supported MIME types for the output data.
inference_instances (list): A list of the instance types that are used to
generate inferences in real-time.
transform_instances (list): A list of the instance types on which a transformation
job can be run or on which an endpoint can be deployed.
image_uri (str): The container image uri for Model Package, if not specified,
Estimator's training container image will be used (default: None).
model_package_name (str): Model Package name, exclusive to `model_package_group_name`,
using `model_package_name` makes the Model Package un-versioned (default: None).
model_package_group_name (str): Model Package Group name, exclusive to
`model_package_name`, using `model_package_group_name` makes the Model Package
versioned (default: None).
model_metrics (ModelMetrics): ModelMetrics object (default: None).
metadata_properties (MetadataProperties): MetadataProperties (default: None).
marketplace_cert (bool): A boolean value indicating if the Model Package is certified
for AWS Marketplace (default: False).
approval_status (str): Model Approval Status, values can be "Approved", "Rejected",
or "PendingManualApproval" (default: "PendingManualApproval").
description (str): Model Package description (default: None).
compile_model_family (str): Instance family for compiled model, if specified, a compiled
model will be used (default: None).
model_name (str): User defined model name (default: None).
drift_check_baselines (DriftCheckBaselines): DriftCheckBaselines object (default: None).
**kwargs: Passed to invocation of ``create_model()``. Implementations may customize
``create_model()`` to accept ``**kwargs`` to customize model creation during
deploy. For more, see the implementation docs.
Returns:
str: A string of SageMaker Model Package ARN.
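Examples:
A minimal sketch of registering the trained model as a new version in
a Model Package Group; the group name below is hypothetical:
>>> model_package_arn = estimator.register(
...     content_types=["text/csv"],
...     response_types=["text/csv"],
...     inference_instances=["ml.m5.xlarge"],
...     transform_instances=["ml.m5.xlarge"],
...     model_package_group_name="my-model-group",
... )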
"""
default_name = name_from_base(self.base_job_name)
model_name = model_name or default_name
if compile_model_family is not None:
model = self._compiled_models[compile_model_family]
else:
if "model_kms_key" not in kwargs:
kwargs["model_kms_key"] = self.output_kms_key
model = self.create_model(image_uri=image_uri, **kwargs)
model.name = model_name
return model.register(
content_types,
response_types,
inference_instances,
transform_instances,
model_package_name,
model_package_group_name,
image_uri,
model_metrics,
metadata_properties,
marketplace_cert,
approval_status,
description,
drift_check_baselines=drift_check_baselines,
)
@property
def model_data(self):
"""str: The model location in S3. Only set if Estimator has been ``fit()``."""
if self.latest_training_job is not None:
model_uri = self.sagemaker_session.sagemaker_client.describe_training_job(
TrainingJobName=self.latest_training_job.name
)["ModelArtifacts"]["S3ModelArtifacts"]
else:
logger.warning(
"No finished training job found associated with this estimator. Please make sure "
"this estimator is only used for building workflow config"
)
model_uri = os.path.join(
self.output_path, self._current_job_name, "output", "model.tar.gz"
)
return model_uri
@abstractmethod
def create_model(self, **kwargs):
"""Create a SageMaker ``Model`` object that can be deployed to an ``Endpoint``.
Args:
**kwargs: Keyword arguments used by the implemented method for
creating the ``Model``.
Returns:
sagemaker.model.Model: A SageMaker ``Model`` object. See
:func:`~sagemaker.model.Model` for full details.
"""
@classmethod
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):
"""Convert the job description to init params that can be handled by the class constructor.
Args:
job_details: the returned job details from a describe_training_job
API call.
model_channel_name (str): Name of the channel where pre-trained
model data will be downloaded.
Returns:
dictionary: The transformed init_params
"""
init_params = dict()
init_params["role"] = job_details["RoleArn"]
init_params["instance_count"] = job_details["ResourceConfig"]["InstanceCount"]
init_params["instance_type"] = job_details["ResourceConfig"]["InstanceType"]
init_params["volume_size"] = job_details["ResourceConfig"]["VolumeSizeInGB"]
init_params["max_run"] = job_details["StoppingCondition"]["MaxRuntimeInSeconds"]
init_params["input_mode"] = job_details["AlgorithmSpecification"]["TrainingInputMode"]
init_params["base_job_name"] = base_from_name(job_details["TrainingJobName"])
init_params["output_path"] = job_details["OutputDataConfig"]["S3OutputPath"]
init_params["output_kms_key"] = job_details["OutputDataConfig"]["KmsKeyId"]
if "EnableNetworkIsolation" in job_details:
init_params["enable_network_isolation"] = job_details["EnableNetworkIsolation"]
has_hps = "HyperParameters" in job_details
init_params["hyperparameters"] = job_details["HyperParameters"] if has_hps else {}
if "AlgorithmName" in job_details["AlgorithmSpecification"]:
init_params["algorithm_arn"] = job_details["AlgorithmSpecification"]["AlgorithmName"]
elif "TrainingImage" in job_details["AlgorithmSpecification"]:
init_params["image_uri"] = job_details["AlgorithmSpecification"]["TrainingImage"]
else:
raise RuntimeError(
"Invalid AlgorithmSpecification. Either TrainingImage or "
"AlgorithmName is expected. None was found."
)
if "MetricDefinitons" in job_details["AlgorithmSpecification"]:
init_params["metric_definitions"] = job_details["AlgorithmSpecification"][
"MetricsDefinition"
]
if "EnableInterContainerTrafficEncryption" in job_details:
init_params["encrypt_inter_container_traffic"] = job_details[
"EnableInterContainerTrafficEncryption"
]
subnets, security_group_ids = vpc_utils.from_dict(job_details.get(vpc_utils.VPC_CONFIG_KEY))
if subnets:
init_params["subnets"] = subnets
if security_group_ids:
init_params["security_group_ids"] = security_group_ids
if "InputDataConfig" in job_details and model_channel_name:
for channel in job_details["InputDataConfig"]:
if channel["ChannelName"] == model_channel_name:
init_params["model_channel_name"] = model_channel_name
init_params["model_uri"] = channel["DataSource"]["S3DataSource"]["S3Uri"]
break
if job_details.get("EnableManagedSpotTraining", False):
init_params["use_spot_instances"] = True
max_wait = job_details.get("StoppingCondition", {}).get("MaxWaitTimeInSeconds")
if max_wait:
init_params["max_wait"] = max_wait
if job_details.get("RetryStrategy", False):
init_params["max_retry_attempts"] = job_details.get("RetryStrategy", {}).get(
"MaximumRetryAttempts"
)
return init_params
def transformer(
self,
instance_count,
instance_type,
strategy=None,
assemble_with=None,
output_path=None,
output_kms_key=None,
accept=None,
env=None,
max_concurrent_transforms=None,
max_payload=None,
tags=None,
role=None,
volume_kms_key=None,
vpc_config_override=vpc_utils.VPC_CONFIG_DEFAULT,
enable_network_isolation=None,
model_name=None,
):
"""Return a ``Transformer`` that uses a SageMaker Model based on the training job.
It reuses the SageMaker Session and base job name used by
the Estimator.
Args:
instance_count (int): Number of EC2 instances to use.
instance_type (str): Type of EC2 instance to use, for example,
'ml.c4.xlarge'.
strategy (str): The strategy used to decide how to batch records in
a single request (default: None). Valid values: 'MultiRecord'
and 'SingleRecord'.
assemble_with (str): How the output is assembled (default: None).
Valid values: 'Line' or 'None'.
output_path (str): S3 location for saving the transform result. If
not specified, results are stored to a default bucket.
output_kms_key (str): Optional. KMS key ID for encrypting the
transform output (default: None).
accept (str): The accept header passed by the client to
the inference endpoint. If it is supported by the endpoint,
it will be the format of the batch transform output.
env (dict): Environment variables to be set for use during the
transform job (default: None).
max_concurrent_transforms (int): The maximum number of HTTP requests
to be made to each individual transform container at one time.
max_payload (int): Maximum size of the payload in a single HTTP
request to the container in MB.
tags (list[dict]): List of tags for labeling a transform job. If
none specified, then the tags used for the training job are used
for the transform job.
role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``,
which is also used during transform jobs. If not specified, the
role from the Estimator will be used.
volume_kms_key (str): Optional. KMS key ID for encrypting the volume
attached to the ML compute instance (default: None).
vpc_config_override (dict[str, list[str]]): Optional override for the
VpcConfig set on the model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
enable_network_isolation (bool): Specifies whether container will
run in network isolation mode. Network isolation mode restricts
the container access to outside networks (such as the internet).
The container does not make any inbound or outbound network
calls. If True, a channel named "code" will be created for any
user entry script for inference. Also known as Internet-free mode.
If not specified, this setting is taken from the estimator's
current configuration.
model_name (str): Name to use for creating an Amazon SageMaker
model. If not specified, the estimator generates a default job name
based on the training image name and current timestamp.
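Examples:
A minimal sketch of batch transform using the trained model; the S3
paths below are hypothetical:
>>> transformer = estimator.transformer(
...     instance_count=1,
...     instance_type="ml.m5.xlarge",
...     output_path="s3://my-bucket/transform-output/",
... )
>>> transformer.transform("s3://my-bucket/batch-input/", content_type="text/csv")
>>> transformer.wait()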
"""
tags = tags or self.tags
model_name = self._get_or_create_name(model_name)
if self.latest_training_job is None:
logger.warning(
"No finished training job found associated with this estimator. Please make sure "
"this estimator is only used for building workflow config"
)
else:
if enable_network_isolation is None:
enable_network_isolation = self.enable_network_isolation()
model = self.create_model(
vpc_config_override=vpc_config_override,
model_kms_key=self.output_kms_key,
enable_network_isolation=enable_network_isolation,
)
# not all create_model() implementations have the same kwargs
model.name = model_name
if role is not None:
model.role = role
model._create_sagemaker_model(instance_type, tags=tags)
return Transformer(
model_name,
instance_count,
instance_type,
strategy=strategy,
assemble_with=assemble_with,
output_path=output_path,
output_kms_key=output_kms_key,
accept=accept,
max_concurrent_transforms=max_concurrent_transforms,
max_payload=max_payload,
env=env,
tags=tags,
base_transform_job_name=self.base_job_name,
volume_kms_key=volume_kms_key,
sagemaker_session=self.sagemaker_session,
)
@property
def training_job_analytics(self):
"""Return a ``TrainingJobAnalytics`` object for the current training job."""
if self._current_job_name is None:
raise ValueError("Estimator is not associated with a TrainingJob")
return TrainingJobAnalytics(
self._current_job_name, sagemaker_session=self.sagemaker_session
)
def get_vpc_config(self, vpc_config_override=vpc_utils.VPC_CONFIG_DEFAULT):
"""Returns VpcConfig dict either from this Estimator's subnets and security groups.
Or else validate and return an optional override value.
Args:
vpc_config_override:
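Examples:
A sketch of the returned structure when subnets and security groups
were set on the estimator (the ids below are illustrative):
>>> estimator.get_vpc_config()
{'Subnets': ['subnet-123'], 'SecurityGroupIds': ['sg-123']}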
"""
if vpc_config_override is vpc_utils.VPC_CONFIG_DEFAULT:
return vpc_utils.to_dict(self.subnets, self.security_group_ids)
return vpc_utils.sanitize(vpc_config_override)
def _ensure_latest_training_job(
self, error_message="Estimator is not associated with a training job"
):
"""Placeholder docstring"""
if self.latest_training_job is None:
raise ValueError(error_message)
delete_endpoint = removed_function("delete_endpoint")
def enable_default_profiling(self):
"""Update training job to enable Debugger monitoring.
This method enables Debugger monitoring with
the default ``profiler_config`` parameter to collect system
metrics and the default built-in ``profiler_report`` rule.
Framework metrics won't be saved.
To update training job to emit framework metrics, you can use
:class:`~sagemaker.estimator.Estimator.update_profiler`
method and specify the framework metrics you want to enable.
This method can be called only while the training job is in progress and
Debugger monitoring is disabled.
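Examples:
A sketch assuming the job was launched with ``disable_profiler=True``
and is still in progress:
>>> estimator.fit(wait=False)
>>> estimator.enable_default_profiling()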
"""
self._ensure_latest_training_job()
training_job_details = self.latest_training_job.describe()
if training_job_details.get("ProfilingStatus") == "Enabled":
raise ValueError(
"Debugger monitoring is already enabled. To update the profiler_config parameter "
"and the Debugger profiling rules, please use the update_profiler function."
)
if "ProfilerConfig" in training_job_details and training_job_details["ProfilerConfig"].get(
"S3OutputPath"
):
self.profiler_config = ProfilerConfig(
s3_output_path=training_job_details["ProfilerConfig"]["S3OutputPath"]
)
else:
self.profiler_config = ProfilerConfig(s3_output_path=self.output_path)
self.profiler_rules = [get_default_profiler_rule()]
self.profiler_rule_configs = self._prepare_profiler_rules()
_TrainingJob.update(
self, self.profiler_rule_configs, self.profiler_config._to_request_dict()
)
def disable_profiling(self):
"""Update the current training job in progress to disable profiling.
Debugger stops collecting the system and framework metrics
and turns off the Debugger built-in monitoring and profiling rules.
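Examples:
A sketch assuming the training job is still in progress with
profiling enabled:
>>> estimator.disable_profiling()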
"""
self._ensure_latest_training_job()
training_job_details = self.latest_training_job.describe()
if training_job_details.get("ProfilingStatus") == "Disabled":
raise ValueError("Profiler is already disabled.")
_TrainingJob.update(
self, profiler_config=ProfilerConfig._to_profiler_disabled_request_dict()
)
def update_profiler(
self,
rules=None,
system_monitor_interval_millis=None,
s3_output_path=None,
framework_profile_params=None,
disable_framework_metrics=False,
):
"""Update training jobs to enable profiling.
This method updates the ``profiler_config`` parameter
and initiates Debugger built-in rules for profiling.
Args:
rules (list[:class:`~sagemaker.debugger.ProfilerRule`]): A list of
:class:`~sagemaker.debugger.ProfilerRule` objects to define
rules for continuous analysis with SageMaker Debugger. Currently, you can
only add new profiler rules during the training job. (default: ``None``)
s3_output_path (str): The location in S3 to store the output. If profiler is enabled
once, s3_output_path cannot be changed. (default: ``None``)
system_monitor_interval_millis (int): How often profiling system metrics are
collected; Unit: Milliseconds (default: ``None``)
framework_profile_params (:class:`~sagemaker.debugger.FrameworkProfile`):
A parameter object for framework metrics profiling. Configure it using
the :class:`~sagemaker.debugger.FrameworkProfile` class.
To use the default framework profile parameters, pass ``FrameworkProfile()``.
For more information about the default values,
see :class:`~sagemaker.debugger.FrameworkProfile`. (default: ``None``)
disable_framework_metrics (bool): Specify whether to disable all the framework metrics.
This won't update system metrics and the Debugger built-in rules for monitoring.
To stop both monitoring and profiling,
use the :class:`~sagemaker.estimator.Estimator.disable_profiling`
method. (default: ``False``)
.. attention::
Updating the profiling configuration for TensorFlow dataloader profiling
is currently not available. If you started a TensorFlow training job only with
monitoring and want to enable profiling while the training job is running,
the dataloader profiling cannot be updated.
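Examples:
A minimal sketch of updating the system metrics collection interval
on a running job; the interval value below is illustrative:
>>> estimator.update_profiler(system_monitor_interval_millis=500)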
"""
self._ensure_latest_training_job()
if (
not rules
and not system_monitor_interval_millis
and not s3_output_path
and not framework_profile_params
and not disable_framework_metrics
):
raise ValueError("Please provide profiler config or profiler rule to be updated.")
if disable_framework_metrics and framework_profile_params:
raise ValueError(
"framework_profile_params cannot be set when disable_framework_metrics is True"
)
profiler_config_request_dict = None
profiler_rule_configs = None
if rules:
for rule in rules:
if not isinstance(rule, ProfilerRule):
raise ValueError("Please provide ProfilerRule to be updated.")
self.profiler_rules = rules
profiler_rule_configs = self._prepare_profiler_rules()
if disable_framework_metrics:
empty_framework_profile_param = FrameworkProfile()
empty_framework_profile_param.profiling_parameters = {}
self.profiler_config = ProfilerConfig(
s3_output_path=s3_output_path,
system_monitor_interval_millis=system_monitor_interval_millis,
framework_profile_params=empty_framework_profile_param,
)
else:
self.profiler_config = ProfilerConfig(
s3_output_path=s3_output_path,
system_monitor_interval_millis=system_monitor_interval_millis,
framework_profile_params=framework_profile_params,
)
profiler_config_request_dict = self.profiler_config._to_request_dict()
_TrainingJob.update(self, profiler_rule_configs, profiler_config_request_dict)
class _TrainingJob(_Job):
"""Placeholder docstring"""
@classmethod
def start_new(cls, estimator, inputs, experiment_config):
"""Create a new Amazon SageMaker training job from the estimator.
Args:
estimator (sagemaker.estimator.EstimatorBase): Estimator object
created by the user.
inputs (str): Parameters used when called
:meth:`~sagemaker.estimator.EstimatorBase.fit`.
experiment_config (dict[str, str]): Experiment management configuration.
Optionally, the dict can contain three keys:
'ExperimentName', 'TrialName', and 'TrialComponentDisplayName'.
The behavior of setting these keys is as follows:
* If `ExperimentName` is supplied but `TrialName` is not a Trial will be
automatically created and the job's Trial Component associated with the Trial.
* If `TrialName` is supplied and the Trial already exists the job's Trial Component
will be associated with the Trial.
* If both `ExperimentName` and `TrialName` are not supplied the trial component
will be unassociated.
* `TrialComponentDisplayName` is used for display in Studio.
Returns:
sagemaker.estimator._TrainingJob: Constructed object that captures
all information about the started training job.
"""
train_args = cls._get_train_args(estimator, inputs, experiment_config)
estimator.sagemaker_session.train(**train_args)
return cls(estimator.sagemaker_session, estimator._current_job_name)
@classmethod
def _get_train_args(cls, estimator, inputs, experiment_config):
"""Constructs a dict of arguments for an Amazon SageMaker training job from the estimator.
Args:
estimator (sagemaker.estimator.EstimatorBase): Estimator object
created by the user.
inputs (str): Parameters used when called
:meth:`~sagemaker.estimator.EstimatorBase.fit`.
experiment_config (dict[str, str]): Experiment management configuration.
Optionally, the dict can contain three keys:
'ExperimentName', 'TrialName', and 'TrialComponentDisplayName'.
The behavior of setting these keys is as follows:
* If `ExperimentName` is supplied but `TrialName` is not a Trial will be
automatically created and the job's Trial Component associated with the Trial.
* If `TrialName` is supplied and the Trial already exists the job's Trial Component
will be associated with the Trial.
* If both `ExperimentName` and `TrialName` are not supplied the trial component
will be unassociated.
* `TrialComponentDisplayName` is used for display in Studio.
Returns:
Dict: dict for `sagemaker.session.Session.train` method
"""
local_mode = estimator.sagemaker_session.local_mode
model_uri = estimator.model_uri
# Allow file:// input only in local mode
if cls._is_local_channel(inputs) or cls._is_local_channel(model_uri):
if not local_mode:
raise ValueError(
"File URIs are supported in local mode only. Please use a S3 URI instead."
)
config = _Job._load_config(inputs, estimator)
current_hyperparameters = estimator.hyperparameters()
hyperparameters = None  # guard against a NameError when no hyperparameters are set
if current_hyperparameters is not None:
hyperparameters = {
str(k): (v if isinstance(v, (Parameter, Expression, Properties)) else str(v))
for (k, v) in current_hyperparameters.items()
}
train_args = config.copy()
train_args["input_mode"] = estimator.input_mode
train_args["job_name"] = estimator._current_job_name
train_args["hyperparameters"] = hyperparameters
train_args["tags"] = estimator.tags
train_args["metric_definitions"] = estimator.metric_definitions
train_args["experiment_config"] = experiment_config
train_args["environment"] = estimator.environment
if isinstance(inputs, TrainingInput):
if "InputMode" in inputs.config:
logger.debug(
"Selecting TrainingInput's input_mode (%s) for TrainingInputMode.",
inputs.config["InputMode"],
)
train_args["input_mode"] = inputs.config["InputMode"]
if estimator.enable_network_isolation():
train_args["enable_network_isolation"] = True
if estimator.max_retry_attempts is not None:
train_args["retry_strategy"] = {"MaximumRetryAttempts": estimator.max_retry_attempts}
else:
train_args["retry_strategy"] = None
if estimator.encrypt_inter_container_traffic:
train_args["encrypt_inter_container_traffic"] = True
if isinstance(estimator, sagemaker.algorithm.AlgorithmEstimator):
train_args["algorithm_arn"] = estimator.algorithm_arn
else:
train_args["image_uri"] = estimator.training_image_uri()
if estimator.debugger_rule_configs:
train_args["debugger_rule_configs"] = estimator.debugger_rule_configs
if estimator.debugger_hook_config:
estimator.debugger_hook_config.collection_configs = estimator.collection_configs
train_args["debugger_hook_config"] = estimator.debugger_hook_config._to_request_dict()
if estimator.tensorboard_output_config:
train_args[
"tensorboard_output_config"
] = estimator.tensorboard_output_config._to_request_dict()
cls._add_spot_checkpoint_args(local_mode, estimator, train_args)
if estimator.enable_sagemaker_metrics is not None:
train_args["enable_sagemaker_metrics"] = estimator.enable_sagemaker_metrics
if estimator.profiler_rule_configs:
train_args["profiler_rule_configs"] = estimator.profiler_rule_configs
if estimator.profiler_config:
train_args["profiler_config"] = estimator.profiler_config._to_request_dict()
return train_args
@classmethod
def _add_spot_checkpoint_args(cls, local_mode, estimator, train_args):
"""Placeholder docstring"""
if estimator.use_spot_instances:
if local_mode:
raise ValueError("Spot training is not supported in local mode.")
train_args["use_spot_instances"] = True
if estimator.checkpoint_s3_uri:
if local_mode:
raise ValueError("Setting checkpoint_s3_uri is not supported in local mode.")
train_args["checkpoint_s3_uri"] = estimator.checkpoint_s3_uri
if estimator.checkpoint_local_path:
if local_mode:
raise ValueError("Setting checkpoint_local_path is not supported in local mode.")
train_args["checkpoint_local_path"] = estimator.checkpoint_local_path
@classmethod
def _is_local_channel(cls, input_uri):
"""Placeholder docstring"""
return isinstance(input_uri, string_types) and input_uri.startswith("file://")
@classmethod
def update(cls, estimator, profiler_rule_configs=None, profiler_config=None):
"""Update a running Amazon SageMaker training job.
Args:
estimator (sagemaker.estimator.EstimatorBase): Estimator object created by the user.
profiler_rule_configs (list): List of profiler rule configurations to be
updated in the training job. (default: ``None``).
profiler_config (dict): Configuration for how profiling information is emitted with
SageMaker Debugger. (default: ``None``).
Returns:
sagemaker.estimator._TrainingJob: Constructed object that captures
all information about the updated training job.
"""
update_args = cls._get_update_args(estimator, profiler_rule_configs, profiler_config)
estimator.sagemaker_session.update_training_job(**update_args)
return estimator.latest_training_job
@classmethod
def _get_update_args(cls, estimator, profiler_rule_configs, profiler_config):
"""Constructs a dict of arguments for updating an Amazon SageMaker training job.
Args:
estimator (sagemaker.estimator.EstimatorBase): Estimator object
created by the user.
profiler_rule_configs (list): List of profiler rule configurations to be
updated in the training job. (default: ``None``).
profiler_config (dict): Configuration for how profiling information is emitted with
SageMaker Debugger. (default: ``None``).
Returns:
Dict: dict for `sagemaker.session.Session.update_training_job` method
"""
update_args = {"job_name": estimator.latest_training_job.name}
update_args.update(build_dict("profiler_rule_configs", profiler_rule_configs))
update_args.update(build_dict("profiler_config", profiler_config))
return update_args
def wait(self, logs="All"):
"""Placeholder docstring.
Args:
logs ([str]): A list of strings specifying which logs to print. Acceptable
strings are "All", "None", "Training", or "Rules". To maintain backwards
compatibility, boolean values are also accepted and converted to strings.
"""
# Convert boolean values of logs to strings.
log_string_map = {True: "All", False: "None"}
if isinstance(logs, bool):
logs = log_string_map[logs]
# If logs are requested, call logs_for_jobs.
if logs != "None":
self.sagemaker_session.logs_for_job(self.job_name, wait=True, log_type=logs)
else:
self.sagemaker_session.wait_for_job(self.job_name)
def describe(self):
"""Returns a response from the DescribeTrainingJob API call."""
return self.sagemaker_session.describe_training_job(self.job_name)
def rule_job_summary(self):
"""Calls describe_training_job and returns two dictionaries.
Returns:
list[dict]: A list of DebugRuleEvaluationStatuses and ProfilerRuleEvaluationStatuses
dictionary.
"""
job_summary = self.describe()
rule_eval_statuses = job_summary.get("DebugRuleEvaluationStatuses") or []
rule_eval_statuses.extend(job_summary.get("ProfilerRuleEvaluationStatuses") or [])
return rule_eval_statuses
def stop(self):
"""Stops the training job."""
self.sagemaker_session.stop_training_job(self.name)
class Estimator(EstimatorBase):
"""A generic Estimator to train using any supplied algorithm.
This class is designed for use with algorithms that don't have their own, custom class.
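Examples:
A minimal sketch of training with a custom container image; the image
URI, role, and bucket below are hypothetical:
>>> estimator = Estimator(
...     image_uri="123456789012.dkr.ecr.us-west-2.amazonaws.com/my-image:latest",
...     role="SageMakerRole",
...     instance_count=1,
...     instance_type="ml.m5.xlarge",
...     output_path="s3://my-bucket/output/",
...     hyperparameters={"epochs": "10"},
... )
>>> estimator.fit("s3://my-bucket/train/")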
"""
def __init__(
self,
image_uri,
role,
instance_count=None,
instance_type=None,
volume_size=30,
volume_kms_key=None,
max_run=24 * 60 * 60,
input_mode="File",
output_path=None,
output_kms_key=None,
base_job_name=None,
sagemaker_session=None,
hyperparameters=None,
tags=None,
subnets=None,
security_group_ids=None,
model_uri=None,
model_channel_name="model",
metric_definitions=None,
encrypt_inter_container_traffic=False,
use_spot_instances=False,
max_wait=None,
checkpoint_s3_uri=None,
checkpoint_local_path=None,
enable_network_isolation=False,
rules=None,
debugger_hook_config=None,
tensorboard_output_config=None,
enable_sagemaker_metrics=None,
profiler_config=None,
disable_profiler=False,
environment=None,
max_retry_attempts=None,
**kwargs,
):
"""Initialize an ``Estimator`` instance.
Args:
image_uri (str): The container image to use for training.
role (str): An AWS IAM role (either name or full ARN). The Amazon
SageMaker training jobs and APIs that create Amazon SageMaker
endpoints use this role to access training data and model
artifacts. After the endpoint is created, the inference code
might use the IAM role, if it needs to access an AWS resource.
instance_count (int): Number of Amazon EC2 instances to use
for training.
instance_type (str): Type of EC2 instance to use for training,
for example, 'ml.c4.xlarge'.
volume_size (int): Size in GB of the EBS volume to use for
storing input data during training (default: 30). Must be large
enough to store training data if File Mode is used (which is the
default).
volume_kms_key (str): Optional. KMS key ID for encrypting EBS
volume attached to the training instance (default: None).
max_run (int): Timeout in seconds for training (default: 24 *
60 * 60). After this amount of time Amazon SageMaker terminates
the job regardless of its current status.
input_mode (str): The input mode that the algorithm supports
(default: 'File'). Valid modes:
* 'File' - Amazon SageMaker copies the training dataset from the
S3 location to a local directory.
* 'Pipe' - Amazon SageMaker streams data directly from S3 to the
container via a Unix-named pipe.
This argument can be overridden on a per-channel basis using
``sagemaker.inputs.TrainingInput.input_mode``.
output_path (str): S3 location for saving the training result (model
artifacts and output files). If not specified, results are
stored to a default bucket. If the bucket with the specific name
does not exist, the estimator creates the bucket during the
:meth:`~sagemaker.estimator.EstimatorBase.fit` method execution.
output_kms_key (str): Optional. KMS key ID for encrypting the
training output (default: None).
base_job_name (str): Prefix for training job name when the
:meth:`~sagemaker.estimator.EstimatorBase.fit` method launches.
If not specified, the estimator generates a default job name,
based on the training image name and current timestamp.
sagemaker_session (sagemaker.session.Session): Session object which
manages interactions with Amazon SageMaker APIs and any other
AWS services needed. If not specified, the estimator creates one
using the default AWS configuration chain.
hyperparameters (dict): Dictionary containing the hyperparameters to
initialize this estimator with.
tags (list[dict]): List of tags for labeling a training job. For
more, see
https://docs.aws.amazon.com/sagemaker/latest/dg/API_Tag.html.
subnets (list[str]): List of subnet ids. If not specified, the training
job will be created without VPC config.
security_group_ids (list[str]): List of security group ids. If not
specified, the training job will be created without VPC config.
model_uri (str): URI where a pre-trained model is stored, either
locally or in S3 (default: None). If specified, the estimator
will create a channel pointing to the model so the training job
can download it. This model can be a 'model.tar.gz' from a
previous training job, or other artifacts coming from a
different source.
In local mode, this should point to the path in which the model
is located and not the file itself, as local Docker containers
will try to mount the URI as a volume.
More information:
https://docs.aws.amazon.com/sagemaker/latest/dg/cdf-training.html#td-deserialization
model_channel_name (str): Name of the channel where 'model_uri' will
be downloaded (default: 'model').
metric_definitions (list[dict]): A list of dictionaries that defines
the metric(s) used to evaluate the training jobs. Each
dictionary contains two keys: 'Name' for the name of the metric,
and 'Regex' for the regular expression used to extract the
metric from the logs. This should be defined only for jobs that
don't use an Amazon algorithm.
encrypt_inter_container_traffic (bool): Specifies whether traffic
between training containers is encrypted for the training job
(default: ``False``).
use_spot_instances (bool): Specifies whether to use SageMaker
Managed Spot instances for training. If enabled then the
``max_wait`` arg should also be set.
More information:
https://docs.aws.amazon.com/sagemaker/latest/dg/model-managed-spot-training.html
(default: ``False``).
max_wait (int): Timeout in seconds waiting for spot training
instances. After this amount of time Amazon SageMaker will stop
waiting for Spot instances to become available
(default: ``None``).
checkpoint_s3_uri (str): The S3 URI in which to persist checkpoints
that the algorithm persists (if any) during training. (default:
``None``).
checkpoint_local_path (str): The local path that the algorithm
writes its checkpoints to. SageMaker will persist all files
under this path to `checkpoint_s3_uri` continually during
training. On job startup the reverse happens - data from the
s3 location is downloaded to this path before the algorithm is
started. If the path is unset then SageMaker assumes the
checkpoints will be provided under `/opt/ml/checkpoints/`.
(default: ``None``).
enable_network_isolation (bool): Specifies whether container will
run in network isolation mode (default: ``False``). Network
isolation mode restricts the container access to outside networks
(such as the Internet). The container does not make any inbound or
outbound network calls. Also known as Internet-free mode.
rules (list[:class:`~sagemaker.debugger.RuleBase`]): A list of
:class:`~sagemaker.debugger.RuleBase` objects used to define
SageMaker Debugger rules for real-time analysis
(default: ``None``). For more information,
see `Continuous analyses through rules
<https://sagemaker.readthedocs.io/en/stable/amazon_sagemaker_debugger.html
#continuous-analyses-through-rules>`_.
debugger_hook_config (:class:`~sagemaker.debugger.DebuggerHookConfig` or bool):
Configuration for how debugging information is emitted with
SageMaker Debugger. If not specified, a default one is created using
the estimator's ``output_path``, unless the region does not
support SageMaker Debugger. To disable SageMaker Debugger,
set this parameter to ``False``. For more information, see
`Capture real-time debugging data during model training in Amazon SageMaker
<https://sagemaker.readthedocs.io/en/stable/amazon_sagemaker_debugger.html#
capture-real-time-debugging-data-during-model-training-in-amazon-sagemaker>`_.
tensorboard_output_config (:class:`~sagemaker.debugger.TensorBoardOutputConfig`):
Configuration for customizing debugging visualization using TensorBoard
(default: ``None``). For more information,
see `Capture real time tensorboard data
<https://sagemaker.readthedocs.io/en/stable/amazon_sagemaker_debugger.html#
capture-real-time-tensorboard-data-from-the-debugging-hook>`_.
enable_sagemaker_metrics (bool): Enable SageMaker Metrics Time
Series. For more information, see `AlgorithmSpecification API
<https://docs.aws.amazon.com/sagemaker/latest/dg/
API_AlgorithmSpecification.html#SageMaker-Type-AlgorithmSpecification-
EnableSageMakerMetricsTimeSeries>`_.
(default: ``None``).
profiler_config (:class:`~sagemaker.debugger.ProfilerConfig`):
Configuration for how SageMaker Debugger collects
monitoring and profiling information from your training job.
If not specified, Debugger will be configured with
a default configuration and will save system and framework metrics
to the estimator's default ``output_path`` in Amazon S3.
Use :class:`~sagemaker.debugger.ProfilerConfig` to configure this parameter.
To disable SageMaker Debugger monitoring and profiling, set the
``disable_profiler`` parameter to ``True``.
disable_profiler (bool): Specifies whether Debugger monitoring and profiling
will be disabled (default: ``False``).
environment (dict[str, str]) : Environment variables to be set for
use during training job (default: ``None``)
max_retry_attempts (int): The number of times to move a job back to the STARTING status.
You can specify between 1 and 30 attempts.
If the value is greater than zero, the job is retried on ``InternalServerFailure``
up to that number of times.
You can cap the total duration of your job by setting ``max_wait`` and ``max_run``
(default: ``None``).
"""
self.image_uri = image_uri
self.hyperparam_dict = hyperparameters.copy() if hyperparameters else {}
super(Estimator, self).__init__(
role,
instance_count,
instance_type,
volume_size,
volume_kms_key,
max_run,
input_mode,
output_path,
output_kms_key,
base_job_name,
sagemaker_session,
tags,
subnets,
security_group_ids,
model_uri=model_uri,
model_channel_name=model_channel_name,
metric_definitions=metric_definitions,
encrypt_inter_container_traffic=encrypt_inter_container_traffic,
use_spot_instances=use_spot_instances,
max_wait=max_wait,
checkpoint_s3_uri=checkpoint_s3_uri,
checkpoint_local_path=checkpoint_local_path,
rules=rules,
debugger_hook_config=debugger_hook_config,
tensorboard_output_config=tensorboard_output_config,
enable_sagemaker_metrics=enable_sagemaker_metrics,
enable_network_isolation=enable_network_isolation,
profiler_config=profiler_config,
disable_profiler=disable_profiler,
environment=environment,
max_retry_attempts=max_retry_attempts,
**kwargs,
)
def training_image_uri(self):
"""Returns the docker image to use for training.
The fit() method, which does the model training, calls this method to
find the image to use for model training.
"""
return self.image_uri
def set_hyperparameters(self, **kwargs):
"""Sets the hyperparameter dictionary to use for training.
The hyperparameters are made accessible as a dict[str, str] to the
training code on SageMaker. For convenience, this accepts other types
for keys and values, but ``str()`` will be called to convert them before
training.
"""
for k, v in kwargs.items():
self.hyperparam_dict[k] = v
def hyperparameters(self):
"""Returns the hyperparameters as a dictionary to use for training.
The fit() method, which does the model training, calls this method to
find the hyperparameters you specified.
"""
return self.hyperparam_dict
def create_model(
self,
role=None,
image_uri=None,
predictor_cls=None,
vpc_config_override=vpc_utils.VPC_CONFIG_DEFAULT,
**kwargs,
):
"""Create a model to deploy.
The serializer and deserializer arguments are only used to define a
default Predictor. They are ignored if an explicit predictor class is passed in.
Other arguments are passed through to the Model class.
Args:
role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``,
which is also used during transform jobs. If not specified, the
role from the Estimator will be used.
image_uri (str): A Docker image URI to use for deploying the model.
Defaults to the image used for training.
predictor_cls (Predictor): The predictor class to use when
deploying the model.
vpc_config_override (dict[str, list[str]]): Optional override for VpcConfig set on
the model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
**kwargs: Additional parameters passed to :class:`~sagemaker.model.Model`
.. tip::
You can find additional parameters for using this method at
:class:`~sagemaker.model.Model`.
Returns:
sagemaker.model.Model: A Model ready for deployment.
"""
removed_kwargs("serializer", kwargs)
removed_kwargs("deserializer", kwargs)
removed_kwargs("content_type", kwargs)
removed_kwargs("accept", kwargs)
if predictor_cls is None:
def predict_wrapper(endpoint, session):
return Predictor(endpoint, session)
predictor_cls = predict_wrapper
role = role or self.role
if "enable_network_isolation" not in kwargs:
kwargs["enable_network_isolation"] = self.enable_network_isolation()
return Model(
image_uri or self.training_image_uri(),
self.model_data,
role,
vpc_config=self.get_vpc_config(vpc_config_override),
sagemaker_session=self.sagemaker_session,
predictor_cls=predictor_cls,
**kwargs,
)
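# A minimal usage sketch for the generic ``Estimator`` above. The image URI,
# role ARN, bucket names, and channel layout are hypothetical placeholders,
# not values defined by this module.
def _example_generic_estimator():  # pragma: no cover
    estimator = Estimator(
        image_uri="123456789012.dkr.ecr.us-west-2.amazonaws.com/my-algo:latest",  # hypothetical
        role="arn:aws:iam::123456789012:role/SageMakerRole",  # hypothetical
        instance_count=1,
        instance_type="ml.m5.xlarge",
        output_path="s3://my-bucket/output",  # hypothetical
    )
    # Hyperparameters are str()-converted before reaching the container.
    estimator.set_hyperparameters(epochs=10, learning_rate=0.01)
    # ``fit`` launches the training job; "training" is a common channel name.
    estimator.fit({"training": "s3://my-bucket/train"})  # hypothetical prefix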
class Framework(EstimatorBase):
"""Base class that cannot be instantiated directly.
Subclasses define functionality pertaining to specific ML frameworks,
such as training/deployment images and predictor instances.
"""
_framework_name = None
LAUNCH_PS_ENV_NAME = "sagemaker_parameter_server_enabled"
LAUNCH_MPI_ENV_NAME = "sagemaker_mpi_enabled"
LAUNCH_SM_DDP_ENV_NAME = "sagemaker_distributed_dataparallel_enabled"
INSTANCE_TYPE = "sagemaker_instance_type"
MPI_NUM_PROCESSES_PER_HOST = "sagemaker_mpi_num_of_processes_per_host"
MPI_CUSTOM_MPI_OPTIONS = "sagemaker_mpi_custom_mpi_options"
SM_DDP_CUSTOM_MPI_OPTIONS = "sagemaker_distributed_dataparallel_custom_mpi_options"
CONTAINER_CODE_CHANNEL_SOURCEDIR_PATH = "/opt/ml/input/data/code/sourcedir.tar.gz"
def __init__(
self,
entry_point,
source_dir=None,
hyperparameters=None,
container_log_level=logging.INFO,
code_location=None,
image_uri=None,
dependencies=None,
enable_network_isolation=False,
git_config=None,
checkpoint_s3_uri=None,
checkpoint_local_path=None,
enable_sagemaker_metrics=None,
**kwargs,
):
"""Base class initializer.
Subclasses which override ``__init__`` should invoke ``super()``.
Args:
entry_point (str): Path (absolute or relative) to the local Python
source file which should be executed as the entry point to
training. If ``source_dir`` is specified, then ``entry_point``
must point to a file located at the root of ``source_dir``.
If 'git_config' is provided, 'entry_point' should be
a relative location to the Python source file in the Git repo.
Example:
With the following GitHub repo directory structure:
>>> |----- README.md
>>> |----- src
>>> |----- train.py
>>> |----- test.py
You can assign entry_point='src/train.py'.
source_dir (str): Path (absolute, relative or an S3 URI) to a directory
with any other training source code dependencies aside from the entry
point file (default: None). If ``source_dir`` is an S3 URI, it must
point to a tar.gz file. Structure within this directory is preserved
when training on Amazon SageMaker. If 'git_config' is provided,
'source_dir' should be a relative location to a directory in the Git
repo.
.. admonition:: Example
With the following GitHub repo directory structure:
>>> |----- README.md
>>> |----- src
>>> |----- train.py
>>> |----- test.py
and you need 'train.py' as entry point and 'test.py' as
training source code as well, you can assign
entry_point='train.py', source_dir='src'.
hyperparameters (dict): Hyperparameters that will be used for
training (default: None). The hyperparameters are made
accessible as a dict[str, str] to the training code on
SageMaker. For convenience, this accepts other types for keys
and values, but ``str()`` will be called to convert them before
training.
container_log_level (int): Log level to use within the container
(default: logging.INFO). Valid values are defined in the Python
logging module.
code_location (str): The S3 prefix URI where custom code will be
uploaded (default: None) - do not include a trailing slash, since
a string prefixed with "/" is appended to ``code_location``. The code
file uploaded to S3 is 'code_location/job-name/source/sourcedir.tar.gz'.
If not specified, the default ``code location`` is s3://output_bucket/job-name/.
image_uri (str): An alternate image name to use instead of the
official SageMaker image for the framework. This is useful to
run one of the SageMaker supported frameworks with an image
containing custom dependencies.
dependencies (list[str]): A list of paths to directories (absolute
or relative) with any additional libraries that will be exported
to the container (default: []). The library folders will be
copied to SageMaker in the same folder where the entrypoint is
copied. If 'git_config' is provided, 'dependencies' should be a
list of relative locations to directories with any additional
libraries needed in the Git repo.
.. admonition:: Example
The following call
>>> Estimator(entry_point='train.py',
... dependencies=['my/libs/common', 'virtual-env'])
results in the following inside the container:
>>> $ ls
>>> opt/ml/code
>>> |------ train.py
>>> |------ common
>>> |------ virtual-env
This is not supported with "local code" in Local Mode.
enable_network_isolation (bool): Specifies whether container will
run in network isolation mode. Network isolation mode restricts
the container access to outside networks (such as the internet).
The container does not make any inbound or outbound network
calls. If True, a channel named "code" will be created for any
user entry script for training. The user entry script, files in
source_dir (if specified), and dependencies will be uploaded in
a tar to S3. Also known as internet-free mode (default: `False`).
git_config (dict[str, str]): Git configurations used for cloning
files, including ``repo``, ``branch``, ``commit``,
``2FA_enabled``, ``username``, ``password`` and ``token``. The
``repo`` field is required. All other fields are optional.
``repo`` specifies the Git repository where your training script
is stored. If you don't provide ``branch``, the default value
'master' is used. If you don't provide ``commit``, the latest
commit in the specified branch is used.
.. admonition:: Example
The following config:
>>> git_config = {'repo': 'https://github.com/aws/sagemaker-python-sdk.git',
>>> 'branch': 'test-branch-git-config',
>>> 'commit': '329bfcf884482002c05ff7f44f62599ebc9f445a'}
results in cloning the repo specified in 'repo', then
checking out the 'test-branch-git-config' branch, and checking
out the specified commit.
``2FA_enabled``, ``username``, ``password`` and ``token`` are
used for authentication. For GitHub (or other Git) accounts, set
``2FA_enabled`` to 'True' if two-factor authentication is
enabled for the account, otherwise set it to 'False'. If you do
not provide a value for ``2FA_enabled``, a default value of
'False' is used. CodeCommit does not support two-factor
authentication, so do not provide "2FA_enabled" with CodeCommit
repositories.
For GitHub and other Git repos, when SSH URLs are provided, it
doesn't matter whether 2FA is enabled or disabled; you should
either have no passphrase for the SSH key pairs, or have the
ssh-agent configured so that you will not be prompted for SSH
passphrase when you do 'git clone' command with SSH URLs. When
HTTPS URLs are provided: if 2FA is disabled, then either token
or username+password will be used for authentication if provided
(token prioritized); if 2FA is enabled, only token will be used
for authentication if provided. If required authentication info
is not provided, the Python SDK will try to use local credential
storage to authenticate. If that also fails, an error
is raised.
For CodeCommit repos, 2FA is not supported, so '2FA_enabled'
should not be provided. There is no token in CodeCommit, so
'token' should not be provided either. When 'repo' is an SSH URL,
the requirements are the same as GitHub-like repos. When 'repo'
is an HTTPS URL, username+password will be used for
authentication if they are provided; otherwise, the Python SDK will
try to use either CodeCommit credential helper or local
credential storage for authentication.
checkpoint_s3_uri (str): The S3 URI in which to persist checkpoints
that the algorithm persists (if any) during training. (default:
``None``).
checkpoint_local_path (str): The local path that the algorithm
writes its checkpoints to. SageMaker will persist all files
under this path to `checkpoint_s3_uri` continually during
training. On job startup the reverse happens - data from the
s3 location is downloaded to this path before the algorithm is
started. If the path is unset then SageMaker assumes the
checkpoints will be provided under `/opt/ml/checkpoints/`.
(default: ``None``).
enable_sagemaker_metrics (bool): Enable SageMaker Metrics Time
Series. For more information see:
https://docs.aws.amazon.com/sagemaker/latest/dg/API_AlgorithmSpecification.html#SageMaker-Type-AlgorithmSpecification-EnableSageMakerMetricsTimeSeries
(default: ``None``).
**kwargs: Additional kwargs passed to the ``EstimatorBase``
constructor.
.. tip::
You can find additional parameters for initializing this class at
:class:`~sagemaker.estimator.EstimatorBase`.
"""
super(Framework, self).__init__(enable_network_isolation=enable_network_isolation, **kwargs)
image_uri = renamed_kwargs("image_name", "image_uri", image_uri, kwargs)
if entry_point.startswith("s3://"):
raise ValueError(
"Invalid entry point script: {}. Must be a path to a local file.".format(
entry_point
)
)
self.entry_point = entry_point
self.git_config = git_config
self.source_dir = source_dir
self.dependencies = dependencies or []
self.uploaded_code = None
self.container_log_level = container_log_level
self.code_location = code_location
self.image_uri = image_uri
self._hyperparameters = hyperparameters or {}
self.checkpoint_s3_uri = checkpoint_s3_uri
self.checkpoint_local_path = checkpoint_local_path
self.enable_sagemaker_metrics = enable_sagemaker_metrics
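# A minimal sketch of passing ``git_config`` to a concrete ``Framework``
# subclass; ``sagemaker.pytorch.PyTorch`` is used purely for illustration,
# and the repo URL, role ARN, and version strings are hypothetical.
def _example_framework_git_config():  # pragma: no cover
    from sagemaker.pytorch import PyTorch
    git_config = {
        "repo": "https://github.com/example/my-training-code.git",  # hypothetical
        "branch": "main",
        # omit "commit" to train from the latest commit on the branch
    }
    estimator = PyTorch(
        entry_point="src/train.py",  # relative to the repo root
        git_config=git_config,
        role="arn:aws:iam::123456789012:role/SageMakerRole",  # hypothetical
        instance_count=1,
        instance_type="ml.p3.2xlarge",
        framework_version="1.8.1",
        py_version="py3",
    )
    estimator.fit()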
def _prepare_for_training(self, job_name=None):
"""Set hyperparameters needed for training. This method will also validate ``source_dir``.
Args:
job_name (str): Name of the training job to be created. If not
specified, one is generated, using the base name given to the
constructor if applicable.
"""
super(Framework, self)._prepare_for_training(job_name=job_name)
if self.git_config:
updated_paths = git_utils.git_clone_repo(
self.git_config, self.entry_point, self.source_dir, self.dependencies
)
self.entry_point = updated_paths["entry_point"]
self.source_dir = updated_paths["source_dir"]
self.dependencies = updated_paths["dependencies"]
# validate source dir will raise a ValueError if there is something wrong with the
# source directory. We are intentionally not handling it because this is a critical error.
if self.source_dir and not self.source_dir.lower().startswith("s3://"):
validate_source_dir(self.entry_point, self.source_dir)
# if we are in local mode with local_code=True. We want the container to just
# mount the source dir instead of uploading to S3.
local_code = get_config_value("local.local_code", self.sagemaker_session.config)
if self.sagemaker_session.local_mode and local_code:
# if there is no source dir, use the directory containing the entry point.
if self.source_dir is None:
self.source_dir = os.path.dirname(self.entry_point)
self.entry_point = os.path.basename(self.entry_point)
code_dir = "file://" + self.source_dir
script = self.entry_point
elif self.enable_network_isolation() and self.entry_point:
self.uploaded_code = self._stage_user_code_in_s3()
code_dir = self.CONTAINER_CODE_CHANNEL_SOURCEDIR_PATH
script = self.uploaded_code.script_name
self.code_uri = self.uploaded_code.s3_prefix
else:
self.uploaded_code = self._stage_user_code_in_s3()
code_dir = self.uploaded_code.s3_prefix
script = self.uploaded_code.script_name
# Modify hyperparameters in-place to point to the right code directory and script URIs
self._hyperparameters[DIR_PARAM_NAME] = code_dir
self._hyperparameters[SCRIPT_PARAM_NAME] = script
self._hyperparameters[CONTAINER_LOG_LEVEL_PARAM_NAME] = self.container_log_level
self._hyperparameters[JOB_NAME_PARAM_NAME] = self._current_job_name
self._hyperparameters[SAGEMAKER_REGION_PARAM_NAME] = self.sagemaker_session.boto_region_name
self._validate_and_set_debugger_configs()
def _validate_and_set_debugger_configs(self):
"""Set defaults for debugging."""
if self.debugger_hook_config is None and _region_supports_debugger(
self.sagemaker_session.boto_region_name
):
self.debugger_hook_config = DebuggerHookConfig(s3_output_path=self.output_path)
elif not self.debugger_hook_config:
# set hook config to False if _region_supports_debugger is False
self.debugger_hook_config = False
# Disable debugger if checkpointing is enabled by the customer
if self.checkpoint_s3_uri and self.checkpoint_local_path and self.debugger_hook_config:
if self._framework_name in {"mxnet", "pytorch", "tensorflow"}:
if self.instance_count > 1 or (
hasattr(self, "distribution")
and self.distribution is not None # pylint: disable=no-member
):
logger.info(
"SMDebug Does Not Currently Support \
Distributed Training Jobs With Checkpointing Enabled"
)
self.debugger_hook_config = False
if self.debugger_hook_config is False:
if self.environment is None:
self.environment = {}
self.environment[DEBUGGER_FLAG] = "0"
def _stage_user_code_in_s3(self):
"""Upload the user training script to s3 and return the location.
Returns: s3 uri
"""
local_mode = self.output_path.startswith("file://")
if self.code_location is None and local_mode:
code_bucket = self.sagemaker_session.default_bucket()
code_s3_prefix = "{}/{}".format(self._current_job_name, "source")
kms_key = None
elif self.code_location is None:
code_bucket, _ = parse_s3_url(self.output_path)
code_s3_prefix = "{}/{}".format(self._current_job_name, "source")
kms_key = self.output_kms_key
elif local_mode:
code_bucket, key_prefix = parse_s3_url(self.code_location)
code_s3_prefix = "/".join(filter(None, [key_prefix, self._current_job_name, "source"]))
kms_key = None
else:
code_bucket, key_prefix = parse_s3_url(self.code_location)
code_s3_prefix = "/".join(filter(None, [key_prefix, self._current_job_name, "source"]))
output_bucket, _ = parse_s3_url(self.output_path)
kms_key = self.output_kms_key if code_bucket == output_bucket else None
return tar_and_upload_dir(
session=self.sagemaker_session.boto_session,
bucket=code_bucket,
s3_key_prefix=code_s3_prefix,
script=self.entry_point,
directory=self.source_dir,
dependencies=self.dependencies,
kms_key=kms_key,
s3_resource=self.sagemaker_session.s3_resource,
settings=self.sagemaker_session.settings,
)
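# Summary of the four branches above (descriptive comment):
#   code_location unset, local output -> default bucket, no KMS key
#   code_location unset, S3 output    -> output bucket, output KMS key
#   code_location set,   local output -> code_location bucket, no KMS key
#   code_location set,   S3 output    -> code_location bucket; output KMS key
#                                        only when code and output buckets match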
def _model_source_dir(self):
"""Get the appropriate value to pass as ``source_dir`` to a model constructor.
Returns:
str: Either a local or an S3 path pointing to the ``source_dir`` to be
used for code by the model to be deployed
"""
if self.sagemaker_session.local_mode:
return self.source_dir
if self.uploaded_code is not None:
return self.uploaded_code.s3_prefix
return None
def _model_entry_point(self):
"""Get the appropriate value to pass as ``entry_point`` to a model constructor.
Returns:
str: The path to the entry point script. This can be either an absolute path or
a path relative to ``self._model_source_dir()``.
"""
if self.sagemaker_session.local_mode or (self._model_source_dir() is None):
return self.entry_point
if self.uploaded_code is not None:
return self.uploaded_code.script_name
return None
def hyperparameters(self):
"""Return the hyperparameters as a dictionary to use for training.
The :meth:`~sagemaker.estimator.EstimatorBase.fit` method, which
trains the model, calls this method to find the hyperparameters.
Returns:
dict[str, str]: The hyperparameters.
"""
return self._json_encode_hyperparameters(self._hyperparameters)
@classmethod
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):
"""Convert the job description to init params that can be handled by the class constructor.
Args:
job_details: the returned job details from a describe_training_job
API call.
model_channel_name (str): Name of the channel where pre-trained
model data will be downloaded
Returns:
dictionary: The transformed init_params
"""
init_params = super(Framework, cls)._prepare_init_params_from_job_description(
job_details, model_channel_name
)
init_params["entry_point"] = json.loads(
init_params["hyperparameters"].get(SCRIPT_PARAM_NAME)
)
init_params["source_dir"] = json.loads(init_params["hyperparameters"].get(DIR_PARAM_NAME))
init_params["container_log_level"] = json.loads(
init_params["hyperparameters"].get(CONTAINER_LOG_LEVEL_PARAM_NAME)
)
hyperparameters = {}
for k, v in init_params["hyperparameters"].items():
# Tuning jobs add this special hyperparameter which is not JSON serialized
if k == "_tuning_objective_metric":
if v.startswith('"') and v.endswith('"'):
v = v.strip('"')
hyperparameters[k] = v
else:
hyperparameters[k] = json.loads(v)
init_params["hyperparameters"] = hyperparameters
return init_params
def training_image_uri(self):
"""Return the Docker image to use for training.
The :meth:`~sagemaker.estimator.EstimatorBase.fit` method, which does
the model training, calls this method to find the image to use for model
training.
Returns:
str: The URI of the Docker image.
"""
if self.image_uri:
return self.image_uri
if hasattr(self, "distribution"):
distribution = self.distribution # pylint: disable=no-member
else:
distribution = None
compiler_config = getattr(self, "compiler_config", None)
if hasattr(self, "tensorflow_version") or hasattr(self, "pytorch_version"):
processor = image_uris._processor(self.instance_type, ["cpu", "gpu"])
is_native_huggingface_gpu = processor == "gpu" and not compiler_config
container_version = "cu110-ubuntu18.04" if is_native_huggingface_gpu else None
if self.tensorflow_version is not None: # pylint: disable=no-member
base_framework_version = (
f"tensorflow{self.tensorflow_version}" # pylint: disable=no-member
)
else:
base_framework_version = (
f"pytorch{self.pytorch_version}" # pylint: disable=no-member
)
else:
container_version = None
base_framework_version = None
return image_uris.retrieve(
self._framework_name,
self.sagemaker_session.boto_region_name,
instance_type=self.instance_type,
version=self.framework_version, # pylint: disable=no-member
py_version=self.py_version, # pylint: disable=no-member
image_scope="training",
distribution=distribution,
base_framework_version=base_framework_version,
container_version=container_version,
training_compiler_config=compiler_config,
)
@classmethod
def attach(cls, training_job_name, sagemaker_session=None, model_channel_name="model"):
"""Attach to an existing training job.
Create an Estimator bound to an existing training job. Each subclass
is responsible for implementing
``_prepare_init_params_from_job_description()``, as this method delegates
the actual conversion of a training job description to the arguments
that the class constructor expects. After attaching, if the training job
has a Complete status, it can be ``deploy()``-ed to create a SageMaker
Endpoint and return a ``Predictor``.
If the training job is in progress, attach will block until the training
job completes, but the training job's logs will not display. To see the
log content, call ``logs()``.
Examples:
>>> my_estimator.fit(wait=False)
>>> training_job_name = my_estimator.latest_training_job.name
Later on:
>>> attached_estimator = Estimator.attach(training_job_name)
>>> attached_estimator.logs()
>>> attached_estimator.deploy()
Args:
training_job_name (str): The name of the training job to attach to.
sagemaker_session (sagemaker.session.Session): Session object which
manages interactions with Amazon SageMaker APIs and any other
AWS services needed. If not specified, the estimator creates one
using the default AWS configuration chain.
model_channel_name (str): Name of the channel where pre-trained
model data will be downloaded (default: 'model'). If no channel
with the same name exists in the training job, this option will
be ignored.
Returns:
Instance of the calling ``Estimator`` Class with the attached
training job.
"""
estimator = super(Framework, cls).attach(
training_job_name, sagemaker_session, model_channel_name
)
# pylint gets confused thinking that estimator is an EstimatorBase instance, but it actually
# is a Framework or any of its derived classes. We can safely ignore the no-member errors.
estimator.uploaded_code = UploadedCode(
estimator.source_dir, estimator.entry_point # pylint: disable=no-member
)
return estimator
@staticmethod
def _json_encode_hyperparameters(hyperparameters):
"""Placeholder docstring"""
current_hyperparameters = hyperparameters
if current_hyperparameters is not None:
hyperparameters = {
str(k): (v if isinstance(v, (Parameter, Expression, Properties)) else json.dumps(v))
for (k, v) in current_hyperparameters.items()
}
return hyperparameters
@classmethod
def _update_init_params(cls, hp, tf_arguments):
"""Placeholder docstring"""
updated_params = {}
for argument in tf_arguments:
value = hp.pop(argument, None)
if value is not None:
value = json.loads(value)
updated_params[argument] = value
return updated_params
def transformer(
self,
instance_count,
instance_type,
strategy=None,
assemble_with=None,
output_path=None,
output_kms_key=None,
accept=None,
env=None,
max_concurrent_transforms=None,
max_payload=None,
tags=None,
role=None,
model_server_workers=None,
volume_kms_key=None,
entry_point=None,
vpc_config_override=vpc_utils.VPC_CONFIG_DEFAULT,
enable_network_isolation=None,
model_name=None,
):
"""Return a ``Transformer`` that uses a SageMaker Model based on the training job.
It reuses the SageMaker Session and base job name used by
the Estimator.
Args:
instance_count (int): Number of EC2 instances to use.
instance_type (str): Type of EC2 instance to use, for example,
'ml.c4.xlarge'.
strategy (str): The strategy used to decide how to batch records in
a single request (default: None). Valid values: 'MultiRecord'
and 'SingleRecord'.
assemble_with (str): How the output is assembled (default: None).
Valid values: 'Line' or 'None'.
output_path (str): S3 location for saving the transform result. If
not specified, results are stored to a default bucket.
output_kms_key (str): Optional. KMS key ID for encrypting the
transform output (default: None).
accept (str): The accept header passed by the client to
the inference endpoint. If it is supported by the endpoint,
it will be the format of the batch transform output.
env (dict): Environment variables to be set for use during the
transform job (default: None).
max_concurrent_transforms (int): The maximum number of HTTP requests
to be made to each individual transform container at one time.
max_payload (int): Maximum size of the payload in a single HTTP
request to the container in MB.
tags (list[dict]): List of tags for labeling a transform job. If
none specified, then the tags used for the training job are used
for the transform job.
role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``,
which is also used during transform jobs. If not specified, the
role from the Estimator will be used.
model_server_workers (int): Optional. The number of worker processes
used by the inference server. If None, the server will use one
worker per vCPU.
volume_kms_key (str): Optional. KMS key ID for encrypting the volume
attached to the ML compute instance (default: None).
entry_point (str): Path (absolute or relative) to the local Python source file which
should be executed as the entry point to training. If ``source_dir`` is specified,
then ``entry_point`` must point to a file located at the root of ``source_dir``.
If not specified, the training entry point is used.
vpc_config_override (dict[str, list[str]]): Optional override for
the VpcConfig set on the model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
enable_network_isolation (bool): Specifies whether container will
run in network isolation mode. Network isolation mode restricts
the container access to outside networks (such as the internet).
The container does not make any inbound or outbound network
calls. If True, a channel named "code" will be created for any
user entry script for inference. Also known as Internet-free mode.
If not specified, this setting is taken from the estimator's
current configuration.
model_name (str): Name to use for creating an Amazon SageMaker
model. If not specified, the estimator generates a default job name
based on the training image name and current timestamp.
Returns:
sagemaker.transformer.Transformer: a ``Transformer`` object that can be used to start a
SageMaker Batch Transform job.
"""
role = role or self.role
tags = tags or self.tags
model_name = self._get_or_create_name(model_name)
if self.latest_training_job is not None:
if enable_network_isolation is None:
enable_network_isolation = self.enable_network_isolation()
model = self.create_model(
role=role,
model_server_workers=model_server_workers,
entry_point=entry_point,
vpc_config_override=vpc_config_override,
model_kms_key=self.output_kms_key,
enable_network_isolation=enable_network_isolation,
name=model_name,
)
model._create_sagemaker_model(instance_type, tags=tags)
transform_env = model.env.copy()
if env is not None:
transform_env.update(env)
else:
logger.warning(
"No finished training job found associated with this estimator. Please make sure "
"this estimator is only used for building workflow config"
)
transform_env = env or {}
return Transformer(
model_name,
instance_count,
instance_type,
strategy=strategy,
assemble_with=assemble_with,
output_path=output_path,
output_kms_key=output_kms_key,
accept=accept,
max_concurrent_transforms=max_concurrent_transforms,
max_payload=max_payload,
env=transform_env,
tags=tags,
base_transform_job_name=self.base_job_name,
volume_kms_key=volume_kms_key,
sagemaker_session=self.sagemaker_session,
)
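# A minimal sketch of creating a batch transformer from a fitted estimator
# using ``transformer`` above. The instance type and S3 paths are
# hypothetical placeholders.
def _example_transformer(estimator):  # pragma: no cover
    transformer = estimator.transformer(
        instance_count=1,
        instance_type="ml.m5.xlarge",
        output_path="s3://my-bucket/batch-output",  # hypothetical
        strategy="MultiRecord",
        assemble_with="Line",
    )
    transformer.transform("s3://my-bucket/batch-input")  # hypothetical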
def _distribution_configuration(self, distribution):
"""Returns a dict of distribution configurations.
Args:
distribution (dict): A dictionary with information on how to run distributed training.
Returns:
dict: The distribution configuration derived from the ``distribution`` argument.
"""
distribution_config = {}
if "parameter_server" in distribution:
ps_enabled = distribution.get("parameter_server").get("enabled", False)
distribution_config[self.LAUNCH_PS_ENV_NAME] = ps_enabled
if "mpi" in distribution:
mpi_dict = distribution["mpi"]
mpi_enabled = mpi_dict.get("enabled", False)
distribution_config[self.LAUNCH_MPI_ENV_NAME] = mpi_enabled
if mpi_dict.get("processes_per_host"):
distribution_config[self.MPI_NUM_PROCESSES_PER_HOST] = mpi_dict.get(
"processes_per_host"
)
distribution_config[self.MPI_CUSTOM_MPI_OPTIONS] = mpi_dict.get(
"custom_mpi_options", ""
)
if get_mp_parameters(distribution):
distribution_config["mp_parameters"] = get_mp_parameters(distribution)
elif "modelparallel" in distribution.get("smdistributed", {}):
raise ValueError("Cannot use Model Parallelism without MPI enabled!")
if "smdistributed" in distribution:
# smdistributed strategy selected
smdistributed = distribution["smdistributed"]
smdataparallel_enabled = smdistributed.get("dataparallel", {}).get("enabled", False)
distribution_config[self.LAUNCH_SM_DDP_ENV_NAME] = smdataparallel_enabled
distribution_config[self.INSTANCE_TYPE] = self.instance_type
if smdataparallel_enabled:
distribution_config[self.SM_DDP_CUSTOM_MPI_OPTIONS] = smdistributed[
"dataparallel"
].get("custom_mpi_options", "")
return distribution_config
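# Illustration of the mapping performed by ``_distribution_configuration``
# above; the input dict is hypothetical, and the output is what the code
# produces for it on a "ml.p3.16xlarge" instance (assuming
# ``get_mp_parameters`` returns nothing when model parallelism is unset):
#
#   distribution = {
#       "mpi": {"enabled": True, "processes_per_host": 4},
#       "smdistributed": {"dataparallel": {"enabled": True}},
#   }
#
#   -> {
#       "sagemaker_mpi_enabled": True,
#       "sagemaker_mpi_num_of_processes_per_host": 4,
#       "sagemaker_mpi_custom_mpi_options": "",
#       "sagemaker_distributed_dataparallel_enabled": True,
#       "sagemaker_instance_type": "ml.p3.16xlarge",
#       "sagemaker_distributed_dataparallel_custom_mpi_options": "",
#   }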
def _s3_uri_prefix(channel_name, s3_data):
"""Placeholder docstring"""
if isinstance(s3_data, TrainingInput):
s3_uri = s3_data.config["DataSource"]["S3DataSource"]["S3Uri"]
else:
s3_uri = s3_data
if not s3_uri.startswith("s3://"):
raise ValueError("Expecting an s3 uri. Got {}".format(s3_uri))
return {channel_name: s3_uri[5:]}
# E.g. 's3://bucket/data' would return 'bucket/data'.
# Also accepts other valid input types, e.g. dict and TrainingInput.
def _s3_uri_without_prefix_from_input(input_data):
# Unpack an input_config object from a dict if a dict was passed in.
"""Placeholder docstring"""
if isinstance(input_data, dict):
response = {}
for channel_name, channel_s3_uri in input_data.items():
response.update(_s3_uri_prefix(channel_name, channel_s3_uri))
return response
if isinstance(input_data, str):
return _s3_uri_prefix("training", input_data)
if isinstance(input_data, TrainingInput):
return _s3_uri_prefix("training", input_data)
raise ValueError(
"Unrecognized type for S3 input data config - not str or TrainingInput: {}".format(
input_data
)
)
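# Illustration of the two helpers above:
#   _s3_uri_prefix("training", "s3://bucket/data")
#       -> {"training": "bucket/data"}
#   _s3_uri_without_prefix_from_input("s3://bucket/data")
#       -> {"training": "bucket/data"}
#   _s3_uri_without_prefix_from_input({"train": "s3://b/t", "test": "s3://b/v"})
#       -> {"train": "b/t", "test": "b/v"}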
| 47.445166
| 166
| 0.637046
|
231cf35ad6ccac3f2ddd0a960cf06228142d8e83
| 594
|
py
|
Python
|
conf/gunicorn-conf.py
|
dshatz/graphsense-REST
|
4f320206bf551c13aa1a42392684b0d4f1155dbb
|
[
"MIT"
] | null | null | null |
conf/gunicorn-conf.py
|
dshatz/graphsense-REST
|
4f320206bf551c13aa1a42392684b0d4f1155dbb
|
[
"MIT"
] | null | null | null |
conf/gunicorn-conf.py
|
dshatz/graphsense-REST
|
4f320206bf551c13aa1a42392684b0d4f1155dbb
|
[
"MIT"
] | null | null | null |
timeout = 300
capture_output = True
accesslog = '/home/dockeruser/gunicorn-access.log'
errorlog = '/home/dockeruser/gunicorn-error.log'
loglevel = 'debug'
bind = "0.0.0.0:9000"
secure_scheme_headers = {
'X-FORWARDED-PROTOCOL': 'ssl',
'X-FORWARDED-PROTO': 'https',
'X-FORWARDED-SSL': 'on'
}
def post_fork(server, worker):
server.log.info('Worker spawned (pid: %s)', worker.pid)
def pre_fork(server, worker):
pass
def pre_exec(server):
server.log.info('Forked child, re-executing.')
def when_ready(server):
server.log.info('Server is ready. Spawning workers')
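# A typical invocation for this config; the WSGI module path below is a
# hypothetical placeholder for the application's actual entry point:
#   gunicorn -c conf/gunicorn-conf.py myapp:app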
| 21.214286
| 59
| 0.690236
|
480ad3fe6e320015b24e5e41106dfb7b16cf1370
| 3,568
|
py
|
Python
|
pysteam/solver/lev_marq_gauss_newton_solver.py
|
utiasASRL/pysteam
|
c0c8809ee2a5e1dab5ce7f9e5ff9de91138ce68b
|
[
"BSD-3-Clause"
] | 5
|
2021-10-23T00:35:20.000Z
|
2022-03-22T02:32:43.000Z
|
pysteam/solver/lev_marq_gauss_newton_solver.py
|
utiasASRL/pysteam
|
c0c8809ee2a5e1dab5ce7f9e5ff9de91138ce68b
|
[
"BSD-3-Clause"
] | null | null | null |
pysteam/solver/lev_marq_gauss_newton_solver.py
|
utiasASRL/pysteam
|
c0c8809ee2a5e1dab5ce7f9e5ff9de91138ce68b
|
[
"BSD-3-Clause"
] | 1
|
2022-02-04T21:49:48.000Z
|
2022-02-04T21:49:48.000Z
|
import numpy as np
import numpy.linalg as npla
import scipy.linalg as spla
from . import GaussNewtonSolver
from ..problem import OptimizationProblem
class LevMarqGaussNewtonSolver(GaussNewtonSolver):
def __init__(self, problem: OptimizationProblem, **parameters) -> None:
super().__init__(problem, **parameters)
# override parameters
self._parameters.update({
"ratio_threshold": 0.25,
"shrink_coeff": 0.1,
"grow_coeff": 10.0,
"max_shrink_steps": 50,
})
self._parameters.update(**parameters)
self._diag_coeff = 1e-7
def linearize_solve_and_update(self):
# initialize new cost with old cost in case of failure
new_cost = self._prev_cost
# build the system
A, b = self.build_gauss_newton_terms()
grad_norm = npla.norm(b) # compute gradient norm for termination check
self._approx_hessian = A # keep a copy of the LHS (i.e., the approximated Hessian)
# perform LM search
num_tr_decreases = 0
num_backtrack = 0
step_success = False
while num_backtrack < self._parameters["max_shrink_steps"]:
try:
perturbation = self.solve_lev_marq(A, b)
decomp_success = True
except npla.LinAlgError:
decomp_success = False
if decomp_success:
proposed_cost = self.propose_update(perturbation)
actual_reduc = self._prev_cost - proposed_cost
predicted_reduc = self.predict_reduction(A, b, perturbation)
actual_to_predicted_ratio = actual_reduc / predicted_reduc
else:
actual_to_predicted_ratio = 0.0
if decomp_success and actual_to_predicted_ratio > self._parameters["ratio_threshold"]:
self.accept_proposed_state()
self._diag_coeff = max(self._diag_coeff * self._parameters["shrink_coeff"], 1e-7)
new_cost = proposed_cost
step_success = True
break
else:
if decomp_success:
self.reject_proposed_state()
self._diag_coeff = min(self._diag_coeff * self._parameters["grow_coeff"], 1e7)
num_tr_decreases += 1
num_backtrack += 1
# print report line if verbose option is enabled
if (self._parameters["verbose"]):
print("Iteration: {0:4} - Cost: {1:10.4f} - TR Shrink: {2:6.3f} - AvP Ratio: {3:6.3f}".format(
self._curr_iteration, new_cost, num_tr_decreases, actual_to_predicted_ratio))
return step_success, new_cost, grad_norm
def solve_lev_marq(self, A: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Solve the Levenberg–Marquardt system of equations:
A*x = b, A = (J^T*J + diagonalCoeff*diag(J^T*J))
"""
# augment diagonal of the 'hessian' matrix
if self._parameters["use_sparse_matrix"]:
A.setdiag(A.diagonal() * (1 + self._diag_coeff))
else:
np.fill_diagonal(A, np.diag(A) * (1 + self._diag_coeff))
# solve system
try:
lev_marq_step = self.solve_gauss_newton(A, b)
except npla.LinAlgError:
raise npla.LinAlgError('Decomposition Failure')
finally:
# revert diagonal of the 'hessian' matrix
if self._parameters["use_sparse_matrix"]:
A.setdiag(A.diagonal() / (1 + self._diag_coeff))
else:
np.fill_diagonal(A, np.diag(A) / (1 + self._diag_coeff))
return lev_marq_step
def predict_reduction(self, A: np.ndarray, b: np.ndarray, step: np.ndarray) -> float:
# grad^T * step - 0.5 * step^T * Hessian * step
grad_trans_step = b.T @ step
step_trans_hessian_step = step.T @ A @ step
return (grad_trans_step - 0.5 * step_trans_hessian_step)[0, 0]
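# A self-contained numeric sketch of the damped solve described in
# ``solve_lev_marq`` above: solving A x = b after scaling each diagonal
# entry of A by (1 + diag_coeff). The Jacobian and error values are
# made-up illustrative numbers, not part of the solver.
def _example_damped_solve():  # pragma: no cover
    J = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])  # hypothetical Jacobian
    e = np.array([[0.1], [-0.2], [0.05]])  # hypothetical error vector
    A = J.T @ J  # Gauss-Newton approximation of the Hessian
    b = -J.T @ e  # gradient-side term
    diag_coeff = 1e-3
    A_damped = A.copy()
    np.fill_diagonal(A_damped, np.diag(A) * (1 + diag_coeff))  # LM damping
    return npla.solve(A_damped, b)  # the damped step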
| 34.640777
| 106
| 0.671805
|
3ebc234f625ced8811c1ac107d6c5d05761c7f25
| 85
|
py
|
Python
|
guio/__init__.py
|
GeeTransit/guio
|
3099f8eafd4f3bab373f8874d88f2de29d25542a
|
[
"MIT"
] | 2
|
2019-08-23T01:28:27.000Z
|
2021-03-27T22:49:11.000Z
|
guio/__init__.py
|
GeeTransit/guio
|
3099f8eafd4f3bab373f8874d88f2de29d25542a
|
[
"MIT"
] | null | null | null |
guio/__init__.py
|
GeeTransit/guio
|
3099f8eafd4f3bab373f8874d88f2de29d25542a
|
[
"MIT"
] | null | null | null |
from .event import *
from .task import *
from .kernel import *
__version__ = "0.10"
| 14.166667
| 21
| 0.694118
|
44f378eb675cb0a298fe726d6f339e42549f92d3
| 246,411
|
py
|
Python
|
plots/convergence/plot_nmtf_convergences.py
|
ThomasBrouwer/BNMTF
|
34df0c3cebc5e67a5e39762b9305b75d73a2a0e0
|
[
"Apache-2.0"
] | 16
|
2017-04-19T12:04:47.000Z
|
2021-12-03T00:50:43.000Z
|
plots/convergence/plot_nmtf_convergences.py
|
ThomasBrouwer/BNMTF
|
34df0c3cebc5e67a5e39762b9305b75d73a2a0e0
|
[
"Apache-2.0"
] | 1
|
2017-04-20T11:26:16.000Z
|
2017-04-20T11:26:16.000Z
|
plots/convergence/plot_nmtf_convergences.py
|
ThomasBrouwer/BNMTF
|
34df0c3cebc5e67a5e39762b9305b75d73a2a0e0
|
[
"Apache-2.0"
] | 8
|
2015-12-15T05:29:43.000Z
|
2019-06-05T03:14:11.000Z
|
"""
Plot the convergence of the different NMTF algorithms in a single graph.
We run our method on the entire random dataset, so no test set.
We use a dataset of I=100, J=80, K=10, with unit mean priors and zero mean unit
variance noise.
We have the following methods:
- VB NMTF
- Gibbs NMTF
"""
import matplotlib.pyplot as plt
metrics = ['MSE']  # ,'R^2','Rp']
MSE_max = 4
iterations = range(1,1000+1)
folder = "../time_toy/"
# VB NMTF
#vb_all_performances = {'R^2': [0.5640738157611842, 0.9377691999406352, 0.9408616526339433, 0.9430220793430073, 0.9470802255399074, 0.9571789776719599, 0.9699605812528003, 0.9760635099999311, 0.9780471604119425, 0.9787745785769697, 0.979117655140942, 0.9793122320248447, 0.979442553107223, 0.9795472233135014, 0.979648952673204, 0.9797655930929889, 0.9799155560333175, 0.9801217342260696, 0.9804162662940422, 0.9808526205819487, 0.9819319610355989, 0.9883851974229043, 0.9927280833303324, 0.9943854996577983, 0.9951025786041116, 0.9954912151488043, 0.9957322765339365, 0.9958948665516809, 0.9960117359091515, 0.9961000533536801, 0.9961690968954828, 0.9962241531745667, 0.99626859811864, 0.9963048133316338, 0.9963345643705713, 0.9963591903469013, 0.9963797151827377, 0.9963969268499755, 0.9964114391780778, 0.9964237375900546, 0.996434209643847, 0.9964431599981138, 0.9964508155559995, 0.996457354367668, 0.9964629624582434, 0.9964678286215591, 0.9964721063969654, 0.996475907295325, 0.99647931219445, 0.9964823824310624, 0.9964851667312676, 0.996487705117511, 0.9964900311672186, 0.9964921734346631, 0.9964941564358143, 0.9964960013849578, 0.9964977267761304, 0.9964993488594914, 0.9965008820420387, 0.9965023392310542, 0.9965037321325098, 0.9965050715132098, 0.9965063674335122, 0.9965076294563019, 0.9965088668371014, 0.9965100886995745, 0.9965113042001487, 0.9965125226850421, 0.9965137538426868, 0.9965150078544832, 0.9965162955471342, 0.996517628550607, 0.9965190194670926, 0.9965204820579312, 0.9965220314567188, 0.9965236844164703, 0.9965254595953849, 0.9965273778790019, 0.9965294627288181, 0.9965317405443034, 0.9965342410312057, 0.9965369975820632, 0.9965400476870969, 0.9965434333988121, 0.9965472018720952, 0.9965514059981773, 0.9965561051490333, 0.9965613660480214, 0.9965672637796902, 0.9965738829443865, 0.9965813189522302, 0.9965896794384514, 0.9965990857683592, 0.9966096745817756, 0.996621599300098, 0.9966350314922495, 0.996650162001835, 0.9966672018239541, 0.9966863828669749, 0.9967079587212216, 0.9967322049970244, 0.9967594157860322, 0.9967898486203957, 0.9968234582482813, 0.9968600394848183, 0.9968997925125103, 0.9969429129051841, 0.9969894633351029, 0.9970390496467505, 0.9970907715916443, 0.9971441390111052, 0.9971988275554147, 0.9972544815671623, 0.9973106652893031, 0.9973668994104018, 0.9974227548883532, 0.9974778074542953, 0.9975315122761891, 0.9975833407602679, 0.9976330186925664, 0.9976803836313127, 0.9977252643229142, 0.9977674551442283, 0.9978067220965289, 0.997842913594705, 0.9978760084783472, 0.9979062234875882, 0.9979338070813549, 0.9979589365280958, 0.9979817851513406, 0.9980025423994768, 0.9980213908840214, 0.9980385023704782, 0.9980540422060519, 0.998068170573196, 0.9980810404176625, 0.9980927947554629, 0.9981035644323842, 0.9981134643810351, 0.9981225912678804, 0.9981310315540833, 0.9981388683467812, 0.9981461777978892, 0.9981530250938878, 0.9981594645089605, 0.9981655413284088, 0.9981712938324677, 0.9981767548673257, 0.998181952970361, 0.9981869130724069, 0.9981916568965817, 0.9981962032452021, 0.9982005683087833, 0.9982047660234573, 0.998208808440723, 0.998212706066437, 0.9982164681440581, 0.9982201028769475, 0.9982236175973503, 0.998227018894956, 0.9982303127177583, 0.9982335044549455, 0.998236599007962, 0.9982396008529871, 0.9982425140962873, 0.9982453425230426, 0.9982480896399896, 0.9982507587122063, 0.9982533527944328, 0.9982558747573481, 0.9982583273092119, 0.9982607130132615, 0.9982630343012271, 0.9982652934833643, 0.9982674927554878, 0.9982696342036269, 0.9982717198070855, 
0.9982737514407948, 0.9982757308778633, 0.9982776597930821, 0.9982795397678634, 0.9982813722967135, 0.9982831587949812, 0.9982849006073462, 0.998286599016399, 0.9982882552507024, 0.9982898704918727, 0.9982914458804258, 0.9982929825203287, 0.9982944814823463, 0.9982959438063724, 0.9982973705029683, 0.9982987625543319, 0.9983001209148931, 0.9983014465116915, 0.9983027402446546, 0.9983040029868545, 0.9983052355847971, 0.9983064388587723, 0.9983076136032801, 0.9983087605875367, 0.9983098805560608, 0.9983109742293322, 0.9983120423045171, 0.9983130854562497, 0.9983141043374615, 0.9983150995802418, 0.9983160717967194, 0.9983170215799447, 0.9983179495047556, 0.9983188561286076, 0.9983197419923511, 0.9983206076209412, 0.9983214535240735, 0.9983222801967421, 0.9983230881197275, 0.9983238777600265, 0.9983246495712377, 0.9983254039939213, 0.99832614145595, 0.9983268623728604, 0.9983275671482174, 0.9983282561739929, 0.9983289298309594, 0.9983295884890947, 0.9983302325079924, 0.9983308622372733, 0.9983314780169886, 0.998332080178013, 0.9983326690424192, 0.9983332449238355, 0.9983338081277798, 0.9983343589519735, 0.9983348976866314, 0.9983354246147319, 0.9983359400122664, 0.9983364441484703, 0.998336937286037, 0.9983374196813162, 0.9983378915844994, 0.9983383532397929, 0.9983388048855795, 0.9983392467545709, 0.9983396790739527, 0.9983401020655205, 0.9983405159458104, 0.9983409209262237, 0.9983413172131462, 0.9983417050080625, 0.998342084507667, 0.9983424559039703, 0.9983428193844011, 0.9983431751319055, 0.9983435233250402, 0.998343864138061, 0.9983441977410042, 0.9983445242997604, 0.998344843976135, 0.9983451569278963, 0.9983454633088038, 0.9983457632686134, 0.9983460569530538, 0.9983463445037694, 0.9983466260582231, 0.9983469017495589, 0.998347171706421, 0.9983474360527407, 0.9983476949075069, 0.9983479483845552, 0.9983481965924337, 0.9983484396344227, 0.9983486776088085, 0.9983489106095028, 0.9983491387270654, 0.9983493620501037, 0.9983495806669156, 0.9983497946671315, 0.9983500041430674, 0.9983502091905337, 0.9983504099089694, 0.9983506064009307, 0.998350798771102, 0.9983509871250609, 0.9983511715680246, 0.9983513522037443, 0.998351529133639, 0.9983517024561878, 0.9983518722665616, 0.9983520386564475, 0.998352201714017, 0.9983523615239931, 0.9983525181677794, 0.9983526717236249, 0.9983528222668043, 0.9983529698698028, 0.9983531146024947, 0.9983532565323145, 0.9983533957244161, 0.9983535322418174, 0.9983536661455348, 0.9983537974947034, 0.9983539263466867, 0.9983540527571751, 0.998354176780275, 0.9983542984685895, 0.998354417873291, 0.9983545350441878, 0.998354650029783, 0.9983547628773302, 0.9983548736328832, 0.9983549823413419, 0.9983550890464947, 0.9983551937910575, 0.9983552966167104, 0.9983553975641312, 0.9983554966730271, 0.9983555939821651, 0.9983556895293988, 0.998355783351696, 0.9983558754851626, 0.9983559659650671, 0.9983560548258632, 0.99835614210121, 0.9983562278239937, 0.9983563120263464, 0.9983563947396643, 0.9983564759946257, 0.9983565558212073, 0.9983566342486998, 0.9983567113057225, 0.9983567870202373, 0.9983568614195613, 0.9983569345303774, 0.9983570063787459, 0.9983570769901122, 0.9983571463893147, 0.9983572146005905, 0.9983572816475791, 0.998357347553325, 0.9983574123402773, 0.9983574760302877, 0.998357538644606, 0.9983576002038733, 0.9983576607281138, 0.9983577202367226, 0.9983577787484541, 0.9983578362814075, 0.9983578928530128, 0.9983579484800172, 0.9983580031784732, 0.9983580569637308, 0.9983581098504356, 0.9983581618525346, 0.9983582129832932, 0.9983582632553257, 
0.9983583126806415, 0.9983583612707091, 0.9983584090365404, 0.9983584559887941, 0.9983585021378966, 0.998358547494179, 0.9983585920680239, 0.9983586358700156, 0.9983586789110849, 0.9983587212026415, 0.9983587627566831, 0.9983588035858756, 0.9983588437035973, 0.9983588831239448, 0.9983589218617016, 0.9983589599322703, 0.9983589973515764, 0.9983590341359494, 0.9983590703019898, 0.9983591058664314, 0.998359140846004, 0.9983591752573063, 0.9983592091166904, 0.9983592424401632, 0.9983592752433054, 0.9983593075412093, 0.9983593393484327, 0.9983593706789696, 0.9983594015462347, 0.9983594319630591, 0.9983594619416964, 0.9983594914938354, 0.9983595206306187, 0.9983595493626659, 0.9983595777000978, 0.9983596056525639, 0.9983596332292688, 0.9983596604389993, 0.9983596872901507, 0.9983597137907517, 0.9983597399484886, 0.9983597657707273, 0.9983597912645348, 0.9983598164366984, 0.9983598412937438, 0.998359865841952, 0.9983598900873744, 0.9983599140358468, 0.9983599376930029, 0.9983599610642856, 0.9983599841549585, 0.998360006970116, 0.9983600295146919, 0.9983600517934694, 0.998360073811088, 0.9983600955720513, 0.9983601170807339, 0.9983601383413884, 0.9983601593581507, 0.9983601801350462, 0.9983602006759954, 0.9983602209848185, 0.9983602410652409, 0.9983602609208975, 0.9983602805553369, 0.9983602999720262, 0.9983603191743551, 0.998360338165639, 0.9983603569491234, 0.9983603755279875, 0.998360393905347, 0.9983604120842581, 0.9983604300677198, 0.9983604478586774, 0.9983604654600247, 0.9983604828746068, 0.9983605001052225, 0.998360517154626, 0.9983605340255295, 0.9983605507206041, 0.9983605672424823, 0.9983605835937585, 0.9983605997769905, 0.998360615794701, 0.9983606316493775, 0.9983606473434739, 0.99836066287941, 0.9983606782595734, 0.9983606934863182, 0.9983607085619663, 0.9983607234888074, 0.9983607382690984, 0.9983607529050643, 0.9983607673988978, 0.998360781752759, 0.9983607959687759, 0.9983608100490441, 0.9983608239956265, 0.9983608378105541, 0.9983608514958249, 0.9983608650534047, 0.998360878485227, 0.9983608917931931, 0.998360904979172, 0.9983609180450007, 0.9983609309924847, 0.9983609438233977, 0.9983609565394822, 0.9983609691424497, 0.9983609816339808, 0.9983609940157261, 0.9983610062893058, 0.9983610184563109, 0.9983610305183027, 0.9983610424768143, 0.99836105433335, 0.9983610660893861, 0.998361077746372, 0.9983610893057294, 0.9983611007688542, 0.9983611121371155, 0.9983611234118576, 0.9983611345943989, 0.9983611456860337, 0.998361156688032, 0.9983611676016404, 0.9983611784280817, 0.9983611891685565, 0.998361199824243, 0.9983612103962973, 0.9983612208858547, 0.9983612312940289, 0.9983612416219138, 0.9983612518705827, 0.9983612620410895, 0.9983612721344692, 0.9983612821517374, 0.9983612920938918, 0.9983613019619116, 0.9983613117567591, 0.9983613214793787, 0.9983613311306981, 0.9983613407116287, 0.9983613502230656, 0.998361359665888, 0.9983613690409597, 0.9983613783491294, 0.9983613875912309, 0.9983613967680836, 0.9983614058804925, 0.9983614149292488, 0.9983614239151304, 0.9983614328389013, 0.998361441701313, 0.9983614505031039, 0.9983614592450001, 0.9983614679277153, 0.9983614765519515, 0.9983614851183987, 0.9983614936277356, 0.9983615020806297, 0.9983615104777374, 0.9983615188197041, 0.9983615271071652, 0.9983615353407453, 0.9983615435210593, 0.9983615516487119, 0.998361559724298, 0.9983615677484033, 0.9983615757216043, 0.9983615836444679, 0.9983615915175524, 0.9983615993414074, 0.9983616071165741, 0.9983616148435847, 0.9983616225229636, 0.9983616301552274, 0.9983616377408844, 
]} # tail of the hardcoded VB convergence record (rest of the 'R^2' list, plus the full 'MSE' and 'Rp' lists) elided; the complete dictionary is loaded from nmtf_vb_performances.txt below
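# The hardcoded dictionary above records a previous run; the line below re-loads
# the same structure from disk. A hypothetical sketch of how such a performances
# file could be produced after a run (assuming a dict `performances` mapping
# metric names to lists of floats; a dict's str() repr round-trips through eval):
#
#     with open(folder+'nmtf_vb_performances.txt', 'w') as f:
#         f.write(str(performances))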
# Load the recorded VB NMTF convergence performances: a dict mapping metric
# names ('R^2', 'MSE', 'Rp') to lists of per-iteration values.
vb_all_performances = eval(open(folder+'nmtf_vb_performances.txt','r').read())
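# Note: eval() executes whatever the file contains. A minimal safer sketch,
# assuming nmtf_vb_performances.txt holds only a plain Python dict literal of
# float lists, is ast.literal_eval, which parses literals and cannot run code:
#
#     import ast
#     with open(folder+'nmtf_vb_performances.txt', 'r') as f:
#         vb_all_performances = ast.literal_eval(f.read())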
# Gibbs NMTF
#gibbs_all_performances = {'R^2': [...]} # hardcoded Gibbs NMTF convergence record from a previous run, kept commented out; per-iteration values elided
0.9981984300516221, 0.9981905998390109, 0.9981931343269584, 0.9981921704174753, 0.998189508311101, 0.9982044400389988, 0.9981994121383697, 0.9982005639465914, 0.9982047077320004, 0.9981983644758362, 0.9981939203053047, 0.9981978027718527, 0.9981966960222173, 0.9981926664503302, 0.9981909234930568, 0.9981848687041391, 0.9981922397623048, 0.9981910126978538, 0.9981938503742314, 0.9981919852336643, 0.9981968951146478, 0.9982132777643606, 0.9982071785086577, 0.9982024481471738, 0.9982072120264837, 0.9982004589130993, 0.9982118534655054, 0.9982096016230607, 0.9982057673932598, 0.9982211061835967, 0.998219835629201, 0.9982072509978421, 0.9982063412647688, 0.9982046405015331, 0.9981944879337978, 0.9981905715859221, 0.9982104790703172, 0.998201204269063, 0.9981981023450849, 0.9981965758004605, 0.9981963610826793, 0.9982040515634797, 0.9982100093171214, 0.9982120909550862, 0.9982174676939908, 0.9982199855277359, 0.9982021905995628, 0.9982082661040341, 0.9982090430207196, 0.9982071281207233, 0.9982125239089551, 0.9982193512107285, 0.9981945241107684, 0.9982035138826257, 0.9982015862606847, 0.998196021851928, 0.9981922281069316, 0.9981944612006275, 0.9981942835252187, 0.9981872123528658, 0.9981882720170917, 0.9981781331469265, 0.9981861476309893, 0.9982015164314209, 0.9982088173367852, 0.9982049190163267, 0.99819448875148, 0.9981897980140324, 0.9982012926880234, 0.9981934489214069, 0.9981989451661846, 0.9982004826666635, 0.9981982937730328, 0.9982172591929883, 0.9982148318080818, 0.9982063948050807, 0.9982037260475766, 0.9981939416665856, 0.9981993883057213, 0.9981996702638883, 0.9981810516800342, 0.998192783452122, 0.9981959568882076, 0.9981893967897392, 0.9981956819870111, 0.998211995233942, 0.9982081419710858, 0.9981968824678096, 0.9982078713373047, 0.9982098272509561, 0.9982079831995674, 0.9982051328005226, 0.9982055718098118, 0.9982010471978161, 0.998206993472456, 0.998200686249579, 0.998207679769837, 0.9982017583293732, 0.9982098076830461, 0.9982104707371886, 0.9982146515724106, 0.998218506510693, 0.9982096009688205, 0.9982091263468194, 0.9982050725929269, 0.9982177474713058, 0.9982319773693566, 0.9982156492880674, 0.9982153191249595, 0.9982022934742845], 'MSE': [731376.08233576361, 790.87265019894994, 77.356970371674095, 40.067221564157059, 36.655912244017209, 35.17451979102853, 33.510787886989682, 31.300831904070687, 28.401898632939719, 24.980806656160009, 21.510595580790138, 18.344351123383351, 16.221775230344612, 14.929288470780577, 14.179318610904282, 13.643579784749774, 13.229613188488861, 13.006899373343913, 12.75473003030813, 12.573889173647697, 12.553391117980523, 12.631895806686344, 12.573096008126718, 12.406148752675579, 12.395000590149136, 12.410857297711937, 12.371337693813107, 12.304163565008079, 12.248060841443692, 12.274147804745779, 12.241256170001918, 12.227101505193458, 12.279458302163373, 12.145228685071853, 12.128173045066033, 12.181145046561859, 12.236742539347045, 12.125656250843599, 12.152956923090416, 12.061427932628096, 12.081244023184849, 12.05196316887641, 11.923700525459042, 11.931789413756306, 11.815664894223577, 11.741242880428461, 11.566159334889502, 11.399199310945198, 11.235412944655772, 11.126253938112526, 10.80207268598812, 10.427545560873128, 10.162958199815295, 9.7487644059346312, 9.2781515911655603, 8.7851545897497214, 8.2317872435364059, 7.8354585906184839, 7.2004326824524068, 6.6444387167833714, 6.0888427362617374, 5.5627231204019658, 5.0429155763345461, 4.6172180270960048, 4.2813044332930286, 3.9315519135976449, 3.6953080645410146, 3.4278270324532247, 
3.2417610425776475, 3.0495678168364537, 2.9114360360857976, 2.7766674992745575, 2.679482686147002, 2.5700113295737381, 2.4953407548720343, 2.446867703429521, 2.3791895243794388, 2.3532514258485731, 2.3181283370867871, 2.2891586496675451, 2.2865003956203047, 2.2543580837410016, 2.2493997859831882, 2.2287376183199394, 2.2186639709906624, 2.2104307488975863, 2.2018761220002925, 2.194794886126219, 2.1842915545396648, 2.1930199889554385, 2.1803128335604356, 2.1757520678457025, 2.1674588643551171, 2.1652568779071291, 2.1707755650132716, 2.1501608225582856, 2.1514170357618729, 2.1616254596111335, 2.146218278870315, 2.1552764715255051, 2.1593254229883097, 2.1545971671159125, 2.1501985501266678, 2.1643582094617826, 2.1437103229799548, 2.121885968457224, 2.136991543059573, 2.1227824195390128, 2.1094362032036797, 2.1284766172209388, 2.1207602551340639, 2.1227537579054272, 2.1365942617656839, 2.1369342134114886, 2.1319207082531459, 2.1059378314913024, 2.1281971350749158, 2.1167827448918879, 2.120242143288217, 2.1341801436850654, 2.1341256507389739, 2.1301839280964519, 2.1396700816421985, 2.1333903621085302, 2.1355748726680126, 2.1639122581146033, 2.1414230978245659, 2.1478725372091376, 2.1376262347765524, 2.1215613668455195, 2.1127283548051681, 2.1215309298162732, 2.1166021602614431, 2.1200102540941042, 2.1165006120332963, 2.1142771558456972, 2.1195686579213024, 2.1232365504125488, 2.1200431016267021, 2.1278771458928705, 2.1266059476113823, 2.1317980930520664, 2.1222446247002122, 2.1305343545383528, 2.1245257581090042, 2.1333786134748802, 2.1210737000684143, 2.1327338368560627, 2.1159419994452646, 2.1257109853814731, 2.1369045697839657, 2.1222679435098879, 2.126470427264842, 2.1305528108390543, 2.1222256353594697, 2.123753725843696, 2.1195600816692406, 2.1295547772029777, 2.1400037178041753, 2.1229879311448454, 2.125988214670643, 2.1220715095960103, 2.1313667372394005, 2.1293647915495608, 2.1148274174525286, 2.1175745575637412, 2.1304797586351598, 2.1226683347512414, 2.1349600733165568, 2.1276336398943991, 2.1224819531101069, 2.1328822765464821, 2.1310049986640194, 2.1450302920455964, 2.1334948610881468, 2.1262717383529925, 2.1208716773502796, 2.1447375640392004, 2.1277787486501034, 2.1227939913899689, 2.1160439222764542, 2.1416858028772063, 2.1248657835098457, 2.1212130886801206, 2.1204097563410387, 2.1286684981524284, 2.1401491611749024, 2.1240049216968564, 2.1128394455699118, 2.1169699743472128, 2.117310807164583, 2.1036515916380791, 2.1117593187742925, 2.1091564463032135, 2.0903885792363699, 2.1083181192216398, 2.1237031913564843, 2.127375807850163, 2.1164968988124615, 2.1132818164663294, 2.1208836956520285, 2.1255340630812598, 2.108085023261947, 2.1223716398044843, 2.1367672519392369, 2.1196465460490512, 2.1228661919418341, 2.1231035731087982, 2.1163269688011446, 2.111196524632343, 2.1179670499967864, 2.1195771624835378, 2.131412529729519, 2.114712253816613, 2.129967972560519, 2.1377240828233766, 2.1194917120644727, 2.1160334185553475, 2.1236751823664237, 2.1139808188497811, 2.1159704367214771, 2.1259747598484355, 2.1207857314689389, 2.1083476289760283, 2.1177915188003942, 2.1097704899042786, 2.1182817855338789, 2.1310911186373906, 2.1299180454743736, 2.1151232649468605, 2.1177000396244634, 2.1190730381209013, 2.1198732148530746, 2.1212064826831689, 2.1126420792514966, 2.123461938867047, 2.1136926331639425, 2.1149599198089732, 2.139081751720084, 2.138780255462164, 2.1129277192959788, 2.1183000063354189, 2.1160784622785322, 2.1258707949417461, 2.1148137957235202, 2.1098063513798442, 
2.1177562861916126, 2.114623898666625, 2.1137596580998728, 2.1114642132775989, 2.1188249795840046, 2.1131904065662601, 2.1208123087650805, 2.1313979241015577, 2.1307010034562284, 2.1186135837395521, 2.1151212726988544, 2.1239369862282338, 2.1308504084505695, 2.1137722784327471, 2.1103652432139954, 2.1043906576134317, 2.1058264134152105, 2.1110906813765138, 2.1254175932882466, 2.1338909293340049, 2.1251112575195208, 2.1197907718680451, 2.1290320634256257, 2.130097334398465, 2.1338942292937775, 2.1365156550718587, 2.1313288980781064, 2.1284618928130379, 2.145244008478576, 2.1420967310695405, 2.1437305196766578, 2.1352791679859595, 2.1184430000171361, 2.1109012453918043, 2.1148759033198945, 2.127947630243332, 2.1290809795466497, 2.1112466123544404, 2.1041937665632173, 2.1371813094718211, 2.127613450138897, 2.1124497097160941, 2.1128646216167617, 2.1155700872270935, 2.1078214455298498, 2.10489193989658, 2.1104608751290534, 2.1179535876051214, 2.1361482484235297, 2.1362961867579364, 2.1430819871352371, 2.1255407094148708, 2.1174378717532591, 2.1114542043942293, 2.135407760210748, 2.1154739661804176, 2.1103056590025893, 2.1119352548798922, 2.1189112539608415, 2.1247107101587321, 2.120930974304811, 2.1338316075286987, 2.1314157649375929, 2.1266510557808811, 2.1161262683304098, 2.121312803915429, 2.1276200834453656, 2.1184238833580915, 2.1248876753176043, 2.1166675428846222, 2.1296125061610223, 2.1275740444589673, 2.1153336435073946, 2.1075055034961121, 2.1186218338124125, 2.1293319650710676, 2.0993601301783125, 2.1190399262355979, 2.111295983229271, 2.1264454016054577, 2.1098782037960477, 2.1119631790865827, 2.1111937241093037, 2.1280738620359907, 2.1240286482349964, 2.1153912626196947, 2.1211765602975352, 2.1276649923165043, 2.1224608702241921, 2.1259695253816218, 2.1191582142256311, 2.117594524316289, 2.1185576136932878, 2.1270815716526572, 2.124194226187182, 2.1096905475305858, 2.1152007570814275, 2.1212286429707787, 2.1221181409639498, 2.1173004053010418, 2.1138756365339657, 2.1148029687268153, 2.110052055368473, 2.1181466463149046, 2.1179965159766447, 2.1036681937480712, 2.1062636063818561, 2.1133326969992368, 2.1108855248692922, 2.1095133604327678, 2.099978290548413, 2.1169200930396879, 2.1116155906758736, 2.1147663404661596, 2.0989443521674418, 2.1015413262882414, 2.1202561960950299, 2.1028001997452646, 2.1012233798170157, 2.0993197147927822, 2.1116500560901295, 2.1253764535385953, 2.1302289182027851, 2.1205095494093453, 2.116147544948356, 2.128622764978501, 2.1235022487250235, 2.1211874290067985, 2.1265895256527743, 2.1335130484242826, 2.1153729832920041, 2.118416745142182, 2.1264363898740446, 2.1205798974240464, 2.125800380339653, 2.1233964390186664, 2.1180494548157767, 2.1111301412316221, 2.1109491321651377, 2.1134355841086281, 2.127378393890619, 2.1271819056382926, 2.1149190987391213, 2.1132081518545252, 2.1125914093487785, 2.109471703240513, 2.1114641684651385, 2.1123593227627553, 2.1059593941787753, 2.0974067978347657, 2.1128971341383598, 2.1123066279922167, 2.0972205147005925, 2.1011635858905544, 2.1072319049037422, 2.1028470153653047, 2.1003236371752356, 2.1089392242662286, 2.1064808011167218, 2.1015892732360646, 2.1156045062819056, 2.1253914701015382, 2.1273301379824829, 2.1125597806989029, 2.099005824055189, 2.1053916640839612, 2.1109497030378486, 2.1071495317124813, 2.0984248796758531, 2.0982445493782507, 2.0979333812536813, 2.1087388389412398, 2.1202639841575968, 2.1053544258449728, 2.1130093397385576, 2.1065166822319794, 2.1168450314318412, 2.1161929464640572, 
2.1167166008652147, 2.1104228834145347, 2.0940988019124918, 2.1023611945501068, 2.1098851354758237, 2.1057685313255807, 2.1093664439040887, 2.1012912146508578, 2.0919942447006612, 2.0939299527865685, 2.1028179274524228, 2.1133874475776642, 2.1213404469076589, 2.122621084034416, 2.1050279502127505, 2.0984206765928319, 2.1092998387318036, 2.0920685821924048, 2.0954664635824969, 2.0930840562969326, 2.1012196492544031, 2.096586354062723, 2.0907928120143366, 2.0913320998288447, 2.078219502169623, 2.0921953965876021, 2.08577747402297, 2.0805926225521523, 2.0911748210183818, 2.0914958290488235, 2.091746453159903, 2.1008768803349573, 2.0955442206465786, 2.0870839664473286, 2.0979051524298131, 2.1010347853544031, 2.0902643193973298, 2.088176526127552, 2.0926537954444262, 2.0927441346818392, 2.0796127309695791, 2.0938573665334004, 2.0974009928186552, 2.0985864397239093, 2.0903999987469351, 2.0756262303310709, 2.0852750683414039, 2.083674317659133, 2.0871429569225137, 2.0799087317455656, 2.0758011864676615, 2.0814296225980917, 2.0728246815092777, 2.0645418099174364, 2.0768339568633523, 2.0641607046079109, 2.0521185812370617, 2.0535994472892942, 2.0542112185883252, 2.0593859875327047, 2.0501163894519863, 2.0412221819255842, 2.0418314456704594, 2.0481182302622867, 2.0292308557048164, 2.0320642169220395, 2.0336402147951382, 2.022342751050012, 2.0188644996418685, 2.0246542328080142, 2.0163526566231162, 2.0113347959052246, 2.0138348903648624, 2.0044868489112724, 2.0023166982582534, 1.9960955853061064, 1.986622802384987, 1.9926318588877254, 1.9710204707749621, 1.9762250350794768, 1.9791162476752453, 1.954462794359886, 1.966590196729519, 1.96350237676577, 1.9510908391988062, 1.947156739618652, 1.9379742282597379, 1.9259872381705931, 1.9097707266053425, 1.9206741613617877, 1.8990821407416301, 1.8899409547169581, 1.88072146964151, 1.865483133414408, 1.8561568572066813, 1.8446010379963753, 1.8200227209867186, 1.8300027649922179, 1.8046554107157167, 1.782217490771947, 1.7850469919203689, 1.7618659784530257, 1.7549891908617214, 1.7362324802226035, 1.7289198331792295, 1.7150510831238659, 1.7055238038806328, 1.6800053308288869, 1.6714729850478929, 1.644081142829994, 1.616442892586845, 1.6061682000876016, 1.5906319503229991, 1.5813267884877136, 1.5538510945848805, 1.5526559338585633, 1.5180393410041033, 1.5114808482662665, 1.4873979139698208, 1.4753973032895504, 1.4548605499984408, 1.4358679090699169, 1.4173541560784035, 1.4016956637366662, 1.3888552450917104, 1.3676982658594681, 1.3493984218941875, 1.3289074963387895, 1.32025554153466, 1.3093009298322096, 1.3003603218128206, 1.2868994675860088, 1.2717191038637916, 1.2652025660914499, 1.2587930210660734, 1.249441049682807, 1.2517740668805688, 1.2381356592573323, 1.2277972963982131, 1.2151934067328718, 1.2114414675379344, 1.201410943010272, 1.1937418435013896, 1.1994262014367627, 1.1864989679799236, 1.1803223368543929, 1.1781238172618327, 1.1721011060947004, 1.1656370659750235, 1.1585039553887393, 1.16782558148861, 1.162561131082966, 1.1493195615762495, 1.1476081659914332, 1.142155179387033, 1.1423563785965472, 1.1488845957996685, 1.1428224488436676, 1.1338470999765518, 1.1383873136915781, 1.1336197772667287, 1.1355468459539129, 1.1317897691258707, 1.1291678187721372, 1.1336808604721311, 1.1281045591495942, 1.1267211597159972, 1.1299394588567886, 1.1233834587109921, 1.1275176091480918, 1.1178037051656018, 1.1178608034994297, 1.123961653940418, 1.1217702103779945, 1.122350197045076, 1.1191837566506426, 1.1170155946210678, 1.1186600890544964, 1.1209661331783507, 
1.1197843305037183, 1.1127034338700907, 1.1090246589153525, 1.1102172657248868, 1.1098538546418015, 1.1060166509674532, 1.0962576240757995, 1.0991779503621317, 1.1082797279447774, 1.0987975804908239, 1.0972115106545288, 1.0951707265296424, 1.0995257261982581, 1.0932957927090301, 1.097366932160448, 1.0959462684472876, 1.0959274282478737, 1.094821592757832, 1.0876938132304912, 1.0869667633807896, 1.0747587786914081, 1.0790894828087969, 1.0811070329559722, 1.0830799670954396, 1.0867100359686013, 1.0852322103043961, 1.0786425636911936, 1.0850583883693363, 1.0800032191105189, 1.0850512280999987, 1.0883788319194412, 1.0828710186341521, 1.0844745177082242, 1.0763931367960857, 1.075748199322305, 1.0774133518276678, 1.0758154854665531, 1.0743418462784202, 1.0748733685414387, 1.0727237182335314, 1.0732334010227649, 1.0709427186025591, 1.0648922587088232, 1.0613998525844914, 1.0642385196838184, 1.0645953689030137, 1.0703323185165721, 1.0639979860071349, 1.0614239862799859, 1.0559045174025827, 1.0542959245537007, 1.0618171017456295, 1.0691150924206627, 1.0647114344923074, 1.06778528386219, 1.0705047745195047, 1.0691381418253059, 1.0671455407767307, 1.0636131278907621, 1.065102280911604, 1.059865143532928, 1.0669901203883143, 1.0647117828326267, 1.0678533255285971, 1.0754189704669466, 1.0666932526160873, 1.0666954865139262, 1.0652863246839868, 1.066884574991936, 1.0642063013285419, 1.0603607965968509, 1.0688423461040364, 1.0664921096879312, 1.0602633580607268, 1.0612495871922545, 1.0579870529427007, 1.0672775892448321, 1.0699757985206189, 1.0629333697722081, 1.0641761474888245, 1.0672415624806979, 1.0651260027842515, 1.0707463658594563, 1.0705346055379392, 1.0650580696045184, 1.061362215605083, 1.0550309301150655, 1.05666663421055, 1.0530504988157015, 1.0487055688591538, 1.052713954886767, 1.051395060362029, 1.048878108784786, 1.0620281729538961, 1.0576402433876115, 1.0557553862796831, 1.0561759996810316, 1.0628338008608316, 1.0546444813832516, 1.0640518090166244, 1.0572620197393707, 1.0640828930925275, 1.0587405835369315, 1.0652159020005152, 1.0599788140879178, 1.0626866605799821, 1.0694544532022361, 1.0608831163509027, 1.0546904912932045, 1.0567164689530508, 1.0483893159074775, 1.049894479664867, 1.053255543959295, 1.0543126627109918, 1.0510518339409496, 1.0501560043558091, 1.0546676452217001, 1.0517809669896079, 1.0522434958007152, 1.05117633312042, 1.0595530650109124, 1.0547572307333484, 1.0557443896211698, 1.0542478231764147, 1.0500173308404277, 1.0527380492785019, 1.0495513175525903, 1.0413748444084427, 1.055312730585567, 1.0529547415879505, 1.050322504923894, 1.0457486809170367, 1.0530902977943462, 1.0566866471987508, 1.0590877753614174, 1.0472313977612844, 1.0519225812604041, 1.0483367859661104, 1.0447608791369873, 1.0500887562933359, 1.0565181581910559, 1.0473598748189361, 1.0515669505556886, 1.0444566441305911, 1.0446205108691122, 1.0495770461183349, 1.0536032895154634, 1.0551748786504838, 1.0562670223459691, 1.0657156796231566, 1.0625433735314602, 1.0631037499978553, 1.0550815274022838, 1.0548512907584717, 1.0429923512941299, 1.0497474268870741, 1.0523938698655013, 1.0487420411602395, 1.0552317428889679, 1.0540281612278184, 1.0588539009674987, 1.0596695602520712, 1.0566258225408343, 1.053945758934864, 1.0556220886941274, 1.0537307928274715, 1.0488802763647627, 1.0415730561283532, 1.0477754368221444, 1.0422839827786348, 1.0391431261243018, 1.0446105990989103, 1.0425476776315259, 1.0394931414269208, 1.0405002030548063, 1.0401593094696664, 1.0460481928487084, 1.0452835370524149, 
1.0469423957991459, 1.0462488024461005, 1.0455268322151114, 1.0415502079163517, 1.043533784736683, 1.0385336559448533, 1.0367376436622666, 1.0382160037803825, 1.0393376655806863, 1.0406600790134357, 1.0427485840634332, 1.0475384617917742, 1.0454069629046452, 1.0471987621444048, 1.0448557417569464, 1.0409214774501911, 1.0416061020975116, 1.0458870795582851, 1.050767852490756, 1.0495374949584824, 1.0435638639510143, 1.0396605358109057, 1.0334325768824901, 1.0447567234846749, 1.0480124291735831, 1.0503479194560701, 1.0435508817254804, 1.0533848369374523, 1.0548664582528322, 1.058812841316308, 1.0499267658246576, 1.0559478179443254, 1.0550087117440083, 1.0466119126598463, 1.0455646468700948, 1.0458652487094877, 1.0424867543460339, 1.03653420577139, 1.0416107435263191, 1.0369282778771896, 1.0394015626525053, 1.0348299767039735, 1.0386743965520704, 1.0333263684858511, 1.0348516783396244, 1.0385237626354049, 1.0426121305984859, 1.043967629024525, 1.0383121122820067, 1.0412805968960246, 1.0360429469448487, 1.0337058485179944, 1.0346992953511738, 1.0335507564158326, 1.0324687572091964, 1.030177991932107, 1.0409422724802395, 1.0410963272925355, 1.033271274466848, 1.0390647232130492, 1.0409168494636247, 1.0416139270399027, 1.0372921952610699, 1.0285583127034985, 1.0307124557549658, 1.0316095031720793, 1.0442227636090555, 1.0413061265915593, 1.0374231519323642, 1.0352129936133661, 1.0398437930842688, 1.0302499911240013, 1.0291248377390581, 1.0359011584941795, 1.0275451598453413, 1.0341062315285394, 1.0331601790301062, 1.0237092954427796, 1.0292043883092494, 1.0313094010500292, 1.0350173159860088, 1.0359413255925316, 1.0389919409995048, 1.0359056685323582, 1.0398115219358082, 1.0428326246513158, 1.0325195170725221, 1.03727530681142, 1.0350525325394906, 1.0325980327523365, 1.0260360635691677, 1.0343958003573033, 1.0276232836717498, 1.0276287020843162, 1.0281658337779813, 1.0340249548148921, 1.0339809305543932, 1.0357771235423736, 1.0388353029754025, 1.0330967388851915, 1.0390466290670672, 1.0370509206848555, 1.0356315497043762, 1.0354745667407204, 1.029492247333609, 1.0337963930933556, 1.0277988861857439, 1.0303478002191546, 1.0343038926306187, 1.0325320756848959, 1.0370197980116524, 1.0355672093382988, 1.0361196538660513, 1.0376453843673603, 1.0290875773203454, 1.031969213238805, 1.0313090784852077, 1.0289341546842459, 1.032569659072889, 1.035116742314194, 1.0328915879441931, 1.0335258983129851, 1.0358353630264594, 1.0368343024752888, 1.0403044779045956, 1.0360799103297889, 1.0367831766146491, 1.0351568218439546, 1.0362257880880501, 1.0334117926570532, 1.0240224201664936, 1.0275180808022562, 1.030229188392604, 1.0274988707624368, 1.0313692762309374, 1.0248387272183974, 1.0261293235428164, 1.0283268320317234, 1.0195357256751105, 1.020263917310817, 1.0274765351446959, 1.0279979295981305, 1.0289726864181914, 1.034791417934388, 1.0370359906776669, 1.0256264330289657, 1.0309420910744467, 1.0327198938217794, 1.0335948008943794, 1.0337178618926861, 1.0293102239261067, 1.0258956622325117, 1.0247026150401781, 1.0216210497718168, 1.0201780061056456, 1.0303767964106523, 1.026894748295732, 1.0264494747727699, 1.027546959591255, 1.024454476599995, 1.020541551610489, 1.0347706838615489, 1.0296183844439428, 1.0307231605786262, 1.0339122848913644, 1.0360865903627035, 1.034806739490949, 1.0349085704294403, 1.0389612647329043, 1.0383539403802202, 1.0441648214210546, 1.0395714877720512, 1.0307631817583527, 1.026578820791979, 1.0288130614986959, 1.0347909492969567, 1.0374793472010682, 1.0308914155995486, 
1.0353869061204466, 1.0322368485643667, 1.0313556623732092, 1.0326101809035662, 1.021740547753601, 1.0231317525626153, 1.0279672440899617, 1.029496786546934, 1.0351044995435543, 1.0319828724219928, 1.0318212740051467, 1.042492125309308, 1.0357683059004974, 1.0339495174868161, 1.0377093004996598, 1.0341070713488003, 1.0247574756088351, 1.0269658924947032, 1.0334190409274848, 1.0271210005769114, 1.0260000096411237, 1.0270568890644309, 1.0286905355653166, 1.0284389265877896, 1.0310321131691877, 1.0276241304249689, 1.0312389830899407, 1.0272307934579996, 1.030624543014407, 1.0260112245788258, 1.0256312089870914, 1.0232350508619037, 1.0210256737405687, 1.0261296985068757, 1.026401718164212, 1.0287250422875398, 1.0214607012646617, 1.0133051332921355, 1.022663231033899, 1.0228524571026842, 1.0303178359190206], 'Rp': [0.022067520736128395, 0.15958169091611438, 0.93012846814255246, 0.96462498618581904, 0.96757056336650815, 0.96892230852715933, 0.97034575911748977, 0.97237559982636268, 0.97492364868452441, 0.97796969037744896, 0.98105698972708755, 0.98386982381049892, 0.98574703063574209, 0.98689295061034299, 0.98755472976219816, 0.9880313736530224, 0.98839262782090864, 0.98859327107425876, 0.98881106733219193, 0.9889698219950378, 0.98898819064513377, 0.98891891862484038, 0.98897702289028844, 0.98912915447959981, 0.98913305610592028, 0.98912106186682069, 0.98915409673114751, 0.98921714570529717, 0.98925719267419532, 0.98923792286873891, 0.98926331281425817, 0.98928512364107557, 0.98923192372313185, 0.98934775706047196, 0.98936370192416989, 0.98931749333424446, 0.9892681750677087, 0.98936562292872865, 0.98934176358618009, 0.98942462295878719, 0.98940692449256595, 0.9894317907336122, 0.9895461594207825, 0.98953819814580846, 0.98963912980119939, 0.98970432184005663, 0.98985840549699755, 0.99001276195210464, 0.99015063163689787, 0.99024960739436796, 0.99053200478153902, 0.99086141292182472, 0.9910943350633864, 0.99145935652062633, 0.99187900786776606, 0.99231131695657027, 0.99279261593155477, 0.99314685185819418, 0.99370198337294768, 0.99418701063193915, 0.99467493545741847, 0.99513656061281952, 0.99559143416106621, 0.995964669472999, 0.99625821359783784, 0.99656455897685903, 0.99677225008573256, 0.99700630686276848, 0.99716814549215094, 0.99733735185452566, 0.99745705359671144, 0.99757598515103596, 0.99765996894844844, 0.99775603433841975, 0.99782188354755019, 0.99786381017698411, 0.99792224568811094, 0.9979470229895766, 0.9979756495874581, 0.99800126284110646, 0.99800328749662048, 0.9980319196504126, 0.99803693814566419, 0.99805443784382852, 0.99806371528633553, 0.99807120462973076, 0.99807728688821995, 0.99808361144328539, 0.99809342304818693, 0.99808517298832211, 0.99809650725318699, 0.99810031877227345, 0.99810824096260475, 0.99810923992275402, 0.99810457274613096, 0.99812245455157433, 0.99812138944709561, 0.99811323930100659, 0.99812727286917136, 0.99811849732371682, 0.99811488719000108, 0.99811940134379118, 0.99812244110650461, 0.99811040495207071, 0.99812846510245368, 0.99814859076211393, 0.99813433557221998, 0.998147411270216, 0.99815823010267213, 0.99814162913706328, 0.99814819003392452, 0.99814768144692745, 0.9981351618425619, 0.9981341800668182, 0.99813854179842387, 0.99816146835103881, 0.99814188052971342, 0.99815180518475066, 0.99814859880117834, 0.99813649174437713, 0.99813741656796506, 0.99814108382310351, 0.99813325035088662, 0.99813838394580767, 0.9981352164685291, 0.99811086758435286, 0.99813023427964265, 0.99812443137278928, 0.99813403893455888, 0.99814784561434899, 
0.99815556615639445, 0.99814841691982059, 0.99815196558029029, 0.99814925391223597, 0.99815208120866972, 0.99815380106682683, 0.99814967508607355, 0.99814600342923465, 0.99814910501293885, 0.99814275967809185, 0.99814302764567142, 0.99813916640016276, 0.99814706689371036, 0.99814013431318727, 0.99814505406635112, 0.99813712658902054, 0.99814825902340898, 0.99813826415510842, 0.99815258609483459, 0.99814487362575532, 0.99813423561088521, 0.99814692218930423, 0.99814418594061205, 0.99814023916854011, 0.99814690011321194, 0.99814658241577991, 0.99814991750168824, 0.99814044942100566, 0.99813262184500251, 0.99814719884662639, 0.99814422892827137, 0.99814741276930363, 0.99813910681870632, 0.99814151824374109, 0.99815396854424732, 0.99815176604317124, 0.99813971604822305, 0.99814647692315772, 0.99813644780514033, 0.99814327103761613, 0.99814815241191335, 0.99813795810556005, 0.99813942662366484, 0.99812699988067188, 0.99813767272450804, 0.99814363019153951, 0.99814895390309399, 0.9981283149207365, 0.99814252179963903, 0.99814715598827553, 0.99815248090343089, 0.99813169356123665, 0.99814495831049244, 0.99814816177040788, 0.99814937466628262, 0.99814139587679274, 0.99813228195723891, 0.99814564998736288, 0.99815556596828547, 0.99815198959577978, 0.99815170568682177, 0.99816396054060363, 0.99815608757716701, 0.99815915969205238, 0.99817559411661549, 0.99816015659625079, 0.99814653403258491, 0.99814456058842349, 0.99815235111902056, 0.99815486670227138, 0.99814864161727757, 0.99814439549214251, 0.99815927420535999, 0.99814694184662422, 0.99813453218384607, 0.99814925633342355, 0.99814651752914008, 0.99814636007353597, 0.99815222317293872, 0.99815678758602022, 0.9981512544923854, 0.99814979659746639, 0.99813910733221833, 0.99815355660380278, 0.99814093836689666, 0.99813340414242357, 0.99814965125045829, 0.99815261763122964, 0.99814616952326829, 0.99815437902613979, 0.99815272651842124, 0.99814358935570746, 0.99814838222368585, 0.99815928703292678, 0.99815092481726519, 0.99815773949873654, 0.99815097831154265, 0.99813920520706112, 0.99814051809619464, 0.99815305847115388, 0.99815093288224099, 0.99814964743572032, 0.99814933420360319, 0.99814928915535184, 0.99815540086607413, 0.99814606108810378, 0.99815494325388743, 0.99815344138051321, 0.99813375655282544, 0.99813520619355367, 0.99815538900708023, 0.99815046388374906, 0.9981527216189926, 0.99814563991292815, 0.9981544892664781, 0.99815783133581892, 0.99815121492652936, 0.99815351618845671, 0.99815446683003861, 0.99815737389874626, 0.9981499529612905, 0.9981555235381927, 0.99814822484821153, 0.99813946590188363, 0.99813957648250573, 0.99815079110642091, 0.99815327879867011, 0.99814595371919501, 0.99813955378715191, 0.99815491159489722, 0.99815780073478622, 0.99816250171038423, 0.99816173332628289, 0.99815732220859976, 0.99814461026506984, 0.99813691950322436, 0.99814475831145189, 0.99814938294745337, 0.99814098321279643, 0.99814012889943793, 0.99813742218004686, 0.99813481679098048, 0.99813985169066233, 0.99814150419032976, 0.99812692711401729, 0.99812984856559805, 0.99812851763986676, 0.99813600911244715, 0.99815099476748093, 0.99815757490859691, 0.99815337360805634, 0.9981422042521757, 0.9981415442116629, 0.99815789192611948, 0.9981638298315344, 0.99813379755773746, 0.99814339007187969, 0.99815658592922352, 0.99815514838677144, 0.99815267541455188, 0.99816058776235694, 0.99816221473226419, 0.99815733485013014, 0.99815091602153738, 0.99813490632554636, 0.99813459552369066, 0.99812945248169993, 0.99814396827335772, 0.99815114208205069, 
0.99815628920227339, 0.99813559370802774, 0.99815310868405138, 0.9981576621779219, 0.99815606953880531, 0.99815044202342096, 0.99814473446341057, 0.99814804434743054, 0.99813706404808811, 0.99813897205782509, 0.99814364183288706, 0.99815281021195112, 0.99814870553581014, 0.99814259683560747, 0.99815022818901278, 0.99814471344830225, 0.99815210675511379, 0.99814126743927656, 0.99814234266008062, 0.99815403637178657, 0.99815972346869364, 0.99815089627948639, 0.99814102745096833, 0.99816716843519715, 0.9981505979049351, 0.99815674934952148, 0.99814458424110031, 0.99815787512227472, 0.99815601135273835, 0.99815723413732316, 0.99814195144617257, 0.99814600146302923, 0.99815285652233254, 0.99814794589917355, 0.99814209749447103, 0.99814750793087637, 0.99814389032480577, 0.99815022188043789, 0.99815112623051649, 0.99815026112104011, 0.99814297990435252, 0.99814512507131892, 0.99815804406311481, 0.99815306109628466, 0.99814839952158507, 0.99814770079822535, 0.99815130579284583, 0.99815486932819053, 0.99815450884993151, 0.9981584700909808, 0.99815137130912235, 0.99815111333215301, 0.99816323562106934, 0.99816085393793597, 0.99815506007748012, 0.99815684209632383, 0.99815882847127124, 0.99816668351511251, 0.99815255494710309, 0.99815764780297489, 0.99815473459304793, 0.99816731276004822, 0.99816564682133035, 0.99814896078290338, 0.99816460902255111, 0.99816520944925913, 0.99816721131543551, 0.99815637316335692, 0.99814424196945806, 0.99814020000286507, 0.99814876907759875, 0.99815280301969278, 0.99814198382651176, 0.99814644086269044, 0.99814923009965328, 0.99814315523716479, 0.99813767674642251, 0.9981539024879923, 0.99815032909188628, 0.99814366057216541, 0.9981496894284746, 0.99814471567479679, 0.99814589012098698, 0.99815056368552602, 0.99815678611230363, 0.99815696038318014, 0.99815494660115711, 0.99814342991185245, 0.99814290324695421, 0.99815346253009662, 0.99815519710333978, 0.998155553996244, 0.99815844273845356, 0.99815682975921671, 0.99815596786291139, 0.99816126653610959, 0.99816985946599601, 0.99815629804018025, 0.99815570048552249, 0.99816974581807316, 0.99816531300083999, 0.99816097462645159, 0.99816398109923365, 0.99816647800768366, 0.99815867202049657, 0.9981607161015098, 0.99816500694854171, 0.99815275957639515, 0.99814454952034282, 0.9981426981543472, 0.99815550575859568, 0.99816726046710169, 0.99816240650620058, 0.9981568599621905, 0.99816021496490692, 0.99816814583050906, 0.99816782920508496, 0.99816928730326959, 0.99815869310849936, 0.99814914065808202, 0.99816170530969162, 0.99815515095269447, 0.99816107392119402, 0.99815166424778323, 0.9981525156727904, 0.99815241049650871, 0.99815726230541224, 0.99817177434469539, 0.9981653139140364, 0.99815843606595001, 0.9981612894929579, 0.99815839289449315, 0.99816542664442898, 0.9981738309210062, 0.99817260296663368, 0.99816432096543706, 0.99815495914548757, 0.99814767935763038, 0.99814715157407186, 0.99816275726550086, 0.99816800067226841, 0.9981582176918643, 0.9981733115318614, 0.99817026642720552, 0.99817231934543593, 0.9981667662613416, 0.99816979891383484, 0.99817443313483678, 0.99817567154320364, 0.99818561767114544, 0.99817328797775517, 0.99817886156025981, 0.99818335555575821, 0.99817428831542976, 0.99817395915522977, 0.99817456806793103, 0.99816625140783255, 0.99817043563382646, 0.99817788939309993, 0.99816832464675631, 0.998165509484359, 0.99817487966536866, 0.99817676270675304, 0.99817314845992111, 0.99817321991778973, 0.99818419452537133, 0.99817235382536196, 0.99817032563233177, 0.99816826945474191, 0.99817473881740826, 
0.99818786364214118, 0.99817936221039727, 0.99818092624691857, 0.99817779777680338, 0.99818392344688722, 0.99818747921020856, 0.99818334281623422, 0.99819052126716579, 0.99819770034099553, 0.99818654540979512, 0.99819877731884266, 0.99820823943134684, 0.99820782670857333, 0.99820836397866197, 0.99820359834837069, 0.99820987663965155, 0.99821767025288, 0.99821726879471462, 0.99821204235079242, 0.99822818054367468, 0.99822581756668027, 0.99822481504961269, 0.99823491291403399, 0.9982371996809708, 0.99823218572164985, 0.99823995439764179, 0.99824419717587187, 0.99824159876608587, 0.99825015084260171, 0.99825213405499968, 0.9982570878544007, 0.99826555730559741, 0.99826160823396148, 0.99827903777875115, 0.99827452377445702, 0.99827196318984868, 0.99829376645552437, 0.99828398088637338, 0.99828633363937791, 0.99829725280475134, 0.99829991665701545, 0.99830835287039588, 0.99831861693516755, 0.99833291622235842, 0.99832308261065039, 0.99834187013573317, 0.99835034406127976, 0.99835839285448558, 0.99837293225115342, 0.99837962831400251, 0.99838967478487584, 0.99841126919192191, 0.99840227374069423, 0.99842438712344306, 0.99844448692972909, 0.99844200509559156, 0.99846208821821658, 0.99846848059356696, 0.99848546783625369, 0.99849112273210883, 0.99850318742099586, 0.99851100568850082, 0.99853465958622256, 0.99854085549321003, 0.99856595880542576, 0.9985892633947745, 0.99859799400664528, 0.99861152588558677, 0.9986195920066957, 0.99864414686604952, 0.99864542963961023, 0.99867481962130877, 0.99868054861465039, 0.99870186302143982, 0.99871207110816529, 0.99873009662355461, 0.99874658943365469, 0.9987632416473865, 0.99877652931910543, 0.99878812677638817, 0.9988062795298448, 0.99882429699945419, 0.99884033888836465, 0.99884776328805436, 0.99885751035817816, 0.99886519381727101, 0.99887668265812912, 0.99888999886791574, 0.99889570816091811, 0.99890193911863157, 0.9989095945871076, 0.99890782394973188, 0.99891973631069586, 0.99892838512380711, 0.99893976710971821, 0.99894271218163699, 0.99895187967552379, 0.9989588226760987, 0.99895440282466985, 0.99896612666759921, 0.99897022991356854, 0.99897180964858245, 0.99897695702379963, 0.99898263365068318, 0.99898929826478844, 0.99898074453878449, 0.9989852665348522, 0.99899732153871978, 0.99899865974053659, 0.99900317049941512, 0.99900303871208362, 0.99899845463675141, 0.99900254786774667, 0.99901034276368794, 0.99900672282465597, 0.99901078999201509, 0.99900902794953061, 0.99901224444503089, 0.99901468660333947, 0.99901050508563394, 0.99901552327353049, 0.9990167855842339, 0.99901419094096955, 0.99901952096738866, 0.99901682716620199, 0.99902476465370804, 0.99902436757572022, 0.99901967355141563, 0.99902125012131227, 0.99902076518659944, 0.99902376194562936, 0.99902609273247267, 0.99902377997221015, 0.99902182568448028, 0.9990228495608322, 0.99902895179735351, 0.99903209366041756, 0.99903225085350233, 0.99903178396698034, 0.99903469176946857, 0.99904362497121113, 0.99904174454808403, 0.99903275318720708, 0.99904139192532149, 0.999042467843829, 0.99904440957324625, 0.99904056047248357, 0.99904603539067105, 0.99904295650310282, 0.99904347683395545, 0.99904367702175711, 0.99904513334956724, 0.9990516644039279, 0.99905169814193984, 0.99906209962172643, 0.99905836305858831, 0.99905642829547048, 0.99905488323886626, 0.99905242731099442, 0.99905315887103174, 0.99905866054274994, 0.99905324608540103, 0.99905740221763828, 0.99905329926384234, 0.99905125703226194, 0.99905489382819179, 0.99905433751158568, 0.99906062179997335, 0.99906129488990114, 0.99905982163307971, 
0.99906133343261616, 0.99906256295524298, 0.99906267500544632, 0.99906390816808777, 0.99906350519656772, 0.99906542902126594, 0.99907059763488371, 0.99907459838356605, 0.99907140604795475, 0.99907091339885279, 0.99906581712507869, 0.99907162725165621, 0.99907386977244705, 0.99907885318684186, 0.99907989055860158, 0.99907334305389195, 0.9990676672344696, 0.99907085321703804, 0.99906832183729388, 0.99906565245961665, 0.99906689409157956, 0.99906906430834574, 0.99907206079331834, 0.99907053008303581, 0.99907494441238731, 0.99906898250642251, 0.99907112832122558, 0.99906812664805777, 0.99906156491385523, 0.99906926925570827, 0.99906953755256023, 0.999070564265929, 0.99906938403916123, 0.99907131733625998, 0.99907457906046482, 0.9990675655423803, 0.99906945633544464, 0.99907461263389952, 0.99907390866136403, 0.999076839835499, 0.99906885227730768, 0.99906655519160481, 0.99907232825035597, 0.9990711921290707, 0.99906908928278415, 0.99907051378812672, 0.9990656335480631, 0.99906639247612061, 0.99907056679210871, 0.99907367220615173, 0.99907944679408167, 0.99907790359479332, 0.99908091880241756, 0.9990847689504796, 0.99908133561351931, 0.9990830272663862, 0.9990848756945131, 0.99907332671117688, 0.99907751362718566, 0.99907864129009505, 0.99907819538258003, 0.99907260118767738, 0.99907982034954668, 0.99907131103149571, 0.9990782062627841, 0.99907136803253516, 0.99907599430211225, 0.99907050842394773, 0.99907492773002271, 0.99907291443912472, 0.99906727540581253, 0.99907406363297069, 0.99907957146786974, 0.99907801919743067, 0.99908519651784089, 0.99908367185008762, 0.99908149593978135, 0.99908032683654535, 0.99908294822409305, 0.99908353975315256, 0.99907998046181146, 0.9990820596620974, 0.99908189519795343, 0.99908263581957202, 0.9990760297259742, 0.99908021233077626, 0.99907873024415428, 0.99908068468657873, 0.99908395715333487, 0.99908143481680745, 0.99908405085932206, 0.99909118170879563, 0.99907906295921112, 0.99908103623663014, 0.99908351937142903, 0.99908810365901335, 0.9990817902233462, 0.99907774453190223, 0.9990762151250242, 0.99908599997809744, 0.99908203196984591, 0.99908612425311771, 0.9990883067860501, 0.99908360929871209, 0.99907790689563392, 0.99908596152739715, 0.99908225762364944, 0.99908908600394419, 0.99908837039171172, 0.99908396668766375, 0.99908045702699122, 0.99907933651197844, 0.99907889467916988, 0.99906995900061402, 0.99907441180756285, 0.999072674768958, 0.99907927509846894, 0.99907965827077516, 0.99908968853777047, 0.99908413578061828, 0.99908150153548625, 0.99908471528476206, 0.9990792808828437, 0.99908045317308114, 0.99907667471546524, 0.99907515276124614, 0.99907794615437262, 0.99908055675243801, 0.99907868802578226, 0.9990803667512107, 0.99908469197087679, 0.99909142534825202, 0.99908576288818174, 0.99909041874552473, 0.99909305546058158, 0.99908891194608374, 0.99909018878730471, 0.99909362699946858, 0.99909200387103569, 0.99909227342117146, 0.99908735983475871, 0.99908901810609752, 0.99908643974552125, 0.99908715965829165, 0.99908764794982252, 0.99909130965901993, 0.99908929738295571, 0.99909381068043746, 0.9990958625966091, 0.99909453189615416, 0.99909305595999276, 0.99909208309001662, 0.99908994297170861, 0.99908613506916388, 0.99908766072387367, 0.99908638682270923, 0.99908817622189738, 0.99909174698638625, 0.99909183159781789, 0.99908717401135672, 0.99908331989863197, 0.99908425747415885, 0.99908920638799126, 0.99909286013631493, 0.99909823983418578, 0.99908832804252989, 0.99908567204753318, 0.99908389587883872, 0.99908927192046726, 0.99908091540602817, 
0.9990800913693112, 0.99907600025137311, 0.99908379723776652, 0.99907892734020876, 0.99908019904748679, 0.99908652935256681, 0.99908782883533553, 0.99908804710161636, 0.99909035853089689, 0.99909558526340503, 0.99909088982339, 0.99909553762225323, 0.99909330085982351, 0.99909857670752911, 0.99909431410428196, 0.9990983412498543, 0.9990967978804427, 0.99909360086536614, 0.99909027041634169, 0.99909055784681833, 0.99909380000784409, 0.99909156978201574, 0.99909594831022974, 0.99909842053619335, 0.99909757309979741, 0.99909813755986909, 0.99909947470832938, 0.99910093697024627, 0.9990917320506606, 0.99909206984844545, 0.99909820697873408, 0.99909339023334576, 0.99909151043113642, 0.99909165558639978, 0.99909495496239331, 0.99910231099751357, 0.99910071922117449, 0.99909988937582939, 0.99908867498030163, 0.99909153792960981, 0.99909512267810496, 0.99909707292003458, 0.99909244264259123, 0.99910083283528095, 0.99910188905198716, 0.99909602303417433, 0.99910336666572963, 0.99909859180222793, 0.99909830347165562, 0.99910701333992002, 0.99910179971503843, 0.99909990313833474, 0.99909704905614038, 0.99909600023222733, 0.99909329717141604, 0.99909587945772593, 0.99909296380670132, 0.99908988576166902, 0.99909884899755319, 0.99909471641472591, 0.99909697229149386, 0.99909876388683172, 0.99910456917595136, 0.99909726358476503, 0.99910416380220846, 0.99910323527488243, 0.99910265500548123, 0.99909771301157946, 0.99909760471896847, 0.99909694749423272, 0.99909357156853706, 0.99909841512008268, 0.99909324500582586, 0.99909515716801189, 0.99909621673794724, 0.99909629585763782, 0.99910189600957144, 0.99909773870762386, 0.99910295466761356, 0.99910085093326673, 0.99909759389408881, 0.99909910491304588, 0.99909501305932291, 0.99909621239454316, 0.99909603926427915, 0.99909504316258535, 0.99910209464136779, 0.99910002556807598, 0.9991001266970061, 0.99910217664796164, 0.99909936008360611, 0.99909683334459054, 0.99909853982642438, 0.99909824416968229, 0.9990962898377489, 0.99909635742915326, 0.99909342665078937, 0.9990959532367113, 0.99909541615350406, 0.99909654384608959, 0.99909621295730644, 0.99909837894004505, 0.99910636698574529, 0.99910321239178745, 0.99910088311147416, 0.99910365493273035, 0.99910100165256666, 0.99910561234566897, 0.99910472487929058, 0.99910258925566253, 0.99911017776952815, 0.9991099608506856, 0.99910334357657526, 0.99910277923154556, 0.99910210567167168, 0.99909690685433461, 0.99909502508331982, 0.99910491732438256, 0.99910114631726488, 0.99909929274634823, 0.99909794367437665, 0.99909930115187295, 0.9991018023533601, 0.99910487812284199, 0.9991061590448066, 0.99910864708580327, 0.9991096293039321, 0.99910099717532175, 0.9991037830773889, 0.99910413722140567, 0.99910364365551629, 0.99910587289046493, 0.99910930256733654, 0.99909696788196822, 0.99910163942915464, 0.99910052258633941, 0.99909768694047574, 0.99909590391100911, 0.99909683239829783, 0.99909687134129999, 0.9990934874814047, 0.9990937764533051, 0.99908910630652192, 0.99909315111931019, 0.9991005526947796, 0.99910410081730172, 0.99910221271586497, 0.99909701091499259, 0.99909465831137656, 0.99910040165993297, 0.99909712259542549, 0.99909933322250455, 0.99910027464713391, 0.99909877325378804, 0.99910837191013546, 0.99910710300922156, 0.99910283967946356, 0.99910163794058837, 0.99909695051484937, 0.9990995998198795, 0.99909945368084596, 0.99909012633759775, 0.99909666614065518, 0.99909770358536187, 0.99909446637394739, 0.99909801202265125, 0.99910570611156924, 0.99910367669210087, 0.9990981028405258, 0.99910445525137948, 
0.99910519102201234, 0.99910359304421392, 0.9991024303673437, 0.99910240262925676, 0.99910020632074004, 0.99910363549594761, 0.99910072513849435, 0.99910382899984906, 0.99910066858330837, 0.99910454524331294, 0.99910494137913497, 0.99910693806100703, 0.99910886370771224, 0.99910443161948725, 0.99910432927742432, 0.99910268338088393, 0.99910851581613658, 0.99911562422446487, 0.99910772610011922, 0.99910736241312204, 0.99910096485343858]}
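# The dict literal closing above tracks per-iteration convergence metrics under
# keys such as 'R^2' (coefficient of determination), 'MSE' (mean squared error)
# and 'Rp' (Pearson correlation). A minimal sketch, assuming the conventional
# definitions of these three metrics; the helper name compute_performances and
# the use of numpy here are illustrative assumptions, not taken from this script.
import numpy

def compute_performances(R_true, R_pred):
    # Flatten both matrices so the metrics are computed over all entries.
    true = numpy.asarray(R_true, dtype=float).ravel()
    pred = numpy.asarray(R_pred, dtype=float).ravel()
    mse = ((true - pred) ** 2).mean()             # mean squared error
    ss_res = ((true - pred) ** 2).sum()           # residual sum of squares
    ss_tot = ((true - true.mean()) ** 2).sum()    # total sum of squares
    r2 = 1. - ss_res / ss_tot                     # coefficient of determination
    rp = numpy.corrcoef(true, pred)[0, 1]         # Pearson correlation coefficient
    return {'R^2': r2, 'MSE': mse, 'Rp': rp}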
# Load the Gibbs-sampling run's per-iteration performances (presumably the same
# {'R^2': [...], 'MSE': [...], 'Rp': [...]} format as above) from a text file.
gibbs_all_performances = eval(open(folder+'nmtf_gibbs_performances.txt','r').read())
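# A safer equivalent (a suggested alternative, not part of the original script):
# the file holds a plain dict literal, so ast.literal_eval can parse it without
# executing arbitrary code, and the with-block closes the file handle promptly.
import ast
with open(folder+'nmtf_gibbs_performances.txt', 'r') as fin:
    gibbs_all_performances = ast.literal_eval(fin.read())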
# Non-probabilistic NMTF
#np_all_performances = {'R^2': [0.6258765455951327, 0.671376745958256, 0.6803219120927243, 0.6856724319385931, 0.6897914419619472, 0.693147668097625, 0.6959811377094869, 0.6984460090216679, 0.7006472286777821, 0.7026587622894724, 0.7045341232137792, 0.7063128114672601, 0.7080244204252755, 0.7096913436703565, 0.7113306059462803, 0.7129551283280895, 0.7145746192465009, 0.7161962140253131, 0.7178249437779788, 0.7194640883103198, 0.7211154507582819, 0.7227795804176518, 0.7244559624367721, 0.7261431874292845, 0.727839109816731, 0.7295410003444678, 0.7312456954532567, 0.7329497438997568, 0.7346495491655097, 0.7363415047974591, 0.7380221189312353, 0.7396881239019524, 0.7413365670539753, 0.7429648795783304, 0.7445709213344134, 0.7461530010054735, 0.7477098724206034, 0.7492407092763647, 0.7507450616601901, 0.7522227986132095, 0.7536740414235877, 0.7550990924172131, 0.7564983637560747, 0.7578723102382867, 0.759221369399819, 0.7605459114267878, 0.7618462005669655, 0.7631223689326495, 0.7643744028513428, 0.765602141268819, 0.7668052851552061, 0.7679834164162722, 0.7691360244737355, 0.7702625384534849, 0.7713623628105186, 0.7724349142236626, 0.7734796577072913, 0.7744961401017674, 0.7754840194036234, 0.7764430887593917, 0.7773732943474946, 0.7782747467825799, 0.7791477260677306, 0.7799926804674834, 0.7808102199582613, 0.7816011051203922, 0.7823662324620537, 0.7831066172126196, 0.7838233745994262, 0.7845177005410143, 0.785190852566753, 0.7858441316237655, 0.7864788652720837, 0.7870963926111524, 0.7876980511349074, 0.7882851655857301, 0.7888590387734287, 0.7894209442452303, 0.7899721206358733, 0.7905137674910102, 0.7910470423391899, 0.791573058784101, 0.7920928853962438, 0.7926075451982648, 0.7931180155583303, 0.7936252283285828, 0.7941300700894178, 0.7946333823835751, 0.7951359618461358, 0.7956385601569888, 0.7961418837608885, 0.7966465933170376, 0.7971533028551979, 0.7976625786290102, 0.7981749376697514, 0.7986908460554478, 0.7992107169213317, 0.7997349082484082, 0.8002637204773996, 0.8007973940056683, 0.8013361066349949, 0.8018799710479642, 0.8024290324001834, 0.8029832661242544, 0.8035425760488937, 0.8041067929424296, 0.8046756735935928, 0.8052489005433014, 0.8058260825786728, 0.8064067560938706, 0.8069903874113532, 0.8075763761410697, 0.8081640596338084, 0.8087527185584095, 0.8093415836010034, 0.8099298432483335, 0.8105166525778709, 0.8111011429357434, 0.8116824323418015, 0.812259636421115, 0.8128318796257386, 0.8133983064818715, 0.8139580925784748, 0.8145104550058159, 0.815054661958226, 0.8155900412352294, 0.816115987408929, 0.8166319674719317, 0.8171375248367072, 0.8176322816207924, 0.8181159392187842, 0.8185882772273158, 0.8190491508490999, 0.8194984869530633, 0.8199362790067936, 0.8203625811234141, 0.8207775014768911, 0.8211811953384532, 0.8215738579735175, 0.8219557176156512, 0.8223270287040902, 0.8226880655369757, 0.8230391164562444, 0.823380478644368, 0.8237124535796017, 0.8240353431664814, 0.8243494465328649, 0.8246550574641626, 0.8249524624297042, 0.8252419391451161, 0.825523755607727, 0.8257981695388076, 0.8260654281662844, 0.8263257682837581, 0.8265794165257485, 0.8268265898043415, 0.8270674958585787, 0.827302333874432, 0.8275312951398783, 0.8277545637059902, 0.8279723170311158, 0.8281847265907942, 0.8283919584410933, 0.8285941737274107, 0.8287915291345256, 0.8289841772767139, 0.8291722670292185, 0.8293559438041922, 0.8295353497755956, 0.8297106240584029, 0.8298819028479529, 0.8300493195254495, 0.8302130047355416, 0.8303730864415927, 0.8305296899638277, 0.8306829380050492, 
0.8308329506679721, 0.8309798454676971, 0.8311237373422034, 0.8312647386632126, 0.8314029592492389, 0.8315385063821848, 0.8316714848284339, 0.8318019968650442, 0.831930142311349, 0.8320560185660513, 0.8321797206496808, 0.8323013412521895, 0.8324209707853084, 0.8325386974392585, 0.8326546072433337, 0.832768784129889, 0.8328813100012444, 0.8329922647990048, 0.8331017265753845, 0.8332097715660716, 0.8333164742642625, 0.8334219074954975, 0.8335261424929767, 0.8336292489730788, 0.8337312952108127, 0.8338323481149934, 0.8339324733029623, 0.8340317351746711, 0.8341301969860091, 0.8342279209212609, 0.8343249681646088, 0.8344213989705929, 0.834517272733503, 0.8346126480556397, 0.8347075828144304, 0.8348021342283858, 0.8348963589218892, 0.8349903129888252, 0.8350840520550502, 0.835177631339733, 0.8352711057155774, 0.8353645297679433, 0.8354579578529047, 0.835551444154261, 0.8356450427395304, 0.83573880761494, 0.8358327927794561, 0.8359270522778468, 0.8360216402528213, 0.8361166109962259, 0.8362120189993376, 0.8363079190022087, 0.8364043660421132, 0.8365014155010184, 0.8365991231521018, 0.8366975452052544, 0.8367967383515347, 0.8368967598065128, 0.8369976673524296, 0.8370995193791071, 0.8372023749234825, 0.8373062937076752, 0.837411336175451, 0.8375175635269245, 0.8376250377513541, 0.8377338216578031, 0.8378439789035215, 0.8379555740197406, 0.8380686724346903, 0.8381833404935153, 0.8382996454748106, 0.8384176556034277, 0.8385374400591902, 0.8386590689811287, 0.8387826134668127, 0.8389081455663162, 0.8390357382703296, 0.839165465491903, 0.839297402041246, 0.8394316235930064, 0.8395682066453881, 0.8397072284704412, 0.8398487670548468, 0.8399929010304237, 0.840139709593636, 0.8402892724132509, 0.8404416695253756, 0.8405969812149678, 0.8407552878829868, 0.840916669898252, 0.8410812074331463, 0.841248980282219, 0.8414200676628075, 0.8415945479967634, 0.8417724986724477, 0.8419539957861119, 0.8421391138619463, 0.8423279255500258, 0.8425205013015631, 0.842716909020936, 0.8429172136940899, 0.8431214769930866, 0.8433297568567467, 0.843542107047522, 0.8437585766850092, 0.8439792097567662, 0.8442040446074155, 0.8444331134073539, 0.8446664416027854, 0.8449040473491941, 0.8451459409308576, 0.8453921241694491, 0.8456425898253405, 0.8458973209957441, 0.8461562905143882, 0.8464194603580439, 0.8466867810657559, 0.8469581911772688, 0.8472336166976483, 0.847512970595673, 0.8477961523440345, 0.8480830475098007, 0.848373527403925, 0.8486674487988458, 0.8489646537232818, 0.8492649693433373, 0.8495682079388256, 0.8498741669833697, 0.8501826293362937, 0.850493363553598, 0.8508061243243566, 0.8511206530378093, 0.8514366784850338, 0.8517539176976575, 0.8520720769243642, 0.8523908527441609, 0.8527099333134055, 0.8530289997415705, 0.8533477275885591, 0.8536657884742199, 0.8539828517884703, 0.8542985864882418, 0.8546126629652357, 0.8549247549664247, 0.8552345415471878, 0.8555417090351449, 0.8558459529811926, 0.8561469800728705, 0.8564445099843159, 0.8567382771365423, 0.8570280323418775, 0.8573135443070551, 0.8575946009708252, 0.8578710106540459, 0.8581426030030407, 0.8584092297105115, 0.8586707650025107, 0.8589271058846257, 0.8591781721456115, 0.8594239061219854, 0.8596642722322785, 0.8598992562946918, 0.8601288646464356, 0.8603531230869504, 0.8605720756703725, 0.8607857833748224, 0.860994322677429, 0.8611977840642966, 0.8613962705041279, 0.8615898959127144, 0.8617787836335441, 0.8619630649569946, 0.8621428776976507, 0.8623183648459068, 0.8624896733067144, 0.8626569527349368, 0.8628203544736819, 0.8629800305989854, 
0.8631361330716703, 0.8632888129949214, 0.8634382199742404, 0.8635845015749304, 0.8637278028711065, 0.8638682660793895, 0.8640060302698868, 0.8641412311468221, 0.8642740008910356, 0.8644044680567182, 0.8645327575149447, 0.8646589904368904, 0.8647832843100016, 0.864905752980818, 0.8650265067185475, 0.8651456522939839, 0.8652632930686973, 0.8653795290898753, 0.8654944571865305, 0.8656081710630661, 0.8657207613865725, 0.8658323158643572, 0.8659429193085387, 0.8660526536846667, 0.8661615981415651, 0.8662698290197333, 0.866377419835914, 0.8664844412415664, 0.8665909609533258, 0.8666970436538094, 0.8668027508615124, 0.8669081407690424, 0.8670132680494975, 0.8671181836314994, 0.8672229344442106, 0.8673275631346359, 0.8674321077605852, 0.8675366014639023, 0.867641072129883, 0.8677455420402328, 0.8678500275283321, 0.8679545386470329, 0.8680590788605537, 0.8681636447731886, 0.8682682259084833, 0.8683728045530582, 0.8684773556793163, 0.868581846960774, 0.8686862388925796, 0.8687904850278584, 0.8688945323379238, 0.8689983217009387, 0.8691017885196105, 0.869204863463855, 0.8693074733294502, 0.8694095419985982, 0.8695109914834015, 0.8696117430288459, 0.8697117182481688, 0.8698108402608928, 0.8699090348024602, 0.8700062312744792, 0.8701023637061892, 0.87019737160076, 0.8702912006443515, 0.8703838032612021, 0.8704751390040876, 0.8705651747758714, 0.8706538848842501, 0.8707412509377179, 0.8708272615960317, 0.8709119121926547, 0.870995204249795, 0.8710771449085528, 0.871157746297405, 0.8712370248618833, 0.8713150006770345, 0.8713916967621926, 0.8714671384150481, 0.8715413525791581, 0.8716143672560297, 0.8716862109700259, 0.8717569122915705, 0.8718264994217109, 0.8718949998389671, 0.8719624400076581, 0.8720288451455366, 0.8720942390475163, 0.8721586439615837, 0.8722220805124952, 0.8722845676686424, 0.8723461227473881, 0.8724067614541637, 0.872466497950807, 0.8725253449487114, 0.8725833138225769, 0.8726404147407464, 0.872696656808276, 0.8727520482191256, 0.8728065964139684, 0.872860308240387, 0.8729131901123585, 0.8729652481661581, 0.8730164884100796, 0.873066916865551, 0.8731165396976026, 0.8731653633328712, 0.8732133945637253, 0.8732606406374042, 0.8733071093294619, 0.8733528090011609, 0.8733977486408188, 0.8734419378894576, 0.8734853870514189, 0.8735281070908938, 0.8735701096155343, 0.8736114068485277, 0.8736520115906239, 0.8736919371737148, 0.8737311974075892, 0.873769806521481, 0.8738077791019723, 0.873845130028728, 0.8738818744094217, 0.8739180275150592, 0.8739536047167796, 0.8739886214250261, 0.8740230930318161, 0.8740570348566938, 0.8740904620967739, 0.8741233897811744, 0.8741558327299699, 0.874187805517707, 0.8742193224414417, 0.8742503974931399, 0.8742810443362683, 0.874311276286313, 0.8743411062949646, 0.8743705469376652, 0.8743996104042091, 0.8744283084921122, 0.874456652602412, 0.874484653737657, 0.8745123225017769, 0.8745396691016045, 0.8745667033498125, 0.874593434669067, 0.8746198720971918, 0.8746460242932, 0.8746718995440489, 0.8746975057719718, 0.8747228505423112, 0.8747479410717358, 0.8747727842367752, 0.8747973865826124, 0.87482175433206, 0.8748458933946974, 0.8748698093761128, 0.8748935075872207, 0.8749169930536348, 0.8749402705250582, 0.8749633444846868, 0.8749862191585882, 0.8750088985250627, 0.8750313863239457, 0.875053686065851, 0.8750758010413513, 0.8750977343300533, 0.8751194888095843, 0.8751410671644719, 0.8751624718948827, 0.8751837053252469, 0.8752047696127248, 0.8752256667555282, 0.875246398601073, 0.8752669668539684, 0.8752873730838253, 0.8753076187328829, 0.8753277051234504, 
0.8753476334651591, 0.8753674048620174, 0.8753870203192811, 0.8754064807501285, 0.875425786982136, 0.8754449397635828, 0.8754639397695544, 0.8754827876078759, 0.8755014838248671, 0.875520028910929, 0.8755384233059628, 0.8755566674046403, 0.8755747615615134, 0.8755927060959855, 0.8756105012971431, 0.8756281474284486, 0.8756456447323088, 0.8756629934345107, 0.8756801937485438, 0.8756972458797887, 0.8757141500295914, 0.8757309063992171, 0.8757475151936707, 0.875763976625404, 0.8757802909178753, 0.8757964583089928, 0.8758124790543962, 0.8758283534306068, 0.8758440817380129, 0.8758596643036931, 0.8758751014840719, 0.8758903936673986, 0.8759055412760337, 0.8759205447685515, 0.8759354046416348, 0.8759501214317753, 0.8759646957167551, 0.8759791281169229, 0.8759934192962524, 0.8760075699631866, 0.8760215808712718, 0.8760354528195692, 0.8760491866528737, 0.876062783261716, 0.8760762435821796, 0.8760895685955257, 0.8761027593276441, 0.8761158168483367, 0.876128742270447, 0.8761415367488501, 0.8761542014793188, 0.8761667376972688, 0.8761791466764232, 0.8761914297273788, 0.8762035881961148, 0.8762156234624537, 0.8762275369384779, 0.8762393300669322, 0.8762510043196162, 0.8762625611957833, 0.8762740022205578, 0.8762853289433865, 0.8762965429365167, 0.876307645793547, 0.8763186391280122, 0.8763295245720494, 0.8763403037751246, 0.8763509784028461, 0.8763615501358385, 0.8763720206687148, 0.8763823917091181, 0.8763926649768496, 0.8764028422030767, 0.8764129251296093, 0.8764229155082662, 0.8764328151002921, 0.8764426256758552, 0.8764523490135939, 0.8764619869002149, 0.8764715411301389, 0.8764810135051864, 0.8764904058342802, 0.8764997199331763, 0.8765089576242022, 0.8765181207359942, 0.8765272111032254, 0.8765362305663154, 0.8765451809711097, 0.876554064168513, 0.8765628820140854, 0.8765716363675612, 0.8765803290923132, 0.876588962054724, 0.8765975371234693, 0.8766060561687091, 0.8766145210611551, 0.8766229336710352, 0.8766312958669178, 0.8766396095144182, 0.8766478764747457, 0.8766560986031169, 0.8766642777470119, 0.8766724157442718, 0.8766805144210389, 0.8766885755895272, 0.8766966010456374, 0.8767045925664012, 0.8767125519072646, 0.8767204807992091, 0.876728380945728, 0.8767362540196402, 0.8767441016597782, 0.8767519254675396, 0.8767597270033249, 0.8767675077828727, 0.8767752692735168, 0.8767830128903673, 0.8767907399924606, 0.8767984518788796, 0.8768061497848916, 0.876813834878109, 0.8768215082547247, 0.8768291709358389, 0.8768368238639197, 0.8768444678994236, 0.876852103817628, 0.8768597323056895, 0.8768673539599839, 0.8768749692837615, 0.8768825786851381, 0.8768901824754846, 0.8768977808682183, 0.8769053739780517, 0.8769129618207107, 0.8769205443131562, 0.876928121274319, 0.8769356924263814, 0.8769432573965995, 0.876950815719674, 0.8769583668406853, 0.8769659101185616, 0.8769734448300875, 0.8769809701744242, 0.8769884852781114, 0.8769959892005346, 0.8770034809397962, 0.8770109594389738, 0.8770184235926886, 0.877025872253955, 0.8770333042412355, 0.8770407183456562, 0.8770481133383035, 0.8770554879775581, 0.8770628410163788, 0.8770701712095026, 0.8770774773204691, 0.8770847581284323, 0.8770920124346967, 0.8770992390689264, 0.8771064368949802, 0.8771136048163378, 0.8771207417810807, 0.87712784678639, 0.8771349188825612, 0.8771419571764947, 0.8771489608346794, 0.8771559290856443, 0.877162861221905, 0.8771697566013931, 0.8771766146483997, 0.8771834348540459, 0.8771902167763067, 0.8771969600396103, 0.8772036643340518, 0.8772103294142465, 0.8772169550978566, 0.8772235412638328, 0.8772300878503964, 
0.8772365948528034, 0.8772430623209236, 0.8772494903566609, 0.8772558791112627, 0.8772622287825252, 0.8772685396119385, 0.8772748118817997, 0.8772810459123033, 0.8772872420586341, 0.877293400708096, 0.8772995222772679, 0.8773056072092236, 0.8773116559708127, 0.8773176690500171, 0.8773236469533938, 0.8773295902036056, 0.8773354993370436, 0.8773413749015483, 0.8773472174542291, 0.8773530275593803, 0.877358805786496, 0.8773645527083821, 0.877370268899361, 0.8773759549335679, 0.8773816113833395, 0.8773872388176794, 0.8773928378008186, 0.8773984088908376, 0.8774039526383761, 0.8774094695854091, 0.8774149602640827, 0.8774204251956313, 0.8774258648893325, 0.8774312798415407, 0.877436670534761, 0.877442037436786, 0.8774473809998737, 0.8774527016599855, 0.8774579998360593, 0.8774632759293441, 0.8774685303227665, 0.8774737633803478, 0.8774789754466723, 0.8774841668463939, 0.8774893378837901, 0.8774944888423631, 0.8774996199844929, 0.8775047315511283, 0.87750982376154, 0.877514896813114, 0.8775199508812057, 0.8775249861190405, 0.877530002657678, 0.877535000606023, 0.8775399800509034, 0.8775449410572047, 0.8775498836680619, 0.8775548079051201, 0.8775597137688484, 0.8775646012389297, 0.8775694702747016, 0.8775743208156712, 0.87757915278209, 0.8775839660755866, 0.8775887605798791, 0.8775935361615244, 0.8775982926707538, 0.8776030299423454, 0.8776077477965666, 0.8776124460401619, 0.8776171244673945, 0.8776217828611327, 0.8776264209939841, 0.8776310386294578, 0.877635635523175, 0.8776402114240941, 0.8776447660757741, 0.8776492992176456, 0.8776538105862995, 0.8776582999167866, 0.877662766943907, 0.877667211403513, 0.8776716330337785, 0.8776760315764731, 0.8776804067782009, 0.8776847583916114, 0.8776890861765795, 0.8776933899013434, 0.8776976693435948, 0.8777019242915224, 0.8777061545447964, 0.877710359915494, 0.8777145402289664, 0.8777186953246238, 0.8777228250566698, 0.8777269292947472, 0.8777310079245129, 0.877735060848143, 0.877739087984748, 0.8777430892707186, 0.8777470646599888, 0.8777510141242252, 0.877754937652933, 0.8777588352534909, 0.8777627069511114, 0.87776655278873, 0.877770372826821, 0.8777741671431554, 0.8777779358324889, 0.8777816790061946, 0.8777853967918413, 0.8777890893327186, 0.8777927567873168, 0.8777963993287639, 0.8778000171442228, 0.8778036104342604, 0.8778071794121828, 0.8778107243033475, 0.8778142453444557, 0.8778177427828311, 0.8778212168756814, 0.8778246678893602, 0.87782809609862, 0.8778315017858668, 0.8778348852404193, 0.8778382467577722, 0.8778415866388797, 0.8778449051894323, 0.8778482027191734, 0.8778514795412141, 0.8778547359713799, 0.8778579723275718, 0.8778611889291592, 0.8778643860963907, 0.8778675641498306, 0.8778707234098313, 0.877873864196024, 0.8778769868268418, 0.8778800916190717, 0.8778831788874365, 0.8778862489442013, 0.8778893020988153, 0.8778923386575745, 0.8778953589233206, 0.8778983631951622, 0.8779013517682239, 0.8779043249334237, 0.8779072829772739, 0.8779102261817088, 0.8779131548239324, 0.8779160691762967, 0.8779189695061934, 0.877921856075972, 0.877924729142878, 0.8779275889590044, 0.8779304357712714, 0.8779332698214143, 0.8779360913459907, 0.8779389005764037, 0.8779416977389383, 0.8779444830548112, 0.8779472567402309, 0.8779500190064751, 0.8779527700599685, 0.8779555101023843, 0.8779582393307384, 0.8779609579375052, 0.8779636661107361, 0.8779663640341777, 0.8779690518874126, 0.8779717298459836, 0.8779743980815442, 0.8779770567619962, 0.8779797060516424, 0.8779823461113383, 0.8779849770986399, 0.8779875991679666, 0.8779902124707557, 0.8779928171556199, 
0.8779954133685095, 0.8779980012528726, 0.8780005809498098, 0.8780031525982412, 0.8780057163350625, 0.8780082722953021, 0.8780108206122813, 0.8780133614177678, 0.8780158948421336, 0.8780184210145017, 0.8780209400629035, 0.8780234521144225, 0.8780259572953423, 0.8780284557312873, 0.878030947547367, 0.8780334328683135, 0.8780359118186102, 0.8780383845226323, 0.8780408511047664, 0.8780433116895415, 0.8780457664017483, 0.8780482153665559, 0.8780506587096275, 0.8780530965572299, 0.8780555290363382, 0.8780579562747441, 0.8780603784011489, 0.8780627955452598, 0.8780652078378834, 0.8780676154110058, 0.8780700183978836, 0.8780724169331182, 0.8780748111527272, 0.8780772011942184, 0.8780795871966564, 0.878081969300721, 0.878084347648767, 0.8780867223848781, 0.878089093654913, 0.8780914616065549, 0.8780938263893492, 0.8780961881547409, 0.8780985470561061, 0.8781009032487852, 0.8781032568900993, 0.878105608139379, 0.8781079571579734, 0.8781103041092648, 0.8781126491586809, 0.8781149924736912, 0.8781173342238126, 0.8781196745806028, 0.8781220137176549, 0.878124351810582, 0.8781266890370049, 0.8781290255765297, 0.8781313616107241, 0.8781336973230962, 0.8781360328990511, 0.8781383685258662, 0.8781407043926475, 0.8781430406902863, 0.8781453776114119, 0.8781477153503428, 0.8781500541030289, 0.878152394066994, 0.8781547354412731, 0.8781570784263436, 0.8781594232240557, 0.8781617700375548, 0.8781641190712064, 0.8781664705305063, 0.8781688246219976, 0.8781711815531762, 0.8781735415323926, 0.8781759047687533, 0.8781782714720116, 0.8781806418524614, 0.8781830161208147, 0.8781853944880906, 0.878187777165482, 0.8781901643642269, 0.8781925562954787, 0.8781949531701577, 0.878197355198813, 0.8781997625914627, 0.8782021755574445, 0.8782045943052491, 0.8782070190423563, 0.8782094499750556, 0.8782118873082708, 0.878214331245375, 0.878216781987996, 0.8782192397358244, 0.8782217046864069, 0.8782241770349356, 0.87822665697404, 0.8782291446935594, 0.8782316403803228, 0.8782341442179109, 0.8782366563864212, 0.8782391770622305, 0.8782417064177374, 0.8782442446211173, 0.8782467918360581, 0.8782493482215059, 0.8782519139313969, 0.8782544891143835, 0.8782570739135693, 0.878259668466229], 'MSE': [14.711373280759894, 12.922203358344662, 12.57045936444856, 12.360064924399614, 12.198096212501701, 12.066121873776693, 11.954703493963809, 11.857779225956568, 11.771222332362303, 11.69212431944716, 11.618380921996945, 11.548438910773173, 11.481134607887078, 11.415587447451037, 11.351127978358019, 11.287248114959025, 11.223566099984982, 11.159801356501619, 11.095756049844592, 11.031301210693316, 10.966365935698475, 10.900928625317821, 10.835009524673186, 10.768664053997034, 10.701976582246274, 10.635054429834437, 10.568021995005747, 10.50101498838853, 10.434174833154556, 10.367643343126252, 10.301557826241906, 10.236046774413285, 10.171226292681927, 10.107197392377806, 10.044044228629204, 9.981833307803905, 9.9206136321368081, 9.8604176937322681, 9.8012631841614581, 9.7431552530208609, 9.6860891309878117, 9.6300529299303577, 9.5750304427147235, 9.5210037856590688, 9.4679557538677948, 9.415871790792588, 9.3647415056184045, 9.314559703393412, 9.2653269217508676, 9.2170494937041436, 9.1697391777784549, 9.1234124143766611, 9.0780892805818887, 9.033792224444495, 8.9905446641295956, 8.9483695371339671, 8.9072878802922375, 8.8673175128576531, 8.828471883173572, 8.790759125182257, 8.7541813552688517, 8.7187342238173198, 8.6844067204784761, 8.6511812184854069, 8.6190337321971473, 8.5879343538894446, 8.5578478308501325, 8.5287342419833685, 
8.5005497340491445, 8.473247280848323, 8.446777433505833, 8.4210890358631332, 8.3961298852824218, 8.3718473253703056, 8.348188762865604, 8.3251021059268648, 8.302536125150592, 8.2804407418036039, 8.2587672499900826, 8.2374684808847221, 8.2164989178689751, 8.1958147715483047, 8.1753740233341325, 8.1551364456816504, 8.1350636062826016, 8.1151188626208501, 8.0952673523669034, 8.0754759841728934, 8.0557134325607915, 8.0359501397915629, 8.0161583268731125, 7.9963120152039142, 7.9763870597566369, 7.9563611941681796, 7.9362140876091791, 7.9159274128463597, 7.8954849244758414, 7.8748725458816526, 7.85407846306079, 7.8330932230498123, 7.8119098342839575, 7.7905238658312737, 7.7689335420720083, 7.7471398290513536, 7.7251465084398703, 7.7029602348064241, 7.680590571763573, 7.6580500025143952, 7.6353539104268098, 7.6125205255220525, 7.5895708331979312, 7.5665284421377619, 7.5434194091946871, 7.520272020083163, 7.4971165259498926, 7.4739848373161282, 7.4509101784306351, 7.4279267067117951, 7.4050691035971372, 7.3823721446923134, 7.3598702585061417, 7.3375970841870535, 7.3155850394260211, 7.2938649099895869, 7.2724654721178688, 7.251413158241129, 7.2307317751420941, 7.2104422818668379, 7.190562632461055, 7.1711076861106742, 7.1520891846498333, 7.1335157948332224, 7.1153932104149131, 7.0977243070727161, 7.0805093416757412, 7.0637461863746163, 7.0474305875263834, 7.0315564395181029, 7.0161160640756535, 7.0011004865433035, 6.9864997017996657, 6.9723029238266543, 6.9584988143726516, 6.945075687556364, 6.9320216885766772, 6.9193249458701427, 6.9069736970585414, 6.8949563898406527, 6.8832617595999643, 6.8718788859352067, 6.8607972305903777, 6.8500066593871116, 6.8394974507688771, 6.8292602934802362, 6.8192862757435906, 6.8095668680892487, 6.8000939017521969, 6.7908595442933057, 6.7818562738403063, 6.7730768530922631, 6.7645143039889328, 6.7561618837275494, 6.7480130626111041, 6.7400615040411527, 6.7323010468207922, 6.7247256898144112, 6.717329578913577, 6.7101069961863296, 6.703052351033671, 6.6961601731428271, 6.6894251070076232, 6.6828419077799905, 6.6764054382193336, 6.6701106665192187, 6.6639526648073355, 6.6579266081342983, 6.6520277737920095, 6.6462515408231493, 6.6405933896086085, 6.6350489014402685, 6.6296137580078565, 6.6242837407463728, 6.6190547300066518, 6.6139227040253861, 6.608883737682528, 6.6039340010426377, 6.5990697576853856, 6.5942873628339793, 6.5895832612963865, 6.5849539852353374, 6.5803961517860428, 6.5759064605400841, 6.5714816909145464, 6.5671186994262385, 6.562814416887047, 6.558565845538741, 6.5543700561417708, 6.5502241850325591, 6.5461254311619284, 6.5420710531255137, 6.538058366196843, 6.5340847393712478, 6.5301475924277046, 6.526244393015685, 6.522372653771761, 6.5185299294703922, 6.514713814212314, 6.5109219386538726, 6.5071519672783955, 6.5034015957119884, 6.499668548084407, 6.4959505744354802, 6.4922454481674725, 6.4885509635430356, 6.4848649332287378, 6.4811851858830227, 6.4775095637881188, 6.4738359205252483, 6.470162118691678, 6.4664860276589007, 6.462805521370834, 6.4591184761815166, 6.4554227687304877, 6.4517162738562508, 6.4479968625460602, 6.4442623999227546, 6.4405107432671169, 6.4367397400775266, 6.4329472261649272, 6.429131023786165, 6.425288939814819, 6.4214187639519071, 6.417518266977873, 6.4135851990481738, 6.4096172880355109, 6.4056122379211358, 6.401567727240276, 6.3974814075852446, 6.3933509021716226, 6.3891738044736481, 6.3849476769346545, 6.3806700497614113, 6.3763384198080786, 6.3719502495619205, 6.3675029662382014, 6.3629939609970387, 6.3584205882931819, 
6.3537801653724664, 6.3490699719290822, 6.3442872499390361, 6.3394292036864481, 6.3344930000008199, 6.3294757687245076, 6.3243746034306154, 6.3191865624139174, 6.3139086699775495, 6.3085379180405772, 6.3030712680926833, 6.2975056535226637, 6.2918379823510611, 6.2860651403952801, 6.2801839949007352, 6.274191398668183, 6.2680841947130306, 6.2618592214892441, 6.2555133187145984, 6.249043333831195, 6.2424461291384041, 6.2357185896328398, 6.2288576315914375, 6.2218602119302702, 6.2147233383740792, 6.207444080464553, 6.2000195814378154, 6.1924470709941843, 6.1847238789812335, 6.1768474500058455, 6.1688153589842312, 6.1606253276320295, 6.1522752418890621, 6.1437631702626243, 6.1350873830632846, 6.1262463724944967, 6.1172388735440979, 6.1080638856102958, 6.0987206947787218, 6.0892088966485236, 6.0795284195874251, 6.069679548273835, 6.0596629473632744, 6.0494796850944841, 6.0391312566260833, 6.0286196068737743, 6.0179471525926358, 6.007116803429426, 5.9961319816469638, 5.9849966402044661, 5.973715278861472, 5.9622929579598916, 5.9507353095286817, 5.9390485453529029, 5.9272394616489521, 5.9153154399955667, 5.903284444184111, 5.8911550126729377, 5.878936246359185, 5.8666377914186123, 5.8542698170063181, 5.8418429876653599, 5.829368430346773, 5.8168576960111107, 5.8043227158522992, 5.7917757522617341, 5.7792293447301306, 5.7666962509697282, 5.7541893836245412, 5.741721743024482, 5.729306346525445, 5.716956155064941, 5.7046839976437118, 5.6925024945240397, 5.6804239800073857, 5.6684604257152866, 5.6566233653515097, 5.6449238219576676, 5.6333722386951246, 5.6219784141820073, 5.610751443388283, 5.599699665037889, 5.5888306163845627, 5.5781509961169276, 5.5676666360105482, 5.5573824817791238, 5.5473025833939422, 5.5374300949410573, 5.527767283877921, 5.5183155493476965, 5.5090754490105214, 5.5000467336730763, 5.4912283888434974, 5.4826186822143521, 5.4742152159891795, 5.4660149829157483, 5.4580144248775184, 5.4502094929141549, 5.4425957076012645, 5.4351682187962727, 5.4279218638670823, 5.4208512236351387, 5.4139506753974498, 5.4072144425218518, 5.4006366402434951, 5.3942113174119504, 5.3879324940564279, 5.3817941947365746, 5.3757904777362624, 5.3699154602316561, 5.3641633396240866, 5.3585284112739853, 5.3530050829048763, 5.3475878859682355, 5.3422714842696424, 5.3370506801620179, 5.331920418606261, 5.3268757893916616, 5.3219120277958591, 5.3170245139489722, 5.3122087711497281, 5.3074604633655715, 5.3027753921294378, 5.2981494930325743, 5.2935788319951902, 5.2890596014833475, 5.2845881168297621, 5.2801608128010979, 5.2757742405493273, 5.2714250650717265, 5.2671100632990671, 5.2628261229220801, 5.2585702420611273, 5.2543395298729996, 5.2501312081838636, 5.2459426142237664, 5.2417712045271276, 5.2376145600487467, 5.233470392524775, 5.229336552086397, 5.2252110361059723, 5.2210919992235905, 5.2169777644633797, 5.2128668353068948, 5.2087579085425117, 5.2046498876578688, 5.2005418964863184, 5.1964332927627312, 5.1923236811864015, 5.1882129255365017, 5.1841011593398001, 5.1799887945539753, 5.1758765277088372, 5.1717653429456343, 5.1676565114141955, 5.163551586533889, 5.1594523946999997, 5.1553610211194103, 5.1512797905953471, 5.1472112432382326, 5.1431581052624677, 5.1391232552222244, 5.1351096862400416, 5.1311204649749493, 5.1271586882507316, 5.1232274384106864, 5.1193297385676191, 5.1154685089704826, 5.1116465257063783, 5.1078663828935627, 5.1041304594030832, 5.1004408909769436, 5.0967995484007833, 5.0932080221501739, 5.0896676136787518, 5.0861793332655081, 5.0827439041056968, 5.079361772123109, 
5.0760331208164438, 5.0727578903290533, 5.0695357998568715, 5.0663663724809869, 5.0632489615262051, 5.0601827775966601, 5.057166915520523, 5.0542003805358622, 5.0512821131615828, 5.0484110123156327, 5.0455859563560805, 5.0428058218297931, 5.0400694998083351, 5.0373759097746715, 5.0347240110924334, 5.0321128121430254, 5.0295413772570754, 5.0270088315938573, 5.0245143641416981, 5.0220572290208922, 5.0196367452739041, 5.0172522953279408, 5.014903322307787, 5.0125893263729413, 5.0103098602446865, 5.0080645240809822, 5.0058529598505963, 5.0036748453486561, 5.0015298879911949, 4.9994178185159166, 4.9973383847108463, 4.9952913452838894, 4.993276463975457, 4.9912935040094935, 4.9893422229631872, 4.9874223681267624, 4.9855336724090549, 4.9836758508323582, 4.9818485976444604, 4.980051584061763, 4.9782844566434523, 4.976546836282937, 4.9748383177903817, 4.97315847002921, 4.9715068365605095, 4.9698829367411426, 4.9682862672169179, 4.9667163037479423, 4.9651725033023526, 4.963654306354754, 4.9621611393280309, 4.960692417120387, 4.9592475456642102, 4.957825924469307, 4.9564269491080148, 4.9550500136071509, 4.9536945127180561, 4.9523598440419105, 4.9510454099940731, 4.9497506195959184, 4.9484748900888862, 4.9472176483691008, 4.9459783322442661, 4.9447563915189052, 4.9435512889148585, 4.942362500837346, 4.9411895179968575, 4.9400318458987948, 4.9388890052131202, 4.93776053203504, 4.9366459780501906, 4.9355449106136975, 4.9344569127552562, 4.9333815831193135, 4.9323185358495687, 4.9312674004256571, 4.9302278214600852, 4.9291994584610297, 4.928181985566753, 4.9271750912572623, 4.9261784780463334, 4.9251918621583695, 4.9242149731927967, 4.9232475537783404, 4.9222893592200636, 4.9213401571400404, 4.920399727113975, 4.9194678603047954, 4.9185443590941063, 4.9176290367129232, 4.9167217168719324, 4.9158222333926762, 4.9149304298394672, 4.9140461591535871, 4.9131692832898288, 4.9122996728554957, 4.9114372067535399, 4.9105817718292073, 4.9097332625208576, 4.90889158051632, 4.9080566344137964, 4.9072283393888991, 4.906406616867466, 4.9055913942049498, 4.9047826043722997, 4.9039801856489005, 4.903184081322669, 4.9023942393974345, 4.90161061230779, 4.9008331566416476, 4.900061832870092, 4.8992966050849027, 4.8985374407439268, 4.897784310423142, 4.8970371875766761, 4.8962960483034816, 4.8955608711210328, 4.8948316367456792, 4.8941083278795023, 4.8933909290031234, 4.8926794261747748, 4.8919738068347955, 4.8912740596156512, 4.890580174157412, 4.889892140928108, 4.8892099510492741, 4.8885335961260346, 4.8878630680823525, 4.8871983590008652, 4.8865394609673611, 4.8858863659205971, 4.8852390655064886, 4.8845975509381487, 4.8839618128605089, 4.8833318412212225, 4.8827076251468675, 4.8820891528254959, 4.8814764113956448, 4.8808693868419004, 4.8802680638974314, 4.8796724259540776, 4.8790824549796721, 4.8784981314435267, 4.8779194342495833, 4.8773463406781614, 4.8767788263357543, 4.8762168651134266, 4.8756604291536103, 4.8751094888251814, 4.8745640127072516, 4.8740239675805981, 4.8734893184275512, 4.8729600284392172, 4.8724360590301945, 4.8719173698602027, 4.871403918862228, 4.8708956622767667, 4.870392554691545, 4.8698945490860952, 4.8694015968810902, 4.8689136479908965, 4.8684306508798931, 4.8679525526211744, 4.8674792989570124, 4.8670108343611735, 4.8665471021016806, 4.8660880443040853, 4.8656336020143787, 4.8651837152612432, 4.8647383231169705, 4.8642973637572648, 4.8638607745182467, 4.8634284919521242, 4.8630004518795991, 4.8625765894399624, 4.8621568391377679, 4.8617411348870876, 4.8613294100519369, 4.8609215974839506, 
4.8605176295567016, 4.8601174381968031, 4.8597209549123761, 4.8593281108179802, 4.8589388366575212, 4.8585530628240248, 4.8581707193774282, 4.8577917360604248, 4.8574160423123649, 4.8570435672816297, 4.8566742398373117, 4.8563079885798182, 4.8559447418511361, 4.8555844277451987, 4.8552269741185308, 4.8548723086017178, 4.8545203586119676, 4.85417105136756, 4.8538243139037114, 4.8534800730914913, 4.8531382556589087, 4.8527987882156092, 4.8524615972810796, 4.852126609316433, 4.8517937507610878, 4.8514629480734888, 4.8511341277773301, 4.850807216512445, 4.8504821410919039, 4.8501588285644299, 4.8498372062829587, 4.8495172019794683, 4.8491987438459319, 4.8488817606220431, 4.8485661816889483, 4.8482519371697856, 4.8479389580364778, 4.8476271762229493, 4.8473165247440173, 4.8470069378207041, 4.8466983510104322, 4.8463907013426573, 4.8460839274589276, 4.8457779697570107, 4.8454727705380494, 4.8451682741567321, 4.8448644271727135, 4.8445611785031284, 4.8442584795745027, 4.8439562844736903, 4.8436545500959385, 4.8433532362892935, 4.8430523059937949, 4.8427517253743613, 4.8424514639453466, 4.8421514946862221, 4.8418517941461374, 4.8415523425361773, 4.8412531238083059, 4.8409541257187199, 4.8406553398753758, 4.8403567617675183, 4.8400583907767762, 4.8397602301684204, 4.8394622870625126, 4.8391645723834529, 4.8388671007883781, 4.8385698905738979, 4.8382729635607005, 4.8379763449571547, 4.8376800632017174, 4.8373841497851693, 4.8370886390539303, 4.8367935679950662, 4.8364989760054371, 4.836204904645605, 4.8359113973815475, 4.8356184993152995, 4.8353262569075772, 4.8350347176940049, 4.83474392999823, 4.8344539426434965, 4.83416480466627, 4.8338765650330844, 4.8335892723643319, 4.8333029746664149, 4.833017719074733, 4.8327335516095156, 4.8324505169463849, 4.832168658202975, 4.8318880167430844, 4.831608631999794, 4.8313305413176222, 4.8310537798152442, 4.8307783802681907, 4.8305043730124257, 4.8302317858678236, 4.8299606440819609, 4.8296909702929902, 4.8294227845111797, 4.8291561041179429, 4.8288909438816043, 4.8286273159883182, 4.8283652300871696, 4.8281046933480773, 4.8278457105309753, 4.8275882840651461, 4.8273324141371274, 4.8270780987859307, 4.8268253340044653, 4.8265741138453473, 4.8263244305307769, 4.8260762745648362, 4.8258296348469605, 4.8255844987863838, 4.8253408524164438, 4.8250986805074856, 4.8248579666786888, 4.8246186935074045, 4.8243808426360504, 4.8241443948760896, 4.8239093303085649, 4.8236756283812028, 4.8234432680018973, 4.8232122276283329, 4.8229824853537249, 4.8227540189887517, 4.8225268061396207, 4.8223008242823351, 4.8220760508332363, 4.8218524632160467, 4.8216300389252051, 4.8214087555862069, 4.821188591012235, 4.8209695232582988, 4.8207515306720206, 4.8205345919417262, 4.8203186861422314, 4.8201037927774379, 4.819889891821405, 4.8196769637563683, 4.8194649896091368, 4.8192539509850674, 4.8190438301003482, 4.8188346098120132, 4.8186262736464709, 4.8184188058257185, 4.8182121912922264, 4.8180064157318601, 4.8178014655947807, 4.8175973281148812, 4.817393991327279, 4.81719144408404, 4.8169896760677915, 4.8167886778038813, 4.8165884406701016, 4.8163889569047811, 4.816190219612567, 4.8159922227682825, 4.8157949612183701, 4.8155984306804589, 4.815402627740264, 4.8152075498463551, 4.8150131953025683, 4.8148195632577648, 4.8146266536934927, 4.8144344674086827, 4.8142430060022532, 4.8140522718529297, 4.8138622680966039, 4.8136729986014526, 4.8134844679399515, 4.8132966813593745, 4.8131096447489581, 4.8129233646056147, 4.812737847996817, 4.8125531025218145, 4.8123691362706325, 
4.8121859577813799, 4.8120035759956359, 4.8118220002127572, 4.8116412400422552, 4.8114613053556603, 4.8112822062367995, 4.8111039529318536, 4.8109265557986527, 4.810750025255599, 4.8105743717309375, 4.8103996056115577, 4.8102257371930488, 4.8100527766296031, 4.8098807338853202, 4.8097096186865747, 4.8095394404756266, 4.8093702083658689, 4.8092019310989578, 4.8090346170037952, 4.8088682739577973, 4.8087029093505036, 4.8085385300494989, 4.8083751423695471, 4.8082127520436728, 4.8080513641978175, 4.8078909833282095, 4.8077316132815069, 4.8075732572384648, 4.8074159177003439, 4.8072595964785609, 4.8071042946872495, 4.8069500127390903, 4.8067967503439348, 4.8066445065103656, 4.8064932795500734, 4.8063430670850638, 4.8061938660572148, 4.8060456727405096, 4.8058984827555147, 4.8057522910859438, 4.8056070920973193, 4.8054628795574459, 4.8053196466585559, 4.8051773860409668, 4.8050360898179516, 4.8048957496018314, 4.8047563565310423, 4.8046179012979611, 4.804480374177273, 4.8043437650549876, 4.8042080634574855, 4.8040732585808801, 4.8039393393203431, 4.8038062942992017, 4.8036741118979469, 4.80354278028239, 4.8034122874320877, 4.8032826211672344, 4.8031537691756041, 4.8030257190382315, 4.8028984582545293, 4.8027719742661796, 4.8026462544802913, 4.8025212862915554, 4.802397057102997, 4.8022735543460051, 4.8021507654990634, 4.8020286781054144, 4.8019072797894315, 4.801786558272167, 4.8016665013853777, 4.8015470970848222, 4.8014283334620806, 4.8013101987555302, 4.8011926813602166, 4.8010757698365873, 4.8009594529183399, 4.8008437195191833, 4.8007285587388289, 4.8006139598677908, 4.800499912391631, 4.8003864059941757, 4.8002734305599484, 4.8001609761760973, 4.8000490331331642, 4.7999375919255671, 4.7998266432513352, 4.7997161780111846, 4.7996061873070754, 4.7994966624402737, 4.7993875949089775, 4.7992789764052706, 4.7991707988120691, 4.7990630541991397, 4.7989557348193657, 4.7988488331042243, 4.7987423416591186, 4.7986362532587101, 4.7985305608414155, 4.7984252575044959, 4.7983203364981515, 4.798215791220108, 4.7981116152096073, 4.7980078021414547, 4.7979043458201946, 4.797801240173702, 4.7976984792471358, 4.7975960571966958, 4.7974939682833027, 4.7973922068662525, 4.797290767397147, 4.7971896444133399, 4.7970888325317871, 4.7969883264428699, 4.7968881209040877, 4.7967882107340483, 4.7966885908062258, 4.7965892560432239, 4.7964902014105446, 4.7963914219109958, 4.7962929125788154, 4.7961946684741683, 4.7960966846774706, 4.7959989562839827, 4.7959014783987275, 4.7958042461309986, 4.7957072545896358, 4.7956104988778776, 4.7955139740886832, 4.7954176753001088, 4.7953215975708012, 4.7952257359356727, 4.795130085401766, 4.7950346409440296, 4.7949393975016479, 4.7948443499742357, 4.7947494932181787, 4.7946548220435004, 4.7945603312102421, 4.7944660154255772, 4.7943718693409938, 4.7942778875493683, 4.7941840645823754, 4.7940903949081664, 4.7939968729290312, 4.7939034929792799, 4.7938102493234132, 4.7937171361542044, 4.7936241475911956, 4.7935312776792163, 4.7934385203871859, 4.7933458696067683, 4.7932533191517424, 4.7931608627568734, 4.7930684940775539, 4.7929762066892243, 4.7928839940868988, 4.792791849685301, 4.7926997668186422, 4.7926077387408892, 4.7925157586259779, 4.7924238195683939, 4.7923319145837082, 4.7922400366093925, 4.7921481785058004, 4.7920563330569843, 4.7919644929724026, 4.7918726508878748, 4.7917807993673431, 4.7916889309045292, 4.7915970379248156, 4.79150511278716, 4.7914131477863799, 4.7913211351553935, 4.791229067067694, 4.7911369356400675, 4.791044732935303, 4.7909524509652774, 
4.7908600816938796, 4.7907676170405447, 4.7906750488835392, 4.7905823690636655, 4.7904895693881118, 4.7903966416343327, 4.7903035775543703, 4.7902103688789861, 4.7901170073225154, 4.7900234845872181, 4.7899297923685165, 4.7898359223600533, 4.7897418662588072, 4.7896476157709085, 4.7895531626171097, 4.7894584985390205, 4.7893636153050112, 4.7892685047167634, 4.7891731586157089, 4.7890775688900753, 4.788981727481799, 4.7888856263938013, 4.7887892576976148, 4.7886926135409693, 4.7885956861558707, 4.7884984678669431, 4.7884009510995549, 4.7883031283887965, 4.7882049923881382, 4.7881065358787689, 4.7880077517788582, 4.7879086331528695, 4.787809173221711, 4.7877093653723373, 4.7876092031681745, 4.7875086803591165, 4.7874077908920372, 4.7873065289215901, 4.7872048888206278, 4.7871028651911987], 'Rp': [0.80356837206859411, 0.81952979947871418, 0.82490383822701885, 0.82819277076196918, 0.83064293791745358, 0.83262217025105201, 0.83429387939329513, 0.83575159389197751, 0.83705638856548537, 0.83825087571686541, 0.83936588546475266, 0.84042423091227025, 0.84144304569188888, 0.84243531656151471, 0.8434109222903281, 0.84437735466366981, 0.84534022881238802, 0.84630365147018594, 0.84727049257247755, 0.84824259105643929, 0.84922091626975993, 0.85020570004885065, 0.85119655010385342, 0.85219255212683753, 0.85319236557931022, 0.85419431615056307, 0.85519648626045597, 0.85619680364164885, 0.85719312696164263, 0.85818332665216912, 0.85916535863132237, 0.8601373284552194, 0.86109754362108104, 0.86204455223057186, 0.86297716694530768, 0.86389447403157793, 0.86479582819099654, 0.86568083470298485, 0.86654932107599292, 0.86740130086184564, 0.86823693250729495, 0.86905647610877612, 0.86986025073384576, 0.8706485946231165, 0.87142183014248853, 0.87218023486560003, 0.8729240196711564, 0.87365331426896986, 0.87436816014205532, 0.87506851052142665, 0.87575423670150443, 0.8764251397598315, 0.87708096656618395, 0.87772142885420401, 0.87834622408238283, 0.87895505683047914, 0.87954765955799452, 0.88012381168753107, 0.88068335615805537, 0.88122621280844537, 0.8817523881856919, 0.88226198160759506, 0.88275518753218707, 0.88323229447997043, 0.88369368091150835, 0.88413980857383212, 0.88457121389349991, 0.88498849801391954, 0.88539231605402968, 0.88578336611444497, 0.88616237848258039, 0.88653010540111998, 0.88688731167166324, 0.88723476627568043, 0.88757323511237318, 0.88790347488263255, 0.88822622809030594, 0.8885422190888923, 0.88885215107069637, 0.88915670387736789, 0.88945653250161283, 0.88975226614942515, 0.89004450773717558, 0.8903338337073724, 0.89062079405875105, 0.89090591249973106, 0.89118968664774079, 0.89147258821036746, 0.89175506309686803, 0.89203753142001729, 0.89232038735890162, 0.89260399886272623, 0.89288870718402646, 0.89317482623760647, 0.89346264178845447, 0.89375241047838838, 0.89404435870774024, 0.89433868139402706, 0.89463554063611295, 0.89493506431742798, 0.89523734468835481, 0.89554243697299662, 0.89585035805093194, 0.89616108526945792, 0.89647455544577037, 0.8967906641216985, 0.89710926513530731, 0.89743017057381291, 0.89775315117031029, 0.89807793720261209, 0.89840421994543074, 0.89873165371757224, 0.89905985855294435, 0.89938842350827353, 0.89971691060225867, 0.9000448593594842, 0.90037179191005501, 0.90069721857178409, 0.90102064381855174, 0.90134157251592828, 0.90165951628589958, 0.90197399984731519, 0.90228456716927352, 0.90259078727198916, 0.9028922595147385, 0.90318861822370666, 0.90347953653315849, 0.90376472934176033, 0.90404395531844428, 0.90431701792960217, 0.90458376549708219, 
0.90484409033327273, 0.90509792703277347, 0.90534525002782074, 0.90558607053538276, 0.90582043303690429, 0.90604841143647397, 0.90627010504088046, 0.9064856344959259, 0.90669513779904276, 0.90689876649035106, 0.90709668210396288, 0.90728905294072593, 0.90747605120293762, 0.90765785051292369, 0.9078346238206686, 0.90800654169185824, 0.90817377095627794, 0.90833647368859904, 0.90849480648754199, 0.90864892001633613, 0.90879895876587213, 0.90894506100250627, 0.90908735886391911, 0.90922597856912757, 0.90936104071187884, 0.90949266061054312, 0.90962094869104904, 0.90974601088357532, 0.90986794901727763, 0.90998686120068006, 0.91010284217871407, 0.91021598366003453, 0.9103263746106991, 0.91043410151240967, 0.91053924858509305, 0.91064189797494777, 0.91074212991019721, 0.91084002282712817, 0.91093565346997574, 0.91102909696799395, 0.91112042689315986, 0.91120971530226447, 0.91129703276634999, 0.91138244839064986, 0.91146602982768843, 0.9115478432857721, 0.91162795353500947, 0.91170642391234147, 0.9117833163269351, 0.91185869126702201, 0.91193260780885832, 0.91200512362831898, 0.91207629501547893, 0.91214617689232447, 0.91221482283362476, 0.91228228509084686, 0.9123486146190275, 0.91241386110630684, 0.9124780730059181, 0.91254129757040114, 0.91260358088764015, 0.91266496791854002, 0.9127255025360963, 0.9127852275654682, 0.91284418482493135, 0.91290241516744175, 0.9129599585226017, 0.9130168539388539, 0.91307313962575187, 0.91312885299616264, 0.91318403070822807, 0.91323870870709301, 0.91329292226618763, 0.91334670602805479, 0.91340009404467304, 0.91345311981717747, 0.91350581633501682, 0.91355821611438159, 0.91361035123605616, 0.913662253382585, 0.91371395387470056, 0.91376548370714583, 0.91381687358376784, 0.91386815395194598, 0.91391935503641952, 0.91397050687235171, 0.91402163933788672, 0.91407278218598775, 0.91412396507571869, 0.91417521760292764, 0.91422656933033963, 0.91427804981713068, 0.91432968864784891, 0.91438151546093227, 0.91443355997658593, 0.91448585202412624, 0.91453842156887566, 0.9145912987384186, 0.91464451384838152, 0.91469809742758512, 0.91475208024271204, 0.91480649332222042, 0.91486136797976747, 0.91491673583687083, 0.9149726288448361, 0.91502907930592658, 0.91508611989368593, 0.91514378367234683, 0.91520210411517122, 0.91526111512180985, 0.9153208510343348, 0.91538134665203386, 0.9154426372446941, 0.91550475856437497, 0.91556774685532982, 0.91563163886207188, 0.91569647183532143, 0.91576228353563294, 0.91582911223448471, 0.91589699671264091, 0.91596597625543874, 0.91603609064482461, 0.91610738014782211, 0.91617988550108354, 0.91625364789120856, 0.91632870893055418, 0.91640511062804775, 0.91648289535476346, 0.91656210580368558, 0.91664278494341866, 0.91672497596526936, 0.91680872222330267, 0.91689406716693367, 0.91698105426550403, 0.91706972692446365, 0.91716012839249617, 0.91725230165937355, 0.91734628934372986, 0.91744213357051918, 0.91753987583760543, 0.91763955687102983, 0.9177412164685862, 0.91784489333132313, 0.91795062488265156, 0.9180584470747748, 0.91816839418233021, 0.91828049858309069, 0.91839479052576278, 0.91851129788497821, 0.91863004590381836, 0.91875105692420189, 0.91887435010582685, 0.91899994113437478, 0.91912784192006314, 0.91925806028772117, 0.91939059965991976, 0.91952545873492109, 0.91966263116146452, 0.91980210521284389, 0.91994386346274803, 0.92008788246612727, 0.9202341324481107, 0.92038257700478676, 0.92053317281970271, 0.92068586940023078, 0.92084060883836072, 0.92099732560049896, 0.92115594635114362, 0.92131638981533293, 0.92147856668488737, 
0.92164237957331951, 0.92180772302421565, 0.92197448357764844, 0.92214253989884409, 0.92231176297292417, 0.92248201636889304, 0.92265315657557179, 0.92282503341118605, 0.92299749050775171, 0.9231703658702044, 0.92334349250929126, 0.92351669914630519, 0.92368981098635239, 0.92386265055588612, 0.9240350385989079, 0.92420679502516001, 0.9243777399023787, 0.924547694483424, 0.92471648225826109, 0.92488393001942082, 0.92504986892882513, 0.92521413557308219, 0.9253765729934984, 0.9255370316769187, 0.92569537049312378, 0.92585145756480869, 0.926005171056409, 0.92615639986915443, 0.92630504423072491, 0.92645101616967029, 0.92659423986657541, 0.92673465187648929, 0.92687220121951341, 0.92700684933939692, 0.92713856993270838, 0.92726734865414262, 0.92739318270597659, 0.92751608032234278, 0.92763606016083111, 0.9277531506157547, 0.92786738906831379, 0.92797882108965823, 0.92808749961286596, 0.92819348408941238, 0.92829683964487475, 0.92839763624741167, 0.92849594790101675, 0.9285918518737476, 0.92868542796952824, 0.92877675784996461, 0.92886592441100135, 0.9289530112174561, 0.92903810199684866, 0.92912128019266726, 0.92920262857574953, 0.92928222891188417, 0.92936016168249269, 0.92943650585504345, 0.92951133869909075, 0.92958473564400013, 0.9296567701737577, 0.92972751375476537, 0.92979703579232298, 0.92986540361164183, 0.9299326824595896, 0.9299989355234014, 0.93006422396304356, 0.93012860695393162, 0.93019214173719988, 0.9302548836746648, 0.93031688630618747, 0.9303782014070221, 0.9304388790430248, 0.93049896762188866, 0.93055851393848665, 0.93061756321263833, 0.93067615911777868, 0.93073434379893738, 0.93079215787873437, 0.93084964045010132, 0.9309068290544853, 0.93096375964462319, 0.93102046653102477, 0.93107698231139768, 0.93113333778290608, 0.93118956183688129, 0.93124568133655483, 0.93130172097847863, 0.93135770313889465, 0.93141364770703206, 0.93146957190773305, 0.93152549011686092, 0.9315814136733076, 0.93163735069261844, 0.9316933058876874, 0.93174928040289895, 0.93180527166862892, 0.93186127328360735, 0.93191727493276899, 0.93197326234841471, 0.93202921732206667, 0.93208511777384773, 0.93214093788512187, 0.93219664829865378, 0.93225221638873113, 0.93230760660141165, 0.93236278086262814, 0.93241769904900884, 0.93247231951377141, 0.93252659965706264, 0.93258049652796471, 0.93263396744330651, 0.932686970606869, 0.9327394657122543, 0.93279141451228498, 0.93284278133910237, 0.93289353356056381, 0.93294364196104362, 0.93299308103767409, 0.93304182920632217, 0.93308986891518353, 0.93313718666731549, 0.93318377295666766, 0.93322962212504901, 0.9332747321496595, 0.93331910437265087, 0.9333627431850825, 0.93340565567788558, 0.93344785127260921, 0.93348934134348227, 0.93353013884160452, 0.93357025793037407, 0.93360971363995449, 0.93364852154664102, 0.93368669748160671, 0.93372425727203145, 0.93376121651593391, 0.9337975903914506, 0.93383339349976202, 0.93386863974060852, 0.93390334221840432, 0.93393751317692786, 0.93397116395991853, 0.93400430499529219, 0.93403694580007735, 0.93406909500374669, 0.93410076038722156, 0.93413194893526985, 0.93416266689997862, 0.93419291987296893, 0.93422271286445469, 0.93425205038697567, 0.93428093654207423, 0.93430937510803691, 0.93433736962709768, 0.93436492349045774, 0.93439204001986642, 0.93441872254433478, 0.93444497447092012, 0.93447079934862531, 0.93449620092467311, 0.93452118319251765, 0.93454575043128851, 0.9345699072364374, 0.9345936585416591, 0.93461700963224581, 0.93463996615028566, 0.93466253409220956, 0.93468471979937273, 0.93470652994235837, 
0.9347279714999237, 0.93474905173335765, 0.9347697781572597, 0.93479015850747627, 0.93481020070716758, 0.93482991283177586, 0.93484930307355796, 0.93486837970647652, 0.93488715105191911, 0.93490562544570044, 0.93492381120693235, 0.93494171660878678, 0.93495934985166607, 0.93497671903866308, 0.93499383215361254, 0.9350106970415667, 0.93502732139178213, 0.93504371272308062, 0.9350598783715105, 0.93507582548011869, 0.9350915609907301, 0.93510709163755623, 0.93512242394242884, 0.93513756421157712, 0.93515251853367776, 0.93516729277907296, 0.93518189260004569, 0.93519632343195358, 0.93521059049506439, 0.93522469879715997, 0.93523865313652332, 0.93525245810551239, 0.93526611809443383, 0.93527963729573083, 0.93529301970844481, 0.93530626914287296, 0.93531938922533175, 0.9353323834031374, 0.93534525494958665, 0.93535800696906357, 0.9353706424021756, 0.93538316403090005, 0.93539557448380672, 0.9354078762412148, 0.93542007164038299, 0.93543216288068398, 0.93544415202872433, 0.93545604102345459, 0.9354678316812548, 0.93547952570091542, 0.93549112466866369, 0.93550263006298884, 0.93551404325955367, 0.93552536553591381, 0.93553659807623168, 0.93554774197588941, 0.93555879824599475, 0.93556976781781287, 0.93558065154712178, 0.93559145021841006, 0.93560216454906142, 0.93561279519333729, 0.93562334274633274, 0.93563380774779181, 0.93564419068581728, 0.93565449200047579, 0.93566471208730506, 0.93567485130071282, 0.93568490995726894, 0.93569488833891068, 0.93570478669602619, 0.93571460525050598, 0.93572434419860917, 0.93573400371381699, 0.93574358394960555, 0.93575308504207944, 0.93576250711257469, 0.9357718502701905, 0.9357811146142212, 0.93579030023653043, 0.93579940722386223, 0.93580843566010685, 0.93581738562842132, 0.93582625721339319, 0.93583505050301519, 0.9358437655907168, 0.93585240257720603, 0.93586096157230791, 0.93586944269672401, 0.93587784608364022, 0.93588617188036305, 0.93589442024975256, 0.93590259137163156, 0.93591068544408518, 0.93591870268463229, 0.93592664333132558, 0.93593450764373598, 0.93594229590377809, 0.93595000841650133, 0.93595764551070859, 0.9359652075394872, 0.93597269488058643, 0.93598010793673336, 0.93598744713577586, 0.93599471293076275, 0.93600190579989473, 0.93600902624634297, 0.93601607479803106, 0.93602305200724478, 0.93602995845024906, 0.93603679472669599, 0.93604356145909129, 0.93605025929209462, 0.93605688889182403, 0.93606345094506427, 0.93606994615845596, 0.93607637525766452, 0.93608273898648997, 0.93608903810598765, 0.93609527339356402, 0.93610144564206765, 0.93610755565892778, 0.93611360426523182, 0.93611959229487729, 0.93612552059373999, 0.93613139001885737, 0.936137201437648, 0.93614295572718942, 0.93614865377348666, 0.93615429647085835, 0.93615988472129497, 0.93616541943387777, 0.93617090152433269, 0.93617633191447069, 0.93618171153181562, 0.93618704130917041, 0.93619232218434623, 0.93619755509976077, 0.93620274100226308, 0.93620788084284734, 0.93621297557642458, 0.93621802616171679, 0.93622303356104342, 0.93622799874016194, 0.93623292266817282, 0.93623780631739806, 0.93624265066320722, 0.93624745668393705, 0.9362522253607074, 0.93625695767730455, 0.9362616546199829, 0.9362663171772686, 0.93627094633969155, 0.93627554309955696, 0.93628010845059839, 0.93628464338760065, 0.93628914890601533, 0.93629362600144062, 0.93629807566909051, 0.93630249890317829, 0.93630689669623046, 0.93631127003834191, 0.93631561991629464, 0.93631994731265467, 0.93632425320477841, 0.93632853856366749, 0.93633280435283561, 0.93633705152699021, 0.93634128103067238, 0.93634549379683751, 
0.93634969074526886, 0.93635387278098658, 0.93635804079251617, 0.93636219565014689, 0.93636633820403725, 0.93637046928233114, 0.93637458968917764, 0.93637870020274028, 0.93638280157310061, 0.93638689452023749, 0.93639097973191687, 0.93639505786162081, 0.93639912952647697, 0.93640319530524374, 0.9364072557363422, 0.93641131131593491, 0.93641536249613544, 0.93641940968327464, 0.93642345323635667, 0.93642749346561849, 0.93643153063125373, 0.93643556494238611, 0.93643959655615217, 0.93644362557709848, 0.93644765205678426, 0.93645167599361634, 0.93645569733300205, 0.9364597159677398, 0.93646373173875663, 0.93646774443604841, 0.9364717538000088, 0.93647575952300965, 0.93647976125129717, 0.93648375858715038, 0.93648775109133153, 0.93649173828580512, 0.9364957196567093, 0.93649969465748029, 0.93650366271229668, 0.93650762321962489, 0.93651157555585507, 0.93651551907922082, 0.93651945313358542, 0.93652337705247979, 0.93652729016296987, 0.93653119178967736, 0.93653508125858975, 0.93653895790086672, 0.93654282105656761, 0.93654667007804748, 0.93655050433344411, 0.93655432320970511, 0.93655812611554867, 0.93656191248412912, 0.9365656817754896, 0.9365694334786101, 0.93657316711337801, 0.93657688223213587, 0.9365805784209662, 0.93658425530073841, 0.93658791252787033, 0.93659154979478132, 0.93659516683015687, 0.93659876339891135, 0.93660233930194925, 0.93660589437573971, 0.93660942849164186, 0.93661294155512309, 0.9366164335047209, 0.93661990431101771, 0.93662335397534258, 0.93662678252848686, 0.93663019002924885, 0.93663357656300772, 0.93663694224017446, 0.93664028719462467, 0.93664361158215104, 0.93664691557884483, 0.93665019937955107, 0.93665346319627796, 0.93665670725667094, 0.93665993180249008, 0.93666313708814197, 0.93666632337923106, 0.93666949095121343, 0.93667264008801332, 0.93667577108076427, 0.93667888422657697, 0.9366819798273367, 0.93668505818863101, 0.93668811961861975, 0.93669116442704703, 0.93669419292427303, 0.93669720542038837, 0.93670020222426442, 0.93670318364282423, 0.93670614998021584, 0.93670910153709275, 0.93671203860990437, 0.93671496149028577, 0.9367178704643726, 0.93672076581228503, 0.93672364780753137, 0.93672651671651452, 0.93672937279802504, 0.93673221630278913, 0.93673504747304048, 0.93673786654209867, 0.93674067373400804, 0.93674346926316809, 0.93674625333399875, 0.93674902614065536, 0.9367517878667192, 0.93675453868496383, 0.93675727875710368, 0.93676000823359706, 0.93676272725345044, 0.93676543594410555, 0.93676813442122919, 0.93677082278871326, 0.936773501138496, 0.9367761695505864, 0.93677882809303958, 0.93678147682195512, 0.93678411578152021, 0.93678674500410442, 0.93678936451036077, 0.93679197430936856, 0.93679457439882052, 0.93679716476521602, 0.93679974538412136, 0.93680231622045407, 0.93680487722878947, 0.93680742835368647, 0.93680996953012885, 0.93681250068386623, 0.93681502173192954, 0.93681753258306255, 0.9368200331382559, 0.9368225232912667, 0.93682500292919235, 0.93682747193306537, 0.93682993017846006, 0.93683237753609905, 0.93683481387255552, 0.93683723905086547, 0.93683965293126459, 0.93684205537179699, 0.93684444622911189, 0.93684682535907204, 0.93684919261749777, 0.93685154786086777, 0.9368538909469849, 0.93685622173567784, 0.93685854008946923, 0.93686084587421969, 0.9368631389597627, 0.93686541922052313, 0.93686768653611008, 0.93686994079184827, 0.93687218187936117, 0.93687440969700975, 0.93687662415038142, 0.93687882515273313, 0.93688101262536583, 0.93688318649794189, 0.9368853467088476, 0.93688749320542719, 0.9368896259442121, 0.9368917448911166, 
0.93689385002155356, 0.93689594132055065, 0.93689801878279455, 0.93690008241266221, 0.93690213222418062, 0.93690416824095768, 0.93690619049612045, 0.93690819903212286, 0.93691019390063901, 0.93691217516229253, 0.93691414288647701, 0.93691609715108493, 0.93691803804222595, 0.93691996565389324, 0.9369218800876713, 0.93692378145239141, 0.93692566986374393, 0.93692754544391532, 0.93692940832121607, 0.93693125862969429, 0.93693309650870182, 0.93693492210253015, 0.93693673555997792, 0.93693853703397356, 0.93694032668115179, 0.93694210466145467, 0.93694387113776767, 0.93694562627550415, 0.93694737024225339, 0.93694910320737534, 0.9369508253416966, 0.93695253681712065, 0.93695423780633147, 0.9369559284824629, 0.93695760901878344, 0.93695927958841685, 0.93696094036409028, 0.93696259151783268, 0.93696423322076261, 0.9369658656428852, 0.93696748895279258, 0.93696910331758088, 0.93697070890259471, 0.93697230587127323, 0.93697389438503664, 0.93697547460311081, 0.9369770466824201, 0.93697861077749467, 0.93698016704035469, 0.93698171562043975, 0.93698325666454485, 0.93698479031676385, 0.93698631671843724, 0.93698783600813307, 0.93698934832158254, 0.93699085379173219, 0.9369923525486874, 0.93699384471973846, 0.93699533042936356, 0.93699680979925226, 0.93699828294835541, 0.93699974999285851, 0.93700121104629275, 0.93700266621953698, 0.93700411562088903, 0.93700555935609109, 0.93700699752843519, 0.93700843023880376, 0.9370098575857182, 0.93701127966544528, 0.93701269657204078, 0.93701410839746846, 0.93701551523160964, 0.93701691716241486, 0.93701831427593785, 0.93701970665645129, 0.93702109438650472, 0.93702247754702839, 0.93702385621740303, 0.93702523047557651, 0.93702660039810326, 0.93702796606029226, 0.93702932753621404, 0.93703068489887875, 0.93703203822024228, 0.93703338757134824, 0.93703473302238849, 0.93703607464277072, 0.93703741250123018, 0.93703874666590314, 0.93704007720440163, 0.93704140418389514, 0.93704272767118502, 0.93704404773279326, 0.93704536443501252, 0.93704667784402229, 0.93704798802590283, 0.93704929504675483, 0.93705059897273713, 0.93705189987014892, 0.93705319780548058, 0.9370544928455028, 0.93705578505728004, 0.9370570745082708, 0.93705836126635444, 0.9370596453999146, 0.93706092697787269, 0.93706220606971524, 0.93706348274559881, 0.93706475707633385, 0.93706602913346304, 0.9370672989892973, 0.93706856671694416, 0.93706983239035313, 0.93707109608434402, 0.93707235787462861, 0.9370736178378728, 0.93707487605169193, 0.93707613259467393, 0.93707738754643677, 0.93707864098760574, 0.93707989299985839, 0.93708114366592921, 0.9370823930696357, 0.93708364129585975, 0.93708488843060389, 0.93708613456094991, 0.93708737977510725, 0.93708862416235705, 0.93708986781314163, 0.93709111081895613, 0.93709235327244078, 0.93709359526729874, 0.93709483689834738, 0.93709607826147467, 0.93709731945362185, 0.93709856057279417, 0.93709980171803053, 0.93710104298938146, 0.93710228448788568, 0.93710352631555949, 0.93710476857537739, 0.93710601137120242, 0.93710725480781787, 0.93710849899084925, 0.9371097440267433, 0.93711099002273512, 0.93711223708681524, 0.93711348532768413, 0.93711473485469365, 0.93711598577783017, 0.93711723820764303, 0.93711849225522093, 0.93711974803210385, 0.93712100565025969, 0.93712226522201048, 0.93712352685998168, 0.93712479067703025, 0.93712605678619076, 0.93712732530060538, 0.93712859633343826, 0.93712986999782322, 0.93713114640677431, 0.93713242567311439, 0.93713370790939876, 0.93713499322781124, 0.93713628174008401, 0.93713757355742777, 0.93713886879040409, 
0.93714016754885121, 0.93714146994177472, 0.93714277607725105, 0.93714408606232258, 0.93714540000285851, 0.93714671800349281, 0.93714804016745878, 0.93714936659652115, 0.93715069739078161, 0.93715203264863178, 0.93715337246657016, 0.93715471693910035, 0.93715606615857272, 0.93715742021507509, 0.937158779196268, 0.93716014318726415, 0.93716151227046152, 0.93716288652542823, 0.9371642660287216, 0.93716565085376269]}
# Parse the cached dict of per-iteration performances from disk;
# ast.literal_eval is a safer drop-in for eval() on a plain dict literal.
import ast
np_all_performances = ast.literal_eval(open(folder+'nmtf_np_performances.txt','r').read())
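# A minimal sketch (not part of the original script) of how the loaded
# per-iteration metrics could be compared across NMTF variants; it assumes
# matplotlib is available and that each performances dict uses the keys
# 'R^2', 'MSE', and 'Rp' seen in the cached literals above.
def plot_nmtf_convergence(all_performances, label, axes=None):
    import matplotlib.pyplot as plt
    metrics = ['R^2', 'MSE', 'Rp']
    if axes is None:
        _, axes = plt.subplots(1, len(metrics), figsize=(12, 3))
    for ax, metric in zip(axes, metrics):
        values = all_performances[metric]
        ax.plot(range(1, len(values)+1), values, label=label)  # x-axis = iteration
        ax.set_xlabel('Iteration')
        ax.set_ylabel(metric)
        ax.legend()
    # Returning the axes lets a caller overlay a second variant (e.g. ICM)
    # on the same subplots for a direct comparison.
    return axes
# Example (hypothetical): plot_nmtf_convergence(np_all_performances, 'NP NMTF')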
# ICM NMTF (iterated conditional modes)
# (a very long commented-out inline copy of the icm_all_performances dict,
#  with lists for 'R^2', 'MSE', and 'Rp', stood here; the data is loaded
#  from nmtf_icm_performances.txt on the next line instead)
icm_all_performances = eval(open(folder+'nmtf_icm_performances.txt','r').read())
# Assemble the average performances and method names
methods = ['VB-NMTF', 'G-NMTF', 'ICM-NMTF', 'NP-NMTF']
avr_performances = [
vb_all_performances,
gibbs_all_performances,
icm_all_performances,
np_all_performances
]
colours = ['r','b','g','c']
for metric in metrics:
fig = plt.figure(figsize=(1.9,1.5))
fig.subplots_adjust(left=0.12, right=0.95, bottom=0.17, top=0.95)
#plt.title("Performances (%s) for different fractions of missing values" % metric)
plt.xlabel("Iterations", fontsize=8, labelpad=0)
plt.ylabel(metric, fontsize=8, labelpad=-1)
plt.yticks(range(0,MSE_max+1),fontsize=6)
plt.xticks(fontsize=6)
x = iterations
    for method, avr_performance, colour in zip(methods, avr_performances, colours):
y = avr_performance[metric][0:len(iterations)]
#plt.plot(x,y,label=method)
plt.plot(x,y,linestyle='-', marker=None, label=method, c=colour)
if metric == 'MSE':
plt.ylim(0,MSE_max)
elif metric == 'R^2' or metric == 'Rp':
plt.ylim(0,1)
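    # Note: the filename below is fixed, so if `metrics` contains more than one
    # entry, each successive metric's figure overwrites the previous one.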
plt.savefig("../graphs_toy/mse_nmtf_convergences.png", dpi=600)
#plt.savefig("/home/tab43/Dropbox/Posters/Poster NIPS AABI 2016 v2 png/images/mse_nmtf_convergences.png", dpi=1200)
| 3,520.157143
| 61,547
| 0.850932
|
d02e1d39755bf4783cd5dbdc2b88ca0931e02874
| 3,769
|
py
|
Python
|
tests/transformation/streamline/test_remove_identity_ops.py
|
AlexMontgomerie/finn
|
ec5f67b333ad4db4acf6191c3b5ab5e9067347aa
|
[
"BSD-3-Clause"
] | 283
|
2019-09-26T10:09:34.000Z
|
2022-03-09T16:36:23.000Z
|
tests/transformation/streamline/test_remove_identity_ops.py
|
AlexMontgomerie/finn
|
ec5f67b333ad4db4acf6191c3b5ab5e9067347aa
|
[
"BSD-3-Clause"
] | 238
|
2019-10-04T12:20:26.000Z
|
2022-03-31T04:50:53.000Z
|
tests/transformation/streamline/test_remove_identity_ops.py
|
AlexMontgomerie/finn
|
ec5f67b333ad4db4acf6191c3b5ab5e9067347aa
|
[
"BSD-3-Clause"
] | 144
|
2019-09-23T13:46:14.000Z
|
2022-03-18T12:55:07.000Z
|
import pytest
import numpy as np
from onnx import helper, TensorProto
import finn.core.onnx_exec as oxe
from finn.core.datatype import DataType
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.infer_datatypes import InferDataTypes
from finn.transformation.infer_shapes import InferShapes
from finn.transformation.streamline.remove import RemoveIdentityOps
from finn.util.basic import gen_finn_dt_tensor
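# Helper: insert an elementwise op that is a numerical identity (add/sub of ~0,
# mul/div by ~1), either as the first node of the graph or directly after the
# existing Div node, rewiring the downstream input accordingly.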
def insert_identity_op(model, op, as_first_node, approx):
if approx:
zero_val = 0.000001
one_val = 0.999999
else:
zero_val = 0.0
one_val = 1.0
if op in ["Add", "Sub"]:
val = np.asarray([zero_val], dtype=np.float32)
elif op in ["Mul", "Div"]:
val = np.asarray([one_val], dtype=np.float32)
else:
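        # ops other than Add/Sub/Mul/Div are not identity candidates; leave the model unchanged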
return
graph = model.graph
if as_first_node:
identity_node = helper.make_node(op, ["inp", "value"], ["ident_out"])
graph.node.insert(0, identity_node)
graph.node[1].input[0] = "ident_out"
else:
identity_node = helper.make_node(op, ["div_out", "value"], ["ident_out"])
graph.node.insert(3, identity_node)
graph.node[-1].input[0] = "ident_out"
model.set_initializer("value", val)
return model
# identity operations to be inserted
@pytest.mark.parametrize("op", ["Add", "Sub", "Mul", "Div"])
@pytest.mark.parametrize("approx", [False, True])
@pytest.mark.parametrize("as_first_node", [False, True])
def test_remove_identity_ops(op, as_first_node, approx):
# set up onnx model
inp = helper.make_tensor_value_info("inp", TensorProto.FLOAT, [1, 4, 1, 1])
mul = helper.make_tensor_value_info("mul", TensorProto.FLOAT, [])
shape = helper.make_tensor_value_info("shape", TensorProto.FLOAT, [2])
div = helper.make_tensor_value_info("div", TensorProto.FLOAT, [])
matmul = helper.make_tensor_value_info("matmul", TensorProto.FLOAT, [4, 2])
outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, 2])
mul_node = helper.make_node("Mul", ["inp", "mul"], ["mul_out"])
reshape_node = helper.make_node("Reshape", ["mul_out", "shape"], ["reshape_out"])
div_node = helper.make_node("Div", ["reshape_out", "div"], ["div_out"])
matmul_node = helper.make_node("MatMul", ["div_out", "matmul"], ["outp"])
graph = helper.make_graph(
nodes=[mul_node, reshape_node, div_node, matmul_node],
name="identity-graph",
inputs=[inp],
outputs=[outp],
value_info=[mul, shape, div, matmul],
)
model = helper.make_model(graph, producer_name="mulpastconv-model")
model = ModelWrapper(model)
inp_values = gen_finn_dt_tensor(DataType.INT2, [1, 4, 1, 1])
mul_values = np.random.uniform(low=0.1, high=0.99, size=(1)).astype(np.float32)
shape_values = np.asarray([1, -1], dtype=np.int64)
div_values = np.random.uniform(low=0.1, high=0.99, size=(1)).astype(np.float32)
matmul_values = gen_finn_dt_tensor(DataType.INT2, [4, 2])
model.set_initializer("mul", mul_values)
model.set_initializer("shape", shape_values)
model.set_initializer("div", div_values)
model.set_initializer("matmul", matmul_values)
insert_identity_op(model, op, as_first_node, approx)
model = model.transform(InferShapes())
model = model.transform(InferDataTypes())
idict = {"inp": inp_values}
odict = oxe.execute_onnx(model, idict)
out_before = odict["outp"]
num_of_nodes_before = len(model.graph.node)
model = model.transform(RemoveIdentityOps())
num_of_nodes_after = len(model.graph.node)
assert num_of_nodes_before - 1 == num_of_nodes_after
odict = oxe.execute_onnx(model, idict)
out_after = odict["outp"]
assert np.isclose(out_before, out_after, atol=1e-3).all()
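# Hedged usage note (not part of the original test): assuming FINN and its
# dependencies are installed, this parametrized test can be run standalone with
#   pytest tests/transformation/streamline/test_remove_identity_ops.py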
| 39.673684
| 85
| 0.685328
|
8c05be0e61b57ba15d27106b3a9fc8826f2edb24
| 758
|
py
|
Python
|
Algo and DSA/LeetCode-Solutions-master/Python/minimum-skips-to-arrive-at-meeting-on-time.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 3,269
|
2018-10-12T01:29:40.000Z
|
2022-03-31T17:58:41.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/minimum-skips-to-arrive-at-meeting-on-time.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 53
|
2018-12-16T22:54:20.000Z
|
2022-02-25T08:31:20.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/minimum-skips-to-arrive-at-meeting-on-time.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 1,236
|
2018-10-12T02:51:40.000Z
|
2022-03-30T13:30:37.000Z
|
# Time: O(n^2)
# Space: O(n)
class Solution(object):
def minSkips(self, dist, speed, hoursBefore):
"""
:type dist: List[int]
:type speed: int
:type hoursBefore: int
:rtype: int
"""
def ceil(a, b):
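            # integer ceiling division: smallest integer >= a/b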
return (a+b-1)//b
dp = [0]*((len(dist)-1)+1) # dp[i]: (min time by i skips) * speed
for i, d in enumerate(dist):
            for j in reversed(range(len(dp))):
dp[j] = ceil(dp[j]+d, speed)*speed if i < len(dist)-1 else dp[j]+d
if j-1 >= 0:
dp[j] = min(dp[j], dp[j-1]+d)
target = hoursBefore*speed
        for i in range(len(dist)):
if dp[i] <= target:
return i
return -1
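# Quick sanity check (not part of the original solution): LeetCode's published
# example for this problem is dist = [1, 3, 2], speed = 4, hoursBefore = 2,
# with expected answer 1.
if __name__ == "__main__":
    assert Solution().minSkips([1, 3, 2], 4, 2) == 1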
| 29.153846
| 82
| 0.455145
|