| hexsha (string, len 40) | size (int64, 3 to 1.03M) | ext (10 classes) | lang (1 class) | max_stars_repo_path (string, len 3 to 972) | max_stars_repo_name (string, len 6 to 130) | max_stars_repo_head_hexsha (string, len 40 to 78) | max_stars_repo_licenses (list, len 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 3 to 972) | max_issues_repo_name (string, len 6 to 130) | max_issues_repo_head_hexsha (string, len 40 to 78) | max_issues_repo_licenses (list, len 1 to 10) | max_issues_count (int64, 1 to 116k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 3 to 972) | max_forks_repo_name (string, len 6 to 130) | max_forks_repo_head_hexsha (string, len 40 to 78) | max_forks_repo_licenses (list, len 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 3 to 1.03M) | avg_line_length (float64, 1.13 to 941k) | max_line_length (int64, 2 to 941k) | alphanum_fraction (float64, 0 to 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
39fb4c6ca9ae28efcb16046a1e30c905f41cb094 | 2,895 | py | Python | debug_toolbar/toolbar/loader.py | philz/django-debug-toolbar | aa07a663cb7b94cf626540112d35f991e5195deb | ["BSD-3-Clause"] | 4 | 2015-12-16T22:21:14.000Z | 2016-05-09T14:18:20.000Z | debug_toolbar/toolbar/loader.py | boykun/django-debug-toolbar | 08b6a97d759e9808962cafd5a5ef3bab3642ccdb | ["BSD-3-Clause"] | null | null | null | debug_toolbar/toolbar/loader.py | boykun/django-debug-toolbar | 08b6a97d759e9808962cafd5a5ef3bab3642ccdb | ["BSD-3-Clause"] | null | null | null |
"""
The main DebugToolbar class that loads and renders the Toolbar.
"""
from django.template.loader import render_to_string
class DebugToolbar(object):
def __init__(self, request):
self.request = request
self.panels = []
self.config = {
'INTERCEPT_REDIRECTS': True,
}
# Override this tuple by copying to settings.py as `DEBUG_TOOLBAR_PANELS`
self.default_panels = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
#'debug_toolbar.panels.cache.CacheDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
)
self.load_panels()
def load_panels(self):
"""
Populate debug panels
"""
from django.conf import settings
from django.core import exceptions
# Check if settings has a DEBUG_TOOLBAR_PANELS, otherwise use default
if hasattr(settings, 'DEBUG_TOOLBAR_PANELS'):
self.default_panels = settings.DEBUG_TOOLBAR_PANELS
        # Check if settings has a DEBUG_TOOLBAR_CONFIG and update the config
if hasattr(settings, 'DEBUG_TOOLBAR_CONFIG'):
self.config.update(settings.DEBUG_TOOLBAR_CONFIG)
for panel_path in self.default_panels:
try:
dot = panel_path.rindex('.')
            except ValueError:
                raise exceptions.ImproperlyConfigured("%s isn't a debug panel module" % panel_path)
            panel_module, panel_classname = panel_path[:dot], panel_path[dot+1:]
            try:
                mod = __import__(panel_module, {}, {}, [''])
            except ImportError as e:
                raise exceptions.ImproperlyConfigured('Error importing debug panel %s: "%s"' % (panel_module, e))
            try:
                panel_class = getattr(mod, panel_classname)
            except AttributeError:
                raise exceptions.ImproperlyConfigured('Toolbar Panel module "%s" does not define a "%s" class' % (panel_module, panel_classname))
            try:
                panel_instance = panel_class()
            except Exception:
                print(panel_class)
                raise  # Bubble up problem loading panel
self.panels.append(panel_instance)
def render_toolbar(self):
"""
Renders the overall Toolbar with panels inside.
"""
return render_to_string('debug_toolbar/base.html', {
'panels': self.panels,
'BASE_URL': self.request.META.get('SCRIPT_NAME', ''),
})
| 39.657534 | 145 | 0.620725 |
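A minimal sketch (not part of the dataset row above) of how this DebugToolbar might be driven from old-style Django middleware; the middleware class name and the content-type check are assumptions for illustration:

# Hypothetical middleware sketch: build the toolbar per request and append
# its rendered HTML to HTML responses. load_panels() runs in __init__.
from debug_toolbar.toolbar.loader import DebugToolbar

class DebugToolbarMiddleware(object):
    def process_response(self, request, response):
        toolbar = DebugToolbar(request)
        if 'text/html' in response.get('Content-Type', ''):
            response.content += toolbar.render_toolbar().encode('utf-8')
        return response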
f14e31dc8302ae83fefc9a4de3bb7e919eb10e8f | 1,457 | py | Python | src/beginningMetal/11_AmbientLighting/Final/pyMetal/mScene.py | pome-ta/pystaMetalStudy | 530248ad8621ec951fcbaf450ebd26ac2752e540 | ["MIT"] | 1 | 2021-08-05T04:31:02.000Z | 2021-08-05T04:31:02.000Z | src/beginningMetal/11_AmbientLighting/Final/pyMetal/mScene.py | pome-ta/pystaMetalStudy | 530248ad8621ec951fcbaf450ebd26ac2752e540 | ["MIT"] | 2 | 2021-08-14T03:33:12.000Z | 2021-11-11T06:25:01.000Z | src/beginningMetal/11_AmbientLighting/Final/pyMetal/mScene.py | pome-ta/pystaMetalStudy | 530248ad8621ec951fcbaf450ebd26ac2752e540 | ["MIT"] | null | null | null |
import ctypes
from .camera import Camera
from .mNode import Node
from .pyTypes import SceneConstants, Light
class Scene(Node):
def __init__(self, device, size):
super().__init__()
self.device = device
self.size = size
self.camera = Camera()
self.sceneConstants = SceneConstants()
self.light = Light()
self.deltaTime = None
# todo: self.size = bounds(x, y, width, height)
self.camera.aspect = self.size[2] / self.size[3]
self.camera.position.z = -6.0
self.add_childNode_(self.camera)
def update_deltaTime_(self, deltaTime):
pass
def render_commandEncoder_deltaTime_(self, commandEncoder, deltaTime):
self.update_deltaTime_(deltaTime)
self.sceneConstants.projectionMatrix = self.camera.projectionMatrix()
commandEncoder.setFragmentBytes_length_atIndex_(
ctypes.byref(self.light),
ctypes.sizeof(Light), 3)
# print(ctypes.byref(self.light))
# print(ctypes.sizeof(Light))
commandEncoder.setVertexBytes_length_atIndex_(
ctypes.byref(self.sceneConstants),
ctypes.sizeof(SceneConstants), 2)
# print(ctypes.byref(self.sceneConstants))
# print(ctypes.sizeof(SceneConstants))
for child in self.children:
child.render_commandEncoder_parentModelViewMatrix_(
commandEncoder, self.camera.viewMatrix())
def sceneSizeWillChange_size_(self, size):
width, height = size
self.camera.aspect = width / height
| 29.734694 | 73 | 0.706932 |
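A short sketch, under the assumption that the Node base class exposes a mutable rotation vector (not shown in this row), of how a concrete scene could override update_deltaTime_ to animate its children:

# Hypothetical subclass: spin every non-camera child a little each frame.
# render_commandEncoder_deltaTime_ calls update_deltaTime_ before encoding.
from pyMetal.mScene import Scene

class SpinningScene(Scene):
    def update_deltaTime_(self, deltaTime):
        for child in self.children:
            if child is not self.camera:
                child.rotation.y += 1.0 * deltaTime  # assumes Node has .rotation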
fcbce2436f0dc65683bb8ac87d5c24907e05698c | 509 | py | Python | algorithm/680-valid-palindrome-2/solution2.py | tonylixu/devops | c820eb396569e1dc8dad7bd682e37498dde081ba | ["Apache-2.0"] | null | null | null | algorithm/680-valid-palindrome-2/solution2.py | tonylixu/devops | c820eb396569e1dc8dad7bd682e37498dde081ba | ["Apache-2.0"] | null | null | null | algorithm/680-valid-palindrome-2/solution2.py | tonylixu/devops | c820eb396569e1dc8dad7bd682e37498dde081ba | ["Apache-2.0"] | null | null | null |
def valid_palindrome(s):
"""
:type s: str
:rtype: bool
"""
lo, hi = 0, len(s) - 1
while lo < hi:
if s[lo] != s[hi]:
s1 = s[:lo] + s[lo+1:]
s2 = s[:hi] + s[hi+1:]
return is_palindrome(s1) or is_palindrome(s2)
lo += 1
hi -= 1
return True
def is_palindrome(s):
    return s == s[::-1]
if __name__ == '__main__':
s = 'cbbcc'
    print(valid_palindrome(s))
| 20.36 | 57 | 0.449902 |
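A few quick checks of the one-deletion logic (with valid_palindrome from solution2.py in scope):

# 'abca' becomes a palindrome by deleting 'b' or 'c'; 'abc' cannot.
assert valid_palindrome('aba') is True
assert valid_palindrome('abca') is True
assert valid_palindrome('abc') is False
print(valid_palindrome('cbbcc'))  # False: no single deletion fixes it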
2fa68c92f903edacace4942da6b878ab3e1e79db | 790 | py | Python | apps/users/views/login.py | deniskrumko/deniskrumko | 613c0c3eac953d2e8482a2e66fce7d3570770b2c | ["MIT"] | 2 | 2019-07-09T01:42:04.000Z | 2020-04-09T16:44:59.000Z | apps/users/views/login.py | deniskrumko/deniskrumko | 613c0c3eac953d2e8482a2e66fce7d3570770b2c | ["MIT"] | 5 | 2019-12-30T22:16:38.000Z | 2020-09-11T18:13:14.000Z | apps/users/views/login.py | deniskrumko/deniskrumko | 613c0c3eac953d2e8482a2e66fce7d3570770b2c | ["MIT"] | 1 | 2019-07-09T01:42:07.000Z | 2019-07-09T01:42:07.000Z |
from django.contrib.auth import authenticate, login
from core.views import BaseView
class LoginView(BaseView):
"""View for users to log in."""
menu = 'user'
template_name = 'users/login.html'
    title = 'DK - Главная'  # Russian: "DK - Home"
    description = 'DK - Войти'  # Russian: "DK - Log in"
def post(self, request):
"""Get username and password to authenticate user."""
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
next_redirect = self.request.GET.get('next')
return self.redirect(next_redirect or 'main:index', use_reverse=not next_redirect)
return self.redirect('users:login')
| 29.259259 | 94 | 0.650633 |
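One plausible way to wire the view up; the URL pattern below is a sketch inferred from the 'users:login' redirect target, not code from the repository:

# Hypothetical apps/users/urls.py fragment.
from django.urls import path
from .views.login import LoginView

app_name = 'users'
urlpatterns = [
    path('login/', LoginView.as_view(), name='login'),
]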
5f450bd03290524368ff21baba792f8c2e5425a8 | 3,226 | py | Python | prepare-commit-msg.py | apiechowicz/git-hooks | a3b26fe8d8395293d932f3d5df80fcf3a06e0caa | ["Apache-2.0"] | 1 | 2018-08-11T19:31:25.000Z | 2018-08-11T19:31:25.000Z | prepare-commit-msg.py | apiechowicz/git-hooks | a3b26fe8d8395293d932f3d5df80fcf3a06e0caa | ["Apache-2.0"] | 2 | 2018-08-11T15:50:34.000Z | 2018-08-11T22:34:17.000Z | prepare-commit-msg.py | apiechowicz/git-hooks | a3b26fe8d8395293d932f3d5df80fcf3a06e0caa | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
import sys
from re import fullmatch
from subprocess import check_output
SKIP_REBASE_COMMITS = True
SKIP_MERGE_COMMITS = True
BRANCH_NAME_PATTERN = r'^(?:feature|bugfix)-(\d+)$'
COMMIT_MESSAGE_TAG_TEMPLATE = '[{}]'
COMMIT_MESSAGE_SEPARATOR = ' '
BRANCH_TAG_PREFIX = '#' # 'normal' working branches
SPECIAL_BRANCH_TAG = 'HF' # special branches like master, develop, release branches etc.
MERGE_COMMIT_PATTERN = r"^Merge (?:remote-tracking )?branch '[\w-]+' into [\w-]+$"
CAPITALIZE = True
REMOVE_TRAILING_DOT = True
def main() -> None:
commit_message_file = sys.argv[1]
branch_name = get_branch_name()
if SKIP_REBASE_COMMITS and branch_name_indicates_rebase(branch_name):
return
commit_message = read_commit_message(commit_message_file)
if SKIP_MERGE_COMMITS and is_merge_commit(commit_message):
return
commit_tag = create_commit_tag(branch_name)
new_commit_message = update_commit_message_if_needed(commit_message, commit_tag)
if new_commit_message != commit_message:
change_commit_message(commit_message_file, new_commit_message)
def get_branch_name() -> str:
return check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"], encoding='utf-8').strip()
def branch_name_indicates_rebase(branch_name: str) -> bool:
return branch_name == 'HEAD'
def read_commit_message(message_file: str) -> str:
with open(message_file, 'r') as file:
return file.read().strip()
def is_merge_commit(commit_message: str) -> bool:
return fullmatch(MERGE_COMMIT_PATTERN, commit_message) is not None
def create_commit_tag(branch_name: str) -> str:
match = fullmatch(BRANCH_NAME_PATTERN, branch_name)
if match:
return COMMIT_MESSAGE_TAG_TEMPLATE.format(BRANCH_TAG_PREFIX + match.group(1))
return COMMIT_MESSAGE_TAG_TEMPLATE.format(SPECIAL_BRANCH_TAG)
def update_commit_message_if_needed(commit_message: str, branch_tag: str) -> str:
if CAPITALIZE:
commit_message = commit_message.capitalize()
if REMOVE_TRAILING_DOT:
commit_message = remove_trailing_dot_if_needed(commit_message)
if not is_branch_tag_present(commit_message, branch_tag):
commit_message = add_branch_tag(commit_message, branch_tag)
return commit_message
def remove_trailing_dot_if_needed(commit_message: str) -> str:
if commit_message.endswith('.'):
return commit_message[:-1]
return commit_message
def is_branch_tag_present(commit_message: str, branch_tag: str) -> bool:
return commit_message.startswith(branch_tag)
def add_branch_tag(commit_message: str, branch_tag: str) -> str:
return branch_tag + COMMIT_MESSAGE_SEPARATOR + commit_message
def change_commit_message(commit_message_file_path: str, new_commit_message: str) -> None:
truncate_file(commit_message_file_path)
save_message(commit_message_file_path, new_commit_message)
def truncate_file(file_path: str) -> None:
    with open(file_path, 'w') as file:
        file.truncate()
def save_message(commit_message_file_path: str, new_commit_message: str) -> None:
with open(commit_message_file_path, 'w') as file:
file.write(new_commit_message)
if __name__ == '__main__':
main()
| 33.257732 | 95 | 0.753255 |
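The tagging helpers are pure string functions, so they can be exercised without a git checkout; with the functions above in scope:

print(create_commit_tag('feature-123'))  # [#123]
print(create_commit_tag('develop'))      # [HF]  (special branch)
print(update_commit_message_if_needed('fix typo.', '[#123]'))
# [#123] Fix typo  (capitalized, trailing dot removed, tag prepended)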
8f3c0583c77e173d527da73f2468f70259490756 | 8,174 | py | Python | low_level_simulation/src/rosbridge_suite/rosbridge_library/test/capabilities/test_service_capabilities.py | abiantorres/autonomous-vehicles-system-simulation | 3f0112036b2b270f5055729c648a1310976df933 | ["Apache-2.0"] | 60 | 2021-09-07T12:42:48.000Z | 2022-03-12T09:30:36.000Z | low_level_simulation/src/rosbridge_suite/rosbridge_library/test/capabilities/test_service_capabilities.py | abiantorres/autonomous-vehicles-system-simulation | 3f0112036b2b270f5055729c648a1310976df933 | ["Apache-2.0"] | 222 | 2021-10-29T22:00:27.000Z | 2022-03-29T20:56:34.000Z | low_level_simulation/src/rosbridge_suite/rosbridge_library/test/capabilities/test_service_capabilities.py | abiantorres/autonomous-vehicles-system-simulation | 3f0112036b2b270f5055729c648a1310976df933 | ["Apache-2.0"] | 1 | 2021-08-13T08:30:25.000Z | 2021-08-13T08:30:25.000Z |
#!/usr/bin/env python
import rospy
import rostest
import unittest
from json import loads, dumps
from rosbridge_library.capabilities.advertise_service import AdvertiseService
from rosbridge_library.capabilities.unadvertise_service import UnadvertiseService
from rosbridge_library.capabilities.call_service import CallService
from rosbridge_library.capabilities.service_response import ServiceResponse
from rosbridge_library.protocol import Protocol
from rosbridge_library.protocol import InvalidArgumentException, MissingArgumentException
class TestServiceCapabilities(unittest.TestCase):
def setUp(self):
self.proto = Protocol(self._testMethodName)
# change the log function so we can verify errors are logged
self.proto.log = self.mock_log
# change the send callback so we can access the rosbridge messages
# being sent
self.proto.send = self.local_send_cb
self.advertise = AdvertiseService(self.proto)
self.unadvertise = UnadvertiseService(self.proto)
self.response = ServiceResponse(self.proto)
self.received_message = None
self.log_entries = []
def local_send_cb(self, msg):
self.received_message = msg
def mock_log(self, loglevel, message, _=None):
self.log_entries.append((loglevel, message))
def test_advertise_missing_arguments(self):
advertise_msg = loads(dumps({"op": "advertise_service"}))
self.assertRaises(MissingArgumentException,
self.advertise.advertise_service, advertise_msg)
def test_advertise_invalid_arguments(self):
advertise_msg = loads(dumps({"op": "advertise_service",
"type": 42,
"service": None}))
self.assertRaises(InvalidArgumentException,
self.advertise.advertise_service, advertise_msg)
def test_response_missing_arguments(self):
response_msg = loads(dumps({"op": "service_response"}))
self.assertRaises(MissingArgumentException,
self.response.service_response, response_msg)
# this message has the optional fields, with correct types, but not the
# required ones
response_msg = loads(dumps({"op": "service_response",
"id": "dummy_service",
"values": "none"}))
self.assertRaises(MissingArgumentException,
self.response.service_response, response_msg)
def test_response_invalid_arguments(self):
response_msg = loads(dumps({"op": "service_response",
"service": 5,
"result": "error"}))
self.assertRaises(InvalidArgumentException,
self.response.service_response, response_msg)
def test_advertise_service(self):
service_path = "/set_bool_1"
advertise_msg = loads(dumps({"op": "advertise_service",
"type": "std_srvs/SetBool",
"service": service_path}))
self.advertise.advertise_service(advertise_msg)
# This throws an exception if the timeout is exceeded (i.e. the service
# is not properly advertised)
rospy.wait_for_service(service_path, 1.0)
def test_call_advertised_service(self):
service_path = "/set_bool_2"
advertise_msg = loads(dumps({"op": "advertise_service",
"type": "std_srvs/SetBool",
"service": service_path}))
self.advertise.advertise_service(advertise_msg)
# Call the service via rosbridge because rospy.ServiceProxy.call() is
# blocking
call_service = CallService(self.proto)
call_service.call_service(loads(dumps({"op": "call_service",
"id": "foo",
"service": service_path,
"args": [True]})))
loop_iterations = 0
while self.received_message is None:
rospy.sleep(rospy.Duration(0.5))
loop_iterations += 1
if loop_iterations > 3:
self.fail("did not receive service call rosbridge message "
"after waiting 2 seconds")
self.assertFalse(self.received_message is None)
self.assertTrue("op" in self.received_message)
self.assertTrue(self.received_message["op"] == "call_service")
self.assertTrue("id" in self.received_message)
# Now send the response
response_msg = loads(dumps({"op": "service_response",
"service": service_path,
"id": self.received_message["id"],
"values": {"success": True,
"message": ""},
"result": True}))
self.received_message = None
self.response.service_response(response_msg)
loop_iterations = 0
while self.received_message is None:
rospy.sleep(rospy.Duration(0.5))
loop_iterations += 1
if loop_iterations > 3:
self.fail("did not receive service response rosbridge message "
"after waiting 2 seconds")
self.assertFalse(self.received_message is None)
# Rosbridge should forward the response message to the "client"
# (i.e. our custom send function, see setUp())
self.assertEqual(self.received_message["op"], "service_response")
self.assertTrue(self.received_message["result"])
def test_unadvertise_with_live_request(self):
service_path = "/set_bool_3"
advertise_msg = loads(dumps({"op": "advertise_service",
"type": "std_srvs/SetBool",
"service": service_path}))
self.advertise.advertise_service(advertise_msg)
# Call the service via rosbridge because rospy.ServiceProxy.call() is
# blocking
call_service = CallService(self.proto)
call_service.call_service(loads(dumps({"op": "call_service",
"id": "foo",
"service": service_path,
"args": [True]})))
loop_iterations = 0
while self.received_message is None:
rospy.sleep(rospy.Duration(0.5))
loop_iterations += 1
if loop_iterations > 3:
self.fail("did not receive service call rosbridge message "
"after waiting 2 seconds")
self.assertFalse(self.received_message is None)
self.assertTrue("op" in self.received_message)
self.assertTrue(self.received_message["op"] == "call_service")
self.assertTrue("id" in self.received_message)
# Now send the response
response_msg = loads(dumps({"op": "unadvertise_service",
"service": service_path}))
self.received_message = None
self.unadvertise.unadvertise_service(response_msg)
loop_iterations = 0
while self.received_message is None:
rospy.sleep(rospy.Duration(0.5))
loop_iterations += 1
if loop_iterations > 3:
self.fail("did not receive service response rosbridge message "
"after waiting 2 seconds")
self.assertFalse(self.received_message is None)
# Rosbridge should abort the existing service call with an error
# (i.e. "result" should be False)
self.assertEqual(self.received_message["op"], "service_response")
self.assertFalse(self.received_message["result"])
PKG = 'rosbridge_library'
NAME = 'test_service_capabilities'
if __name__ == '__main__':
rospy.init_node(NAME)
rostest.rosrun(PKG, NAME, TestServiceCapabilities)
| 44.183784 | 89 | 0.592855 |
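For orientation, the three rosbridge ops that test_call_advertised_service exchanges, written out as plain dicts (values mirror the test above):

advertise = {"op": "advertise_service", "type": "std_srvs/SetBool",
             "service": "/set_bool_2"}
call = {"op": "call_service", "id": "foo",
        "service": "/set_bool_2", "args": [True]}
response = {"op": "service_response", "service": "/set_bool_2", "id": "foo",
            "values": {"success": True, "message": ""}, "result": True}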
de38a74ed2236dd2176f62471c6213bdeb0251e5 | 389 | py | Python | src/ontask_lti/utils.py | japesone/ontask_b | 17af441f9893c521d2e14011e7790ba4077e3318 | ["MIT"] | 18 | 2015-02-04T06:56:02.000Z | 2021-01-14T08:48:05.000Z | ims_lti_py/utils.py | mitodl/ims_lti_py | d96a6201cf3b63b9e1b33780ef5e29bc65242ceb | ["MIT"] | 8 | 2020-06-05T17:03:01.000Z | 2022-03-11T23:12:32.000Z | perspectivesx_project/ims_lti_py/utils.py | UQ-UQx/PerspectivesX | 94b13aa7d4966b8a34116bd9526d8809a25e0baf | ["MIT"] | 27 | 2015-04-16T12:05:42.000Z | 2020-12-24T00:32:53.000Z |
from uuid import uuid1
def generate_identifier():
return uuid1().__str__()
class InvalidLTIConfigError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class InvalidLTIRequestError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| 22.882353 | 40 | 0.678663 |
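Example usage of the helpers above; the printed identifier is whatever uuid1() yields on your machine:

print(generate_identifier())  # e.g. 'd4f1c3a0-...' (a uuid1 string)
try:
    raise InvalidLTIConfigError('missing launch_url')
except InvalidLTIConfigError as e:
    print(e)  # "'missing launch_url'" (repr of the stored value)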
b5cee94316ceebe7929ee6d019d967b00171c86e | 21,298 | py | Python | examples/get_answer_byrules.py | Borororo/interpretable_ropes | c083cc388998a1cfb6720a92ecb943b0edcee204 | ["Apache-2.0"] | 3 | 2020-10-03T04:07:19.000Z | 2020-11-29T13:48:18.000Z | examples/get_answer_byrules.py | Borororo/Interpretable-Modular-KR-for-MRC | cc921b0e04947663fcc60f0552ca8d8907ca6a1b | ["Apache-2.0"] | null | null | null | examples/get_answer_byrules.py | Borororo/Interpretable-Modular-KR-for-MRC | cc921b0e04947663fcc60f0552ca8d8907ca6a1b | ["Apache-2.0"] | null | null | null |
# -*- coding:utf-8 -*-
import json
from transformers.data.metrics.squad_metrics import (
compute_f1
)
import os
import random
def read_predicted_file(predict_file,pred):
Object_question_words ={"which",'in which','whose','who','what','for which','on which'}
comparative_words =[
'more', 'less', 'higher', 'lower', 'increase', 'decrease', 'high', 'low', 'harder', 'easier', 'increasing',
'decreasing', 'up', 'down', 'larger', 'smaller','better','worse','faster','slower','weaker','stronger','closer','farther','louder','quieter','correctly','incorrectly','not','yes','no','not'
]
positive_words = ['more', 'higher', 'increase', 'high', 'harder', 'increasing', 'up', 'larger', 'better', 'faster', 'stronger', 'closer', 'louder', 'correctly']
negative_words = ['less', 'lower', 'decrease', 'low', 'easier', 'decreasing', 'down', 'smaller', 'worse', 'slower', 'weaker', 'farther', 'quieter', 'incorrectly','fewer','not','avoid']
all_object_scores = []
record={}
pred_f1 =[]
filtered = [1912355095, 580926078, 2893035919, 3005625773, 4037359159, 1090395805, 1111432861, 1772822810,
1779114266, 3003444033, 1687481522, 373839753, 3209385804, 3128907286, 1746228971, 4088330372,
4227418477, 4047850835,
302402461, 603409309, 2849858743, 2050057087, 336287454, 1787516699, 184586157, 2999947384, 1202686909,
1223723965, 2335934912, 2677049981, 1182285725, 430326667, 4183507762, 1091390005, 1076054581,
3847534556,
4074158044, 955038082, 1423883267,
1626887498, 184177891, 1586058524, 156980418, 1607423284, 1608996147, 3071497657,
3865282641, 1588430868, 3876948090, 3344467660, 3346695902, 3600844723, 3227879116, 3484256179,
2934519278, 3876075109, 2041918832, 1115960569, 1108882681, 2013214064, 3895408229, 2975020526,
3885579536, 356328641, 985605428, 809437101, 494407430, 3999682891, 1562329124, 1505378340, 2022868633,
2022147737, 1729594789, 1730315685, 1082622173, 1205043665, 1081901277, 1204322769]
filtered_id = [str(i) for i in filtered]
with open(predict_file, "r", encoding="utf-8")as reader:
input_data = json.load(reader)
for entry in input_data["data"]:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["background"]
situation_text = paragraph['situation']
for qa in paragraph['qas']:
qas_id = qa['id']
question_text =qa['question']
answer_text = qa['answers'][0]['text']
predicts = qa['predicts']
if any(i==answer_text for i in comparative_words):
continue
if qas_id in filtered_id:
continue
if any(question_text.lower().startswith(i) for i in Object_question_words):
if max(compute_f1(answer_text, predicts['object1']),
compute_f1(answer_text, predicts['object2'])) == 0.0:
continue
predicts_answer = make_choice_pos(predicts)
for word in positive_words:
if word in question_text.lower():
predicts_answer = make_choice_pos(predicts)
break
for word in negative_words:
if word in question_text.lower():
if word not in predicts['TP in back'].lower():
predicts_answer = make_choice_neg(predicts)
# print(qas_id,predicts_answer)
break
f1 = compute_f1(remove_punc(answer_text),remove_punc(predicts_answer))
pred_f1.append(compute_f1(remove_punc(answer_text), remove_punc(pred[qas_id])))
record[qas_id] = [f1,answer_text,predicts_answer]
all_object_scores.append(f1)
else:
continue
return all_object_scores,record,pred_f1
def read_predicted_file_comparative(predict_file,pred):
question_word = ['would','will']
comparative_words = [
'more', 'less', 'higher', 'lower', 'increase', 'decrease', 'harder', 'easier', 'increasing',
'decreasing', 'larger', 'smaller', 'better', 'worse', 'faster', 'slower', 'weaker', 'stronger',
'closer', 'farther', 'louder', 'quieter', 'correctly', 'incorrectly', 'not', 'yes', 'no', 'not'
]
pairs = {
'more':'less',
'higher': 'lower',
'increase':'decrease',
'harder':'easier',
'increasing':'decreasing',
'larger':'smaller',
'better':'worse',
'faster':'slower',
'stronger':'weaker',
'closer':'farther',
'louder':'quieter',
'correctly': 'incorrectly',
}
filtered = [1912355095, 580926078, 2893035919, 3005625773, 4037359159, 1090395805, 1111432861, 1772822810,
1779114266, 3003444033, 1687481522, 373839753, 3209385804, 3128907286, 1746228971, 4088330372,
4227418477, 4047850835,
302402461, 603409309, 2849858743, 2050057087, 336287454, 1787516699, 184586157, 2999947384, 1202686909,
1223723965, 2335934912, 2677049981, 1182285725, 430326667, 4183507762, 1091390005, 1076054581,
3847534556,
4074158044, 955038082, 1423883267,
1626887498, 184177891, 1586058524, 156980418, 1607423284, 1608996147, 3071497657,
3865282641, 1588430868, 3876948090, 3344467660, 3346695902, 3600844723, 3227879116, 3484256179,
2934519278, 3876075109, 2041918832, 1115960569, 1108882681, 2013214064, 3895408229, 2975020526,
3885579536, 356328641, 985605428, 809437101, 494407430, 3999682891, 1562329124, 1505378340, 2022868633,
2022147737, 1729594789, 1730315685, 1082622173, 1205043665, 1081901277, 1204322769]
filtered_id = [str(i) for i in filtered]
cnt ={}
prediction ={}
f1 =[]
pred_f1=[]
debug={}
with open(predict_file, "r", encoding="utf-8")as reader:
input_data = json.load(reader)
for entry in input_data["data"]:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["background"]
situation_text = paragraph['situation']
for qa in paragraph['qas']:
candidate=[]
qas_id = qa['id']
question_text = qa['question']
answer_text = qa['answers'][0]['text']
predicts = qa['predicts']
if not any(i == answer_text for i in comparative_words):
continue
if qas_id in filtered_id:
continue
# if any(question_text.lower().startswith(i) for i in question_word):
# continue
for key, val in pairs.items():
if key in question_text.lower() and val in question_text.lower():
candidate= [key,val]
break
if candidate!=[]:
o1_ind = question_text.lower().find(remove_punc(predicts["object1"].lower()))
o2_ind = question_text.lower().find(remove_punc(predicts["object2"].lower()))
than_ind = question_text.lower().find("than")
indx = [o1_ind,o2_ind,than_ind]
if (o1_ind== -1 and o2_ind ==-1 ) or o1_ind==o2_ind:
# print(qas_id,question_text)
continue
elif o1_ind!= -1 and o2_ind ==-1:
if than_ind != -1:
if o1_ind<than_ind:
o2_ind =1000
elif o1_ind == -1 and o2_ind != -1:
if than_ind != -1:
if o2_ind < than_ind:
o1_ind = 1000
if o1_ind < o2_ind:
if int(predicts['TP_relevance']) ==0:
prediction[qas_id] = [compute_f1(answer_text,candidate[0]),candidate[0],candidate,indx]
f1.append(compute_f1(answer_text,candidate[0]))
else:
prediction[qas_id] = [compute_f1(answer_text,candidate[1]),candidate[1],candidate,indx]
f1.append(compute_f1(answer_text, candidate[1]))
else:
if int(predicts['TP_relevance']) ==0:
prediction[qas_id] = [compute_f1(answer_text,candidate[1]),candidate[1],candidate,indx]
f1.append(compute_f1(answer_text, candidate[1]))
else:
prediction[qas_id] = [compute_f1(answer_text,candidate[0]),candidate[0],candidate,indx]
f1.append(compute_f1(answer_text, candidate[0]))
pred_f1.append(compute_f1(answer_text, pred[qas_id]))
cnt[qas_id] = [question_text,candidate]
return pred_f1,f1,prediction
def make_choice_pos(predicts):
if int(predicts['TP_relevance']) ==0:
return predicts["object1"]
else:
return predicts["object2"]
def make_choice_neg(predicts):
if int(predicts['TP_relevance']) ==1:
return predicts["object1"]
else:
return predicts["object2"]
def remove_punc(text):
ends =['.',',',"'s"]
if text.endswith('.') or text.endswith(','):
text = text[:-1]
elif text.endswith("'s"):
text = text[:-2]
return text
def shuffle_and_make_CV(label_path,train_path,dev_path,out_path,fold =5,thresh=2500):
t1 = []
t2 = []
t3 = []
t4 = []
t5 = []
with open(label_path, "r", encoding="utf-8")as reader:
input_data = json.load(reader)
ignored_id=[]
cnt = 0
for entry in input_data["data"]:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["background"]
situation_text = paragraph['situation']
for qa in paragraph['qas']:
qas_id = qa['id']
ignored_id.append(qas_id)
all_examples_except_label={}
label_data =[]
with open(train_path, "r", encoding="utf-8")as reader:
input_data = json.load(reader)
for entry in input_data["data"]:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["background"]
is_label = False
for qa in paragraph['qas']:
qas_id = qa['id']
if qas_id in ignored_id:
is_label = True
else:
cnt +=1
if not is_label:
try:
all_examples_except_label[paragraph_text].append(paragraph)
except:
all_examples_except_label[paragraph_text]=[paragraph]
else:
label_data.append(paragraph)
with open(dev_path, "r", encoding="utf-8")as reader:
input_data = json.load(reader)
for entry in input_data["data"]:
for paragraph in entry["paragraphs"]:
is_label = False
for qa in paragraph['qas']:
qas_id = qa['id']
if qas_id in ignored_id:
is_label = True
else:
cnt += 1
if not is_label:
try:
all_examples_except_label[paragraph_text].append(paragraph)
except:
all_examples_except_label[paragraph_text]=[paragraph]
l = list(all_examples_except_label.items())
random.shuffle(l)
all_examples_except_label = dict(l)
current=[]
no_q = []
current_len=0
splitted= []
total = 0
for val in all_examples_except_label.values():
for sit in val:
current.append(sit)
current_len +=len(sit['qas'])
total+=len(sit['qas'])
if current_len>=thresh and len(splitted)<fold-1:
splitted.append(current)
no_q.append(current_len)
current_len =0
current = []
if len(splitted) == fold - 1:
current.append(sit)
current_len+=len(sit['qas'])
no_q.append(current_len)
splitted.append(current)
print(total,cnt,no_q)
next = input("ok?")
if next !="y":
pass
else:
for ind,split in enumerate(splitted):
out_path_split =os.path.join(out_path,'split_'+str(ind)+'.json')
writer = open(out_path_split,'w+',encoding='utf-8')
out = {
"version": "1.0",
"data": [
{
"title": "ropes",
"paragraphs": split,
}]
}
writer.write(json.dumps(out,indent=4))
writer.close()
setting1 = label_data+splitted[0]+splitted[1]+splitted[2]
setting2 = label_data+splitted[0]+splitted[1]+splitted[4]
setting3 = label_data+splitted[0]+splitted[3]+splitted[4]
setting4 = label_data+splitted[2]+splitted[3]+splitted[4]
setting5 = label_data+splitted[1]+splitted[2]+splitted[3]
settings = {
'123':setting1,
'125':setting2,
'145':setting3,
'345':setting4,
'234':setting5
}
for key,val in settings.items():
out_path_split = os.path.join(out_path, 'combine_' + str(key) + '.json')
writer = open(out_path_split,'w+',encoding='utf-8')
out = {
"version": "1.0",
"data": [
{
"title": "ropes",
"paragraphs": val,
}]
}
writer.write(json.dumps(out,indent=4))
writer.close()
return thresh,all_examples_except_label,label_data
def assign_new_value_for_shuffled(shuffled_path,train_path,dev_path,out_path):
all_examples = {}
with open(train_path, "r", encoding="utf-8")as reader:
input_data = json.load(reader)
for entry in input_data["data"]:
for paragraph in entry["paragraphs"]:
for qa in paragraph['qas']:
qas_id = qa['id']
all_examples[qas_id] = qa
with open(dev_path, "r", encoding="utf-8")as reader:
input_data = json.load(reader)
for entry in input_data["data"]:
for paragraph in entry["paragraphs"]:
for qa in paragraph['qas']:
qas_id = qa['id']
all_examples[qas_id] = qa
settings = ['123','125','145', '345', '234']
for idx,combine in enumerate(settings):
out_path_split = os.path.join(out_path, 'split_' + str(idx) + '.json')
writer = open(out_path_split, 'w+', encoding='utf-8')
with open(os.path.join(shuffled_path,'split_' + str(idx) + '.json'), "r", encoding="utf-8")as reader:
input_data = json.load(reader)
for entry in input_data["data"]:
for paragraph in entry["paragraphs"]:
for qa in paragraph['qas']:
qas_id = qa['id']
qa["synthetic_text"] = all_examples[qas_id]["synthetic_text"]
qa["predicts"] = all_examples[qas_id]["predicts"]
writer.write(json.dumps(input_data,indent=4))
writer.close()
combine_path = os.path.join(out_path, 'combine_' + str(combine) + '.json')
writer = open(combine_path, 'w+', encoding='utf-8')
with open(os.path.join(shuffled_path,'combine_' + str(combine) + '.json'), "r", encoding="utf-8")as reader:
input_data = json.load(reader)
for entry in input_data["data"]:
for paragraph in entry["paragraphs"]:
for qa in paragraph['qas']:
qas_id = qa['id']
qa["synthetic_text"] = all_examples[qas_id]["synthetic_text"]
qa["predicts"] = all_examples[qas_id]["predicts"]
writer.write(json.dumps(input_data,indent=4))
writer.close()
return None
def read_predicted_file_nn(predict_file,pred):
Object_question_words ={"which",'in which','whose','who','what','for which','on which'}
comparative_words =[
'more', 'less', 'higher', 'lower', 'increase', 'decrease', 'high', 'low', 'harder', 'easier', 'increasing',
'decreasing', 'up', 'down', 'larger', 'smaller','better','worse','faster','slower','weaker','stronger','closer','farther','louder','quieter','correctly','incorrectly','not','yes','no','not'
]
positive_words = ['more', 'higher', 'increase', 'high', 'harder', 'increasing', 'up', 'larger', 'better', 'faster', 'stronger', 'closer', 'louder', 'correctly']
negative_words = ['less', 'lower', 'decrease', 'low', 'easier', 'decreasing', 'down', 'smaller', 'worse', 'slower', 'weaker', 'farther', 'quieter', 'incorrectly','fewer','not','avoid']
all_object_scores = []
record={}
pred_f1 =[]
with open(predict_file, "r", encoding="utf-8")as reader:
input_data = json.load(reader)
for entry in input_data["data"]:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["background"]
situation_text = paragraph['situation']
for qa in paragraph['qas']:
qas_id = qa['id']
question_text =qa['question']
answer_text = qa['answers'][0]['text']
predicts = qa['predicts']
if any(i==answer_text for i in comparative_words):
continue
if any(question_text.lower().startswith(i) for i in Object_question_words):
# if max(compute_f1(answer_text, predicts['object1']),
# compute_f1(answer_text, predicts['object2'])) == 0.0:
# continue
predicts_answer = pred[qas_id]
# f1 = compute_f1(remove_punc(answer_text),remove_punc(predicts_answer))
pred_f1.append(compute_f1(remove_punc(answer_text), remove_punc(pred[qas_id])))
# record[qas_id] = [f1,answer_text,predicts_answer]
# all_object_scores.append(f1)
else:
continue
return all_object_scores,record,pred_f1
def read_predicted_file_comparative_nn(predict_file,pred):
question_word = ['would','will']
comparative_words = [
'more', 'less', 'higher', 'lower', 'increase', 'decrease', 'harder', 'easier', 'increasing',
'decreasing', 'larger', 'smaller', 'better', 'worse', 'faster', 'slower', 'weaker', 'stronger',
'closer', 'farther', 'louder', 'quieter', 'correctly', 'incorrectly', 'not', 'yes', 'no', 'not'
]
pairs = {
'more':'less',
'higher': 'lower',
'increase':'decrease',
'harder':'easier',
'increasing':'decreasing',
'larger':'smaller',
'better':'worse',
'faster':'slower',
'stronger':'weaker',
'closer':'farther',
'louder':'quieter',
'correctly': 'incorrectly',
}
cnt ={}
prediction ={}
f1 =[]
pred_f1=[]
debug={}
with open(predict_file, "r", encoding="utf-8")as reader:
input_data = json.load(reader)
for entry in input_data["data"]:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["background"]
situation_text = paragraph['situation']
for qa in paragraph['qas']:
candidate=[]
qas_id = qa['id']
question_text = qa['question']
answer_text = qa['answers'][0]['text']
predicts = qa['predicts']
if not any(i == answer_text for i in comparative_words):
continue
# if any(question_text.lower().startswith(i) for i in question_word):
# continue
for key, val in pairs.items():
if key in question_text.lower() and val in question_text.lower():
candidate= [key,val]
break
if candidate!=[]:
pred_f1.append(compute_f1(answer_text, pred[qas_id]))
return pred_f1
def get_id(data1,data2):
all_examples = []
ids =[]
with open(data1, "r", encoding="utf-8")as reader:
input_data = json.load(reader)
for entry in input_data["data"]:
for paragraph in entry["paragraphs"]:
for qa in paragraph['qas']:
qas_id = qa['id']
all_examples.append(qas_id)
with open(data2, "r", encoding="utf-8")as reader:
input_data = json.load(reader)
for entry in input_data["data"]:
for paragraph in entry["paragraphs"]:
for qa in paragraph['qas']:
qas_id = qa['id']
if qas_id not in all_examples:
ids.append(qas_id)
return ids
| 44.932489 | 197 | 0.550897 |
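The two choice helpers simply key off the predicted TP_relevance flag; a small sketch with a made-up predicts record in the shape the functions above expect:

predicts = {'object1': 'sample A', 'object2': 'sample B', 'TP_relevance': '0'}
print(make_choice_pos(predicts))  # sample A (relevance 0 selects object1)
print(make_choice_neg(predicts))  # sample B (relevance 0 selects object2)
print(remove_punc("sample A's"))  # sample A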
b331e701b53e8e64522bbf7aa02063c808ff8680 | 10,151 | py | Python | openapi_python_client/parser/properties/model_property.py | gmerz/matterapi-generator | 4ba29c7d308c43365b286e41220e3252fa250526 | ["MIT"] | null | null | null | openapi_python_client/parser/properties/model_property.py | gmerz/matterapi-generator | 4ba29c7d308c43365b286e41220e3252fa250526 | ["MIT"] | null | null | null | openapi_python_client/parser/properties/model_property.py | gmerz/matterapi-generator | 4ba29c7d308c43365b286e41220e3252fa250526 | ["MIT"] | null | null | null |
from itertools import chain
from typing import ClassVar, Dict, List, NamedTuple, Optional, Set, Tuple, Union
import attr
from ... import schema as oai
from ... import utils
from ..errors import PropertyError
from ..reference import Reference
from .property import Property
from .file_property import FileProperty
from .schemas import Schemas
@attr.s(auto_attribs=True, frozen=True)
class ModelProperty(Property):
"""A property which refers to another Schema"""
reference: Reference
required_properties: List[Property]
optional_properties: List[Property]
file_property_names: Set[str]
description: str
relative_imports: Set[str]
additional_properties: Union[bool, Property] = False
_json_type_string: ClassVar[str] = "Dict[str, Any]"
template: ClassVar[str] = "model_property.py.jinja"
json_is_dict: ClassVar[bool] = True
parent_name: str = ""
child_model: bool = False
model_type: str = "object"
array_items_property: Optional[Property] = None
def get_type_string(self, no_optional: bool = False) -> str:
"""Get a string representation of type that should be used when declaring this property"""
type_string = self.reference.class_name
if no_optional:
return type_string
if self.nullable:
# type_string = f"Union[{type_string}, None]"
type_string = f"Optional[{type_string}]"
if not self.required:
type_string = f"Optional[{type_string}]"
# type_string = f"Optional[{type_string}]"
return type_string
def get_base_type_string(self, json: bool = False) -> str:
return self.reference.class_name
def get_imports(self, *, prefix: str) -> Set[str]:
"""
Get a set of import strings that should be included when this property is used somewhere
Args:
prefix: A prefix to put before any relative (local) module names. This should be the number of . to get
back to the root of the generated client.
"""
imports = super().get_imports(prefix=prefix)
imports.update(
{
# f"from {prefix}models.{self.reference.module_name} import {self.reference.class_name}",
f"from {prefix}models import {self.reference.class_name}",
"from typing import Dict",
"from typing import cast",
}
)
return imports
def _merge_properties(first: Property, second: Property) -> Union[Property, PropertyError]:
if first.__class__ != second.__class__:
return PropertyError(header="Cannot merge properties", detail="Properties are two different types")
nullable = first.nullable and second.nullable
required = first.required or second.required
first = attr.evolve(first, nullable=nullable, required=required)
second = attr.evolve(second, nullable=nullable, required=required)
if first != second:
return PropertyError(header="Cannot merge properties", detail="Properties has conflicting values")
return first
class _PropertyData(NamedTuple):
optional_props: List[Property]
required_props: List[Property]
relative_imports: Set[str]
schemas: Schemas
def _process_properties(*, data: oai.Schema, schemas: Schemas, class_name: str) -> Union[_PropertyData, PropertyError]:
from . import property_from_data
properties: Dict[str, Property] = {}
relative_imports: Set[str] = set()
required_set = set(data.required or [])
def _check_existing(prop: Property) -> Union[Property, PropertyError]:
nonlocal properties
existing = properties.get(prop.name)
prop_or_error = _merge_properties(existing, prop) if existing else prop
if isinstance(prop_or_error, PropertyError):
prop_or_error.header = f"Found conflicting properties named {prop.name} when creating {class_name}"
return prop_or_error
properties[prop_or_error.name] = prop_or_error
return prop_or_error
unprocessed_props = data.properties or {}
for sub_prop in data.allOf or []:
if isinstance(sub_prop, oai.Reference):
source_name = Reference.from_ref(sub_prop.ref).class_name
sub_model = schemas.models.get(source_name)
if sub_model is None:
return PropertyError(f"Reference {sub_prop.ref} not found")
for prop in chain(sub_model.required_properties, sub_model.optional_properties):
prop_or_error = _check_existing(prop)
if isinstance(prop_or_error, PropertyError):
return prop_or_error
else:
unprocessed_props.update(sub_prop.properties or {})
required_set.update(sub_prop.required or [])
for key, value in unprocessed_props.items():
prop_required = key in required_set
prop_or_error, schemas = property_from_data(
name=key, required=prop_required, data=value, schemas=schemas, parent_name=class_name, child_property=True
)
if isinstance(prop_or_error, Property):
prop_or_error = _check_existing(prop_or_error)
if isinstance(prop_or_error, PropertyError):
return prop_or_error
properties[prop_or_error.name] = prop_or_error
required_properties = []
optional_properties = []
for prop in properties.values():
if prop.required and not prop.nullable:
required_properties.append(prop)
else:
optional_properties.append(prop)
relative_imports.update(prop.get_imports(prefix="."))
return _PropertyData(
optional_props=optional_properties,
required_props=required_properties,
relative_imports=relative_imports,
schemas=schemas,
)
def _get_additional_properties(
*, schema_additional: Union[None, bool, oai.Reference, oai.Schema], schemas: Schemas, class_name: str
) -> Tuple[Union[bool, Property, PropertyError], Schemas]:
from . import property_from_data
if schema_additional is None:
return False, schemas
if isinstance(schema_additional, bool):
return schema_additional, schemas
if isinstance(schema_additional, oai.Schema) and not any(schema_additional.dict().values()):
# An empty schema
return True, schemas
additional_properties, schemas = property_from_data(
name="AdditionalProperty",
required=True, # in the sense that if present in the dict will not be None
data=schema_additional,
schemas=schemas,
parent_name=class_name,
child_property=True,
)
return additional_properties, schemas
def build_model_property(
*,
data: oai.Schema,
name: str,
schemas: Schemas,
required: bool,
parent_name: Optional[str],
child_property: Optional[bool] = False,
) -> Tuple[Union[ModelProperty, PropertyError], Schemas]:
"""
A single ModelProperty from its OAI data
Args:
data: Data of a single Schema
name: Name by which the schema is referenced, such as a model name.
Used to infer the type name if a `title` property is not available.
schemas: Existing Schemas which have already been processed (to check name conflicts)
required: Whether or not this property is required by the parent (affects typing)
parent_name: The name of the property that this property is inside of (affects class naming)
"""
from . import build_list_property
class_name = data.title or name
if parent_name:
if child_property:
# class_name = f"{utils.pascal_case(parent_name)}_{utils.pascal_case(class_name)}"
class_name = f"{utils.pascal_case(class_name)}"
else:
class_name = f"{utils.pascal_case(parent_name)}{utils.pascal_case(class_name)}"
ref = Reference.from_ref(class_name)
property_data = _process_properties(data=data, schemas=schemas, class_name=class_name)
if isinstance(property_data, PropertyError):
return property_data, schemas
schemas = property_data.schemas
additional_properties, schemas = _get_additional_properties(
schema_additional=data.additionalProperties, schemas=schemas, class_name=class_name
)
if isinstance(additional_properties, Property):
property_data.relative_imports.update(additional_properties.get_imports(prefix="."))
elif isinstance(additional_properties, PropertyError):
return additional_properties, schemas
array_prop = None
if data.type == "array":
array_prop, schemas = build_list_property(
data=data, name=name, required=required, schemas=schemas, parent_name=parent_name
)
file_properties = []
for prop in property_data.required_props:
if isinstance(prop, FileProperty):
file_properties.append(prop.python_name)
for prop in property_data.optional_props:
if isinstance(prop, FileProperty):
file_properties.append(prop.python_name)
prop = ModelProperty(
reference=ref,
required_properties=property_data.required_props,
optional_properties=property_data.optional_props,
relative_imports=property_data.relative_imports,
description=data.description or "",
default=None,
nullable=data.nullable,
required=required,
name=name,
additional_properties=additional_properties,
parent_name=parent_name,
child_model=child_property,
example=data.example,
model_type=data.type if data.type else "object",
array_items_property=array_prop,
file_property_names=set(file_properties)
)
if child_property:
return prop, schemas
if prop.reference.class_name in schemas.models:
error = PropertyError(
data=data, detail=f'Attempted to generate duplicate models with name "{prop.reference.class_name}"'
)
return error, schemas
schemas = attr.evolve(schemas, models={**schemas.models, prop.reference.class_name: prop})
return prop, schemas
| 38.018727 | 119 | 0.685548 |
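A self-contained sketch of the merge semantics in _merge_properties, using a simplified stand-in for Property (the real class carries many more fields):

import attr

@attr.s(auto_attribs=True, frozen=True)
class Prop:
    name: str
    nullable: bool
    required: bool

def merge(first, second):
    # nullable only if both sides allow null; required if either side requires it
    nullable = first.nullable and second.nullable
    required = first.required or second.required
    first = attr.evolve(first, nullable=nullable, required=required)
    second = attr.evolve(second, nullable=nullable, required=required)
    return first if first == second else None  # None stands in for PropertyError

print(merge(Prop('id', nullable=True, required=False),
            Prop('id', nullable=False, required=True)))
# Prop(name='id', nullable=False, required=True)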
81b07a350dd22f44077546c48112cccdec1ba031 | 5,112 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_service_association_links_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | ["MIT"] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_service_association_links_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | ["MIT"] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/operations/_service_association_links_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | ["MIT"] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceAssociationLinksOperations(object):
"""ServiceAssociationLinksOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ServiceAssociationLinksListResult"
"""Gets a list of service association links for a subnet.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceAssociationLinksListResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.ServiceAssociationLinksListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceAssociationLinksListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceAssociationLinksListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}/ServiceAssociationLinks'} # type: ignore
| 46.899083 | 223 | 0.689358 |
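A sketch of invoking this operation through the generated management client; the credential, subscription id, and resource names are placeholders:

from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient

client = NetworkManagementClient(DefaultAzureCredential(), '<subscription-id>')
result = client.service_association_links.list(
    resource_group_name='my-rg',      # placeholder
    virtual_network_name='my-vnet',   # placeholder
    subnet_name='my-subnet',          # placeholder
)
for link in result.value or []:
    print(link.name)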
c4e537a2ade2b7c81f73633ee1e9a0a03ed8536d | 3,142 | py | Python | testframework/test.py | SINTEF-9012/config-testing | a8e7b45d1bd3354231d971e868e167d6af187b90 | ["MIT"] | null | null | null | testframework/test.py | SINTEF-9012/config-testing | a8e7b45d1bd3354231d971e868e167d6af187b90 | ["MIT"] | null | null | null | testframework/test.py | SINTEF-9012/config-testing | a8e7b45d1bd3354231d971e868e167d6af187b90 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See the NOTICE file distributed with this work for additional
# information regarding copyright ownership.
#
import os
import sys
import pwd
import configparser
import subprocess
import io
import shutil
CONFIG_NAME = 'config.ini'
CONFIG_GENERAL_SEC = 'general'
CONFIG_TESTING_SCRIPT = 'run_config_testing.py'
GENERAL_ROOT_TEST_FOLDER = 'root_test_folder'
GENERAL_TEST_WORKING_FOLDER = 'test_working_folder'
GENERAL_GLOBAL_REPORT_DIR = 'global_report_dir'
GENERAL_SYSTEM_UNDER_TEST = 'system_under_test'
GENERAL_SUT_CONFIG_TESTING_FOLDER = 'sut_config_testing_folder'
SCRIPT_ABSOLUTE_PATH = os.path.dirname(os.path.realpath(__file__))
def print_std(stdout, stderr):
    for line in io.StringIO(stdout).readlines():
        print("[STDOUT] " + line.rstrip())
    for line in io.StringIO(stderr).readlines():
        print("[STDERR] " + line.rstrip())
def folder_path(parent, child):
parent = parent.strip()
child = child.strip()
return os.path.join(parent, child)
def execute_framework():
    root_test_folder = SCRIPT_ABSOLUTE_PATH
    config = configparser.RawConfigParser()
    config.read(os.path.join(root_test_folder, CONFIG_NAME))
test_working_folder = config.get(CONFIG_GENERAL_SEC, GENERAL_TEST_WORKING_FOLDER)
global_report_dir = config.get(CONFIG_GENERAL_SEC, GENERAL_GLOBAL_REPORT_DIR)
system_under_test = config.get(CONFIG_GENERAL_SEC, GENERAL_SYSTEM_UNDER_TEST)
config_testing_folder = config.get(CONFIG_GENERAL_SEC, GENERAL_SUT_CONFIG_TESTING_FOLDER)
full_test_working_folder = folder_path(root_test_folder, test_working_folder)
full_global_report_dir = folder_path(root_test_folder, global_report_dir)
full_system_under_test_dir = folder_path(root_test_folder, system_under_test)
full_config_testing_dir = folder_path(full_system_under_test_dir, config_testing_folder)
    # clean the working and report directories
if os.path.isdir(full_test_working_folder):
shutil.rmtree(full_test_working_folder)
os.makedirs(full_test_working_folder)
if os.path.isdir(full_global_report_dir):
shutil.rmtree(full_global_report_dir)
os.makedirs(full_global_report_dir)
full_config_testing_script = os.path.join(full_config_testing_dir, CONFIG_TESTING_SCRIPT)
command = [full_config_testing_script, full_test_working_folder, full_global_report_dir, full_system_under_test_dir, full_config_testing_dir]
print "Staring: " + " ".join(command)
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
print_std(stdout, stderr)
if __name__ == "__main__":
execute_framework()
print "Done!"
| 36.114943 | 142 | 0.81254 |
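execute_framework() reads a config.ini next to the script with a [general] section; a sketch that writes one (folder values are placeholders):

# Writes a config.ini in the shape execute_framework() reads.
import configparser

config = configparser.RawConfigParser()
config['general'] = {
    'test_working_folder': 'work',
    'global_report_dir': 'reports',
    'system_under_test': 'my_sut',                # placeholder
    'sut_config_testing_folder': 'config_tests',  # must contain run_config_testing.py
}
with open('config.ini', 'w') as f:
    config.write(f)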
5f66dbab0261512f2292b523db965e01eb4b2071 | 7,293 | py | Python | mmdet/apis/test.py | zjzhou521/mmdet_2backbone | abf6993e3842c96e4d3dfcb172c2eddfac16e1b4 | ["Apache-2.0"] | null | null | null | mmdet/apis/test.py | zjzhou521/mmdet_2backbone | abf6993e3842c96e4d3dfcb172c2eddfac16e1b4 | ["Apache-2.0"] | null | null | null | mmdet/apis/test.py | zjzhou521/mmdet_2backbone | abf6993e3842c96e4d3dfcb172c2eddfac16e1b4 | ["Apache-2.0"] | 1 | 2021-06-18T18:14:38.000Z | 2021-06-18T18:14:38.000Z |
import os.path as osp
import pickle
import shutil
import tempfile
import time
import mmcv
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info
from mmdet.core import encode_mask_results
def single_gpu_test(model, data_loader_vis, data_loader_ir, show=False, out_dir=None, show_score_thr=0.3):
model.eval()
results = []
# print("in test: num = ",len(dataset_vis))
# print("[test]dataset_vis = ",dataset_vis)
# for i, data_batch_vis in enumerate(data_loader_vis): # 2 imgs/batch
# print("i=",i,end=' ')
# print("data_batch_vis = ",data_batch_vis)
dataset_vis = data_loader_vis.dataset
dataset_ir = data_loader_ir.dataset
prog_bar = mmcv.ProgressBar(len(dataset_vis))
for i, data_vis in enumerate(data_loader_vis):
for j, data_ir in enumerate(data_loader_ir):
            if j == i:
                break
with torch.no_grad():
result = model(return_loss=False, rescale=True, inputs=[data_vis, data_ir])
# import numpy as np
# tmp = np.array(result)
# print("tmp.shape = ",tmp.shape)
# print("result[0] = ",result[0])
batch_size = len(result)
        if show or out_dir:
            # the original referenced an undefined `data` here; using the
            # visible-light batch `data_vis` is the most plausible intent
            if batch_size == 1 and isinstance(data_vis['img'][0], torch.Tensor):
                img_tensor = data_vis['img'][0]
            else:
                img_tensor = data_vis['img'][0].data[0]
            img_metas = data_vis['img_metas'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
assert len(imgs) == len(img_metas)
for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
ori_h, ori_w = img_meta['ori_shape'][:-1]
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
model.module.show_result(img_show, result[i], show=show, out_file=out_file, score_thr=show_score_thr)
# encode mask results
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results))
for bbox_results, mask_results in result]
results.extend(result)
for _ in range(batch_size):
prog_bar.update()
# break
return results
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
it encodes results to gpu tensors and use gpu communication for results
collection. On cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
time.sleep(2) # This line can prevent deadlock problem in some cases.
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
# encode mask results
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results))
for bbox_results, mask_results in result]
results.extend(result)
if rank == 0:
batch_size = len(result)
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
def collect_results_cpu(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
mmcv.mkdir_or_exist('.dist_test')
tmpdir = tempfile.mkdtemp(dir='.dist_test')
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, f'part_{i}.pkl')
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank == 0:
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(
pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
| 35.75
| 117
| 0.615796
|
b25f090205ccd5b3b20d97236ab7fe65c976cd42
| 976
|
py
|
Python
|
python/mysql/demo_mysqldb.py
|
kskumgk63/trace-examples
|
1de45b351aa03367c745933b4923f07d436aea3e
|
[
"BSD-3-Clause"
] | 75
|
2017-03-20T12:17:14.000Z
|
2022-03-24T02:55:53.000Z
|
python/mysql/demo_mysqldb.py
|
kskumgk63/trace-examples
|
1de45b351aa03367c745933b4923f07d436aea3e
|
[
"BSD-3-Clause"
] | 25
|
2017-05-10T18:21:20.000Z
|
2022-01-28T18:36:52.000Z
|
python/mysql/demo_mysqldb.py
|
kskumgk63/trace-examples
|
1de45b351aa03367c745933b4923f07d436aea3e
|
[
"BSD-3-Clause"
] | 54
|
2017-05-10T18:13:52.000Z
|
2022-01-11T21:04:42.000Z
|
import logging.config
from ddtrace import Pin, patch
import MySQLdb
logging.config.dictConfig({
'version': 1,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'ddtrace': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
}
})
# If not patched yet, you can patch mysql specifically
patch(mysql=True)
# This will report a span with the default settings
conn = MySQLdb.connect(user="test", password="test", host="localhost", port=3306, database="test")
cursor = conn.cursor()
cursor.execute("SHOW TABLES")
# Use a pin to specify metadata related to this connection
Pin.override(conn, service='mysql-users')
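# Queries executed after this override are reported under the
# 'mysql-users' service, e.g. (hypothetical):
# cursor.execute("SELECT * FROM users")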
| 25.684211
| 98
| 0.577869
|
c3c4c86963dc305768bd3e6cd227c1cc7f1e986d
| 5,433
|
py
|
Python
|
samples/python/mnist/mnist_training.py
|
dennyac/onnxruntime
|
d5175795d2b7f2db18b0390f394a49238f814668
|
[
"MIT"
] | 5
|
2021-02-20T04:53:48.000Z
|
2021-03-09T19:29:27.000Z
|
samples/python/mnist/mnist_training.py
|
dennyac/onnxruntime
|
d5175795d2b7f2db18b0390f394a49238f814668
|
[
"MIT"
] | 5
|
2021-03-01T21:35:50.000Z
|
2022-03-09T05:38:38.000Z
|
samples/python/mnist/mnist_training.py
|
dennyac/onnxruntime
|
d5175795d2b7f2db18b0390f394a49238f814668
|
[
"MIT"
] | 2
|
2021-01-29T09:36:51.000Z
|
2021-02-01T13:42:40.000Z
|
# This code is from https://github.com/pytorch/examples/blob/master/mnist/main.py
# with modification to do training using onnxruntime as backend on cuda device.
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
import onnxruntime
from onnxruntime.experimental import ORTTrainer, ORTTrainerOptions, optim
# Pytorch model
class NeuralNet(nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(NeuralNet, self).__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.relu = nn.ReLU()
self.fc2 = nn.Linear(hidden_size, num_classes)
def forward(self, input1):
out = self.fc1(input1)
out = self.relu(out)
out = self.fc2(out)
return out
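# With the sizes used in main() below, input1 is [batch, 784] (flattened
# 28x28 MNIST images) and the output logits are [batch, 10].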
# ONNX Runtime training
def mnist_model_description():
return {'inputs': [('input1', ['batch', 784]),
('label', ['batch'])],
'outputs': [('loss', [], True),
('probability', ['batch', 10])]}
def my_loss(x, target):
return F.nll_loss(F.log_softmax(x, dim=1), target)
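# Note: F.nll_loss(F.log_softmax(x, dim=1), target) is numerically
# equivalent to F.cross_entropy(x, target).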
# Helpers
def train_with_trainer(log_interval, trainer, device, train_loader, epoch):
for batch_idx, (data, target) in enumerate(train_loader):
# Fetch data
data, target = data.to(device), target.to(device)
data = data.reshape(data.shape[0], -1)
# Train step
loss, _ = trainer.train_step(data, target)
# Stats
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss))
def test_with_trainer(trainer, device, test_loader):
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
# Fetch data
data, target = data.to(device), target.to(device)
data = data.reshape(data.shape[0], -1)
# Eval step
# Set fetches around eval_step so that 'target' is not passed as an input
trainer._train_step_info.fetches = ['probability']
output = F.log_softmax(trainer.eval_step(data), dim=1)
trainer._train_step_info.fetches = []
# Stats
test_loss += F.nll_loss(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
# Training settings
parser = argparse.ArgumentParser(description='MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
# Basic setup
args = parser.parse_args()
if not args.no_cuda and torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
torch.manual_seed(args.seed)
onnxruntime.set_seed(args.seed)
# Data loader
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('./data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('./data', train=False, transform=transforms.Compose([
transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=args.test_batch_size, shuffle=True)
# Modeling
model = NeuralNet(784, 500, 10)
model_desc = mnist_model_description()
optim_config = optim.SGDConfig(lr=args.lr)
opts = ORTTrainerOptions({'device': {'id': device}})
trainer = ORTTrainer(model,
model_desc,
optim_config,
loss_fn=my_loss,
options=opts)
# Train loop
for epoch in range(1, args.epochs + 1):
train_with_trainer(args.log_interval, trainer,
device, train_loader, epoch)
test_with_trainer(trainer, device, test_loader)
if __name__ == '__main__':
main()
| 37.993007
| 87
| 0.600405
|
8a139fb21352f9b04ad41cec50fba0a027c1ffa4
| 4,405
|
py
|
Python
|
contrib/devtools/logprint-scanner.py
|
robinadaptor/chronon
|
630b3945824c1b1cd2ea67ca80835a9f669b9124
|
[
"MIT"
] | null | null | null |
contrib/devtools/logprint-scanner.py
|
robinadaptor/chronon
|
630b3945824c1b1cd2ea67ca80835a9f669b9124
|
[
"MIT"
] | null | null | null |
contrib/devtools/logprint-scanner.py
|
robinadaptor/chronon
|
630b3945824c1b1cd2ea67ca80835a9f669b9124
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2018 PIVX developers
# Copyright (c) 2018-2019 Chronon developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import os, sys
from subprocess import check_output
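# Example: countRelevantCommas('f(a, g(b, c), d)') == 2 -- only commas that
# sit directly inside the first opening parenthesis of the line are counted.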
def countRelevantCommas(line):
openParensPosStack = []
openParensPos = 0
charCounter = 0
numRelevantCommas = 0
firstOpenParensIndex = line.find("(")
for char in line:
if char == '(':
openParensPosStack.append(charCounter)
if char == ')':
openParensPosStack.pop()
if char == "," and openParensPosStack[-1] == firstOpenParensIndex:
numRelevantCommas += 1
charCounter += 1
return numRelevantCommas
if __name__ == "__main__":
out = check_output("git rev-parse --show-toplevel", shell=True, universal_newlines=True)
srcDir = out.rstrip() + "/src/"
filelist = [os.path.join(dp, f) for dp, dn, filenames in os.walk(srcDir) for f in filenames if os.path.splitext(f)[1] == '.cpp' or os.path.splitext(f)[1] == '.h' ]
incorrectInstanceCounter = 0
for file in filelist:
f = open(file,"r", encoding="utf-8")
data = f.read()
rows = data.split("\n")
count = 0
full_data = []
lineCounter = 1
tempLine = ""
tempCount = 0
for row in rows:
# Collapse multiple lines into one
tempLine += row
# Line contains LogPrint or LogPrintf
if tempLine.find("LogPrint") != -1:
if tempLine.count("(") == tempLine.count(")"):
havePercents = tempLine.count('%') > 0
if havePercents:
# This line of code has a format specifier that requires checking number of associated arguments
# Determine the number of arguments provided, see if that matches the number of format specifiers
# Count the number of commas after the format specifier string. Check to see if it matches the number of format specifiers.
# Assumes quotes are not escaped in the specifier string and there are no percent signs when specifying the debug level.
# First, determine the position of the comma after the format specifier section, named commaAfterEndSpecifierStringIndex
firstSpecifierIndex = tempLine.find('%')
startSpecifierStringIndex = tempLine.rfind('"',firstSpecifierIndex)
endSpecifierStringIndex = tempLine.find('"',firstSpecifierIndex)
commaAfterEndSpecifierStringIndex = tempLine.find(',',endSpecifierStringIndex)
# Count the number of commas after the specifier string
line = "(" + tempLine[commaAfterEndSpecifierStringIndex:-1]
numCommas = countRelevantCommas(line)
# Determine number of extra percents after specifier string
numExtraPercents = tempLine.count('%', commaAfterEndSpecifierStringIndex)
# Subtract extra from total count. This is the number of expected specifiers
# ignore %%
numPercents = tempLine.count('%') - numExtraPercents - 2*tempLine.count('%%')
if numPercents != numCommas:
print("Incorrect number of arguments for LogPrint(f) statement found.")
print(str(file) + ":" + str(lineCounter - tempCount))
print("Line = " + tempLine)
print("numRelevantCommas = " + str(numCommas) + ", numRelevantPercents = " + str(numPercents))
print("")
incorrectInstanceCounter += 1
# Done with this multiline, clear tempLine
tempLine = ""
tempCount = 0
else:
tempCount += 1
else:
# No LogPrint, clear tempLine
tempLine = ""
tempCount = 0
lineCounter += 1
print("# of incorrect instances: " + str(incorrectInstanceCounter))
sys.exit(incorrectInstanceCounter)
| 41.556604
| 167
| 0.569807
|
e322e9d55d8881d340b35329d3a0deaddc21dcdb
| 7,670
|
py
|
Python
|
test/functional/test_framework/test_node.py
|
smthcoin/smthcoin
|
3d212b3052297120ff3eee5e20fc3bab9eeddfd6
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/test_node.py
|
smthcoin/smthcoin
|
3d212b3052297120ff3eee5e20fc3bab9eeddfd6
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/test_node.py
|
smthcoin/smthcoin
|
3d212b3052297120ff3eee5e20fc3bab9eeddfd6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for smthcoind node under test"""
import decimal
import errno
import http.client
import json
import logging
import os
import subprocess
import time
from .util import (
assert_equal,
get_rpc_proxy,
rpc_url,
wait_until,
)
from .authproxy import JSONRPCException
BITCOIND_PROC_WAIT_TIMEOUT = 60
class TestNode():
"""A class for representing a smthcoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
To make things easier for the test writer, a bit of magic is happening under the covers.
Any unrecognised messages will be dispatched to the RPC connection."""
def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir):
self.index = i
self.datadir = os.path.join(dirname, "node" + str(i))
self.rpchost = rpchost
if timewait:
self.rpc_timeout = timewait
else:
# Wait for up to 60 seconds for the RPC server to respond
self.rpc_timeout = 60
if binary is None:
self.binary = os.getenv("LITECOIND", "smthcoind")
else:
self.binary = binary
self.stderr = stderr
self.coverage_dir = coverage_dir
# Most callers will just need to add extra args to the standard list below. For those callers that need more flexibility, they can just set the args property directly.
self.extra_args = extra_args
self.args = [self.binary, "-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i]
self.cli = TestNodeCLI(os.getenv("LITECOINCLI", "smthcoin-cli"), self.datadir)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
def __getattr__(self, *args, **kwargs):
"""Dispatches any unrecognised messages to the RPC connection."""
assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection"
return self.rpc.__getattr__(*args, **kwargs)
def start(self, extra_args=None, stderr=None):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
if stderr is None:
stderr = self.stderr
self.process = subprocess.Popen(self.args + extra_args, stderr=stderr)
self.running = True
self.log.debug("smthcoind started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the smthcoind process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
assert self.process.poll() is None, "smthcoind exited with status %i during initialization" % self.process.returncode
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
self.rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; smthcoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
raise AssertionError("Unable to connect to smthcoind")
def get_wallet_rpc(self, wallet_name):
assert self.rpc_connected
assert self.rpc
wallet_path = "wallet/%s" % wallet_name
return self.rpc / wallet_path
def stop_node(self):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert_equal(return_code, 0)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
def node_encrypt_wallet(self, passphrase):
""""Encrypts the wallet.
This causes smthcoind to shutdown, so this method takes
care of cleaning up resources."""
self.encryptwallet(passphrase)
self.wait_until_stopped()
class TestNodeCLI():
"""Interface to smthcoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.args = []
self.binary = binary
self.datadir = datadir
self.input = None
def __call__(self, *args, input=None):
# TestNodeCLI is callable with smthcoin-cli command-line args
self.args = [str(arg) for arg in args]
self.input = input
return self
def __getattr__(self, command):
def dispatcher(*args, **kwargs):
return self.send_cli(command, *args, **kwargs)
return dispatcher
def send_cli(self, command, *args, **kwargs):
"""Run smthcoin-cli command. Deserializes returned string as python object."""
pos_args = [str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same smthcoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.args
if named_args:
p_args += ["-named"]
p_args += [command] + pos_args + named_args
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
return json.loads(cli_stdout, parse_float=decimal.Decimal)
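# Usage sketch (hypothetical): node.cli('-rpcwait').getblockcount() runs
# `smthcoin-cli -datadir=<datadir> -rpcwait getblockcount` and parses the
# JSON output, deserializing floats as decimal.Decimal.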
| 40.157068
| 248
| 0.640287
|
4cf307c3e707da871bbde14379a7a6d3c02d1a93
| 3,474
|
py
|
Python
|
openpyxl/worksheet/tests/test_page.py
|
VariantXYZ/openpyxl-variant
|
873cc3dda43578c098843ebe930325c879d20baf
|
[
"MIT"
] | null | null | null |
openpyxl/worksheet/tests/test_page.py
|
VariantXYZ/openpyxl-variant
|
873cc3dda43578c098843ebe930325c879d20baf
|
[
"MIT"
] | null | null | null |
openpyxl/worksheet/tests/test_page.py
|
VariantXYZ/openpyxl-variant
|
873cc3dda43578c098843ebe930325c879d20baf
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2010-2020 openpyxl
import pytest
from openpyxl.xml.functions import tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def PageMargins():
from .. page import PageMargins
return PageMargins
class TestPageMargins:
def test_ctor(self, PageMargins):
pm = PageMargins()
assert dict(pm) == {'bottom': '1', 'footer': '0.5', 'header': '0.5',
'left': '0.75', 'right': '0.75', 'top': '1'}
def test_write(self, PageMargins):
page_margins = PageMargins()
page_margins.left = 2.0
page_margins.right = 2.0
page_margins.top = 2.0
page_margins.bottom = 2.0
page_margins.header = 1.5
page_margins.footer = 1.5
xml = tostring(page_margins.to_tree())
expected = """
<pageMargins xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" left="2" right="2" top="2" bottom="2" header="1.5" footer="1.5"/>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
@pytest.fixture
def PrintPageSetup():
from .. page import PrintPageSetup
return PrintPageSetup
@pytest.fixture
def DummyWorksheet():
from openpyxl import Workbook
wb = Workbook()
return wb.active
class TestPageSetup:
def test_ctor(self, PrintPageSetup):
p = PrintPageSetup()
assert dict(p) == {}
p.scale = 1
assert p.scale == 1
p.paperHeight = "24.73mm"
assert p.paperHeight == "24.73mm"
assert p.cellComments is None
p.orientation = "default"
assert p.orientation == "default"
p.id = 'a12'
assert dict(p) == {'scale':'1', 'paperHeight': '24.73mm',
'orientation': 'default', 'id':'a12'}
def test_fitToPage(self, DummyWorksheet):
ws = DummyWorksheet
p = ws.page_setup
assert p.fitToPage is None
p.fitToPage = 1
assert p.fitToPage == True
def test_autoPageBreaks(self, DummyWorksheet):
ws = DummyWorksheet
p = ws.page_setup
assert p.autoPageBreaks is None
p.autoPageBreaks = 1
assert p.autoPageBreaks == True
def test_write(self, PrintPageSetup):
page_setup = PrintPageSetup()
page_setup.orientation = "landscape"
page_setup.paperSize = 3
page_setup.fitToHeight = False
page_setup.fitToWidth = True
xml = tostring(page_setup.to_tree())
expected = """
<pageSetup orientation="landscape" paperSize="3" fitToHeight="0" fitToWidth="1"/>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
@pytest.fixture
def PrintOptions():
from .. page import PrintOptions
return PrintOptions
class TestPrintOptions:
def test_ctor(self, PrintOptions):
p = PrintOptions()
assert dict(p) == {}
p.horizontalCentered = True
p.verticalCentered = True
assert dict(p) == {'verticalCentered': '1', 'horizontalCentered': '1'}
def test_write(self, PrintOptions):
print_options = PrintOptions()
print_options.horizontalCentered = True
print_options.verticalCentered = True
xml = tostring(print_options.to_tree())
expected = """
<printOptions horizontalCentered="1" verticalCentered="1"/>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
| 28.243902
| 164
| 0.613126
|
f0a8b00b799d2be6a09dd1567b39c88872c31f9b
| 655
|
py
|
Python
|
test/测试文件2.py
|
colinsusie/excelexporter
|
747cfe4794b62b03e9ac35d2f6cd9f6e3cfef117
|
[
"MIT"
] | 15
|
2020-08-04T03:50:47.000Z
|
2022-03-19T14:33:19.000Z
|
test/测试文件2.py
|
colinsusie/excelexporter
|
747cfe4794b62b03e9ac35d2f6cd9f6e3cfef117
|
[
"MIT"
] | 2
|
2021-02-24T08:02:14.000Z
|
2021-04-22T11:25:55.000Z
|
test/测试文件2.py
|
colinsusie/excelexporter
|
747cfe4794b62b03e9ac35d2f6cd9f6e3cfef117
|
[
"MIT"
] | 5
|
2020-08-15T02:16:19.000Z
|
2021-03-18T09:30:01.000Z
|
# -*- coding: utf-8 -*-
from excel_exporter import *
define = [
# Column 1: Excel column name; column 2: exported field name; column 3: field type
['编号', 'no', Int()],
['职业', 'job', Int()],
['等级', 'level', Int()],
]
config = {
"source": "测试文件2.xlsx",
"sheet": "Sheet1",
"target": [
["./test2.js", "js", ET_OPT],
["./test2.lua", "lua", ET_OPT],
["./test2.json", "json"],
["./test2.py", "py"],
],
"key": "no",
}
def custom_key(key_value, row_dict):
return "{}_{}".format(row_dict["job"], row_dict["level"])
# def custom_row(key_value, row_dict):
# if not (1 <= row_dict["job"] <= 3):
# raise TypeError("the 'job' field must be 1, 2 or 3")
# return row_dict
# def verify_table(table):
# print("OK")
| 19.848485
| 58
| 0.558779
|
f9f78d6537bf19e77353c50dacc8367c4a373f62
| 7,261
|
py
|
Python
|
homeassistant/components/cover/device_trigger.py
|
ronytomen/core
|
2e9acf09b61d93a1b6b5dc5148822b85d65c5d79
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/cover/device_trigger.py
|
ronytomen/core
|
2e9acf09b61d93a1b6b5dc5148822b85d65c5d79
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/cover/device_trigger.py
|
ronytomen/core
|
2e9acf09b61d93a1b6b5dc5148822b85d65c5d79
|
[
"Apache-2.0"
] | null | null | null |
"""Provides device automations for Cover."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.components.homeassistant.triggers import (
numeric_state as numeric_state_trigger,
state as state_trigger,
)
from homeassistant.const import (
ATTR_SUPPORTED_FEATURES,
CONF_ABOVE,
CONF_BELOW,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_FOR,
CONF_PLATFORM,
CONF_TYPE,
CONF_VALUE_TEMPLATE,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType
from . import (
DOMAIN,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
)
POSITION_TRIGGER_TYPES = {"position", "tilt_position"}
STATE_TRIGGER_TYPES = {"opened", "closed", "opening", "closing"}
POSITION_TRIGGER_SCHEMA = vol.All(
TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(POSITION_TRIGGER_TYPES),
vol.Optional(CONF_ABOVE): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
vol.Optional(CONF_BELOW): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
}
),
cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
)
STATE_TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(STATE_TRIGGER_TYPES),
vol.Optional(CONF_FOR): cv.positive_time_period_dict,
}
)
TRIGGER_SCHEMA = vol.Any(POSITION_TRIGGER_SCHEMA, STATE_TRIGGER_SCHEMA)
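# A config such as the following (values hypothetical) validates against
# POSITION_TRIGGER_SCHEMA:
# {"platform": "device", "domain": "cover", "device_id": "abc123",
#  "entity_id": "cover.garage_door", "type": "position", "above": 50}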
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> list[dict]:
"""List device triggers for Cover devices."""
registry = await entity_registry.async_get_registry(hass)
triggers = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
state = hass.states.get(entry.entity_id)
if not state or ATTR_SUPPORTED_FEATURES not in state.attributes:
continue
supported_features = state.attributes[ATTR_SUPPORTED_FEATURES]
supports_open_close = supported_features & (SUPPORT_OPEN | SUPPORT_CLOSE)
# Add triggers for each entity that belongs to this integration
if supports_open_close:
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "opened",
}
)
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "closed",
}
)
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "opening",
}
)
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "closing",
}
)
if supported_features & SUPPORT_SET_POSITION:
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "position",
}
)
if supported_features & SUPPORT_SET_TILT_POSITION:
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "tilt_position",
}
)
return triggers
async def async_get_trigger_capabilities(hass: HomeAssistant, config: dict) -> dict:
"""List trigger capabilities."""
if config[CONF_TYPE] not in POSITION_TRIGGER_TYPES:
return {
"extra_fields": vol.Schema(
{vol.Optional(CONF_FOR): cv.positive_time_period_dict}
)
}
return {
"extra_fields": vol.Schema(
{
vol.Optional(CONF_ABOVE, default=0): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
vol.Optional(CONF_BELOW, default=100): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
}
)
}
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
if config[CONF_TYPE] in STATE_TRIGGER_TYPES:
if config[CONF_TYPE] == "opened":
to_state = STATE_OPEN
elif config[CONF_TYPE] == "closed":
to_state = STATE_CLOSED
elif config[CONF_TYPE] == "opening":
to_state = STATE_OPENING
elif config[CONF_TYPE] == "closing":
to_state = STATE_CLOSING
state_config = {
CONF_PLATFORM: "state",
CONF_ENTITY_ID: config[CONF_ENTITY_ID],
state_trigger.CONF_TO: to_state,
}
if CONF_FOR in config:
state_config[CONF_FOR] = config[CONF_FOR]
state_config = state_trigger.TRIGGER_SCHEMA(state_config)
return await state_trigger.async_attach_trigger(
hass, state_config, action, automation_info, platform_type="device"
)
if config[CONF_TYPE] == "position":
position = "current_position"
if config[CONF_TYPE] == "tilt_position":
position = "current_tilt_position"
min_pos = config.get(CONF_ABOVE, -1)
max_pos = config.get(CONF_BELOW, 101)
value_template = f"{{{{ state.attributes.{position} }}}}"
numeric_state_config = {
CONF_PLATFORM: "numeric_state",
CONF_ENTITY_ID: config[CONF_ENTITY_ID],
CONF_BELOW: max_pos,
CONF_ABOVE: min_pos,
CONF_VALUE_TEMPLATE: value_template,
}
numeric_state_config = numeric_state_trigger.TRIGGER_SCHEMA(numeric_state_config)
return await numeric_state_trigger.async_attach_trigger(
hass, numeric_state_config, action, automation_info, platform_type="device"
)
| 33.004545
| 85
| 0.597025
|
3a54919ac0eb3e1d2137464091cfe2693eeb595e
| 11,886
|
py
|
Python
|
tests/scripts/thread-cert/test_srp_name_conflicts.py
|
yogeshsurapaneni/openthread
|
14fc9eb3a813331ffb56ba7b6025fc940ae15769
|
[
"BSD-3-Clause"
] | 2
|
2019-07-12T13:19:40.000Z
|
2019-07-15T13:39:04.000Z
|
tests/scripts/thread-cert/test_srp_name_conflicts.py
|
yogeshsurapaneni/openthread
|
14fc9eb3a813331ffb56ba7b6025fc940ae15769
|
[
"BSD-3-Clause"
] | 9
|
2020-03-05T03:02:18.000Z
|
2020-09-28T05:23:48.000Z
|
tests/scripts/thread-cert/test_srp_name_conflicts.py
|
yogeshsurapaneni/openthread
|
14fc9eb3a813331ffb56ba7b6025fc940ae15769
|
[
"BSD-3-Clause"
] | 1
|
2021-01-21T13:50:22.000Z
|
2021-01-21T13:50:22.000Z
|
#!/usr/bin/env python3
#
# Copyright (c) 2021, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ipaddress
import unittest
import command
import thread_cert
# Test description:
# This test verifies if the SRP server can handle name conflicts correctly.
#
# Topology:
# LEADER (SRP server)
# / \
# / \
# / \
# ROUTER1 ROUTER2
#
SERVER = 1
CLIENT1 = 2
CLIENT2 = 3
class SrpNameConflicts(thread_cert.TestCase):
USE_MESSAGE_FACTORY = False
SUPPORT_NCP = False
TOPOLOGY = {
SERVER: {
'name': 'SRP_SERVER',
'masterkey': '00112233445566778899aabbccddeeff',
'mode': 'rdn',
'panid': 0xface
},
CLIENT1: {
'name': 'SRP_CLIENT1',
'masterkey': '00112233445566778899aabbccddeeff',
'mode': 'rdn',
'panid': 0xface,
'router_selection_jitter': 1
},
CLIENT2: {
'name': 'SRP_CLIENT2',
'masterkey': '00112233445566778899aabbccddeeff',
'mode': 'rdn',
'panid': 0xface,
'router_selection_jitter': 1
},
}
def test(self):
server = self.nodes[SERVER]
client_1 = self.nodes[CLIENT1]
client_2 = self.nodes[CLIENT2]
#
# 0. Start the server & client devices.
#
server.srp_server_set_enabled(True)
server.start()
self.simulator.go(5)
self.assertEqual(server.get_state(), 'leader')
self.simulator.go(5)
client_1.srp_server_set_enabled(False)
client_1.start()
self.simulator.go(5)
self.assertEqual(client_1.get_state(), 'router')
client_2.srp_server_set_enabled(False)
client_2.start()
self.simulator.go(5)
self.assertEqual(client_2.get_state(), 'router')
#
# 1. Register a single service and verify that it works.
#
client_1.srp_client_set_host_name('my-host-1')
client_1.srp_client_set_host_address('2001::1')
client_1.srp_client_start(server.get_addrs()[0], client_1.get_srp_server_port())
client_1.srp_client_add_service('my-service-1', '_ipps._tcp', 12345)
self.simulator.go(2)
# Verify that the client possesses correct service resources.
client_1_service = client_1.srp_client_get_services()[0]
self.assertEqual(client_1_service['instance'], 'my-service-1')
self.assertEqual(client_1_service['name'], '_ipps._tcp')
self.assertEqual(int(client_1_service['port']), 12345)
self.assertEqual(int(client_1_service['priority']), 0)
self.assertEqual(int(client_1_service['weight']), 0)
# Verify that the client receives a SUCCESS response for the server.
self.assertEqual(client_1_service['state'], 'Registered')
# Verify that the server accepts the SRP registration and stored
# the same service resources.
server_service = server.srp_server_get_services()[0]
self.assertEqual(server_service['deleted'], 'false')
self.assertEqual(server_service['instance'], client_1_service['instance'])
self.assertEqual(server_service['name'], client_1_service['name'])
self.assertEqual(int(server_service['port']), int(client_1_service['port']))
self.assertEqual(int(server_service['priority']), int(client_1_service['priority']))
self.assertEqual(int(server_service['weight']), int(client_1_service['weight']))
self.assertEqual(server_service['host'], 'my-host-1')
server_host = server.srp_server_get_hosts()[0]
self.assertEqual(server_host['deleted'], 'false')
self.assertEqual(server_host['fullname'], server_service['host_fullname'])
self.assertEqual(len(server_host['addresses']), 1)
self.assertEqual(ipaddress.ip_address(server_host['addresses'][0]), ipaddress.ip_address('2001::1'))
#
# 2. Register with the same host name from the second client and it should fail.
#
client_2.srp_client_set_host_name('my-host-1')
client_2.srp_client_set_host_address('2001::2')
client_2.srp_client_start(server.get_addrs()[0], client_2.get_srp_server_port())
client_2.srp_client_add_service('my-service-2', '_ipps._tcp', 12345)
self.simulator.go(2)
# It is expected that the registration will be rejected.
client_2_service = client_2.srp_client_get_services()[0]
self.assertEqual(client_2_service['state'], 'Adding')
self.assertEqual(client_2.srp_client_get_host_state(), 'ToAdd')
self.assertEqual(len(server.srp_server_get_services()), 1)
self.assertEqual(len(server.srp_server_get_hosts()), 1)
client_2.srp_client_clear_host()
client_2.srp_client_stop()
#
# 3. Register with the same service name from the second client and it should fail.
#
client_2.srp_client_set_host_name('my-host-2')
client_2.srp_client_set_host_address('2001::2')
client_2.srp_client_start(server.get_addrs()[0], client_2.get_srp_server_port())
client_2.srp_client_add_service('my-service-1', '_ipps._tcp', 12345)
self.simulator.go(2)
# It is expected that the registration will be rejected.
client_2_service = client_2.srp_client_get_services()[0]
self.assertEqual(client_2_service['state'], 'Adding')
self.assertEqual(client_2.srp_client_get_host_state(), 'ToAdd')
self.assertEqual(len(server.srp_server_get_services()), 1)
self.assertEqual(len(server.srp_server_get_hosts()), 1)
client_2.srp_client_clear_host()
client_2.srp_client_stop()
#
# 4. Register with different host & service instance name, it should succeed.
#
client_2.srp_client_set_host_name('my-host-2')
client_2.srp_client_set_host_address('2001::2')
client_2.srp_client_start(server.get_addrs()[0], client_2.get_srp_server_port())
client_2.srp_client_add_service('my-service-2', '_ipps._tcp', 12345)
self.simulator.go(2)
# It is expected that the registration will be accepted.
client_2_service = client_2.srp_client_get_services()[0]
self.assertEqual(client_2_service['state'], 'Registered')
self.assertEqual(client_2.srp_client_get_host_state(), 'Registered')
self.assertEqual(len(server.srp_server_get_services()), 2)
self.assertEqual(len(server.srp_server_get_hosts()), 2)
self.assertEqual(server.srp_server_get_host('my-host-2')['deleted'], 'false')
self.assertEqual(server.srp_server_get_service('my-service-2', '_ipps._tcp')['deleted'], 'false')
# Remove the host and all services registered on the SRP server.
client_2.srp_client_remove_host(remove_key=True)
self.simulator.go(2)
client_2.srp_client_clear_host()
client_2.srp_client_stop()
#
# 5. Register with the same service instance name before its KEY LEASE expires,
# it is expected to fail.
#
# Remove the service instance from SRP server but retains its name.
client_1.srp_client_remove_service('my-service-1', '_ipps._tcp')
self.simulator.go(2)
client_2.srp_client_set_host_name('my-host-2')
client_2.srp_client_set_host_address('2001::2')
client_2.srp_client_start(server.get_addrs()[0], client_2.get_srp_server_port())
client_2.srp_client_add_service('my-service-1', '_ipps._tcp', 12345)
self.simulator.go(2)
# It is expected that the registration will be rejected.
client_2_service = client_2.srp_client_get_services()[0]
self.assertEqual(client_2_service['state'], 'Adding')
self.assertEqual(client_2.srp_client_get_host_state(), 'ToAdd')
# The service 'my-service-1' is removed but its name is retained.
# This is why we can see the service record on the SRP server.
self.assertEqual(len(server.srp_server_get_services()), 1)
self.assertEqual(len(server.srp_server_get_hosts()), 1)
self.assertEqual(server.srp_server_get_host('my-host-1')['deleted'], 'false')
self.assertEqual(server.srp_server_get_service('my-service-1', '_ipps._tcp')['deleted'], 'true')
client_2.srp_client_clear_host()
client_2.srp_client_stop()
#
# 6. The service instance name can be re-used by another client when
# the service has been permanently removed (the KEY resource is
# removed) from the host.
#
# Client 1 adds back the service; it should succeed.
client_1.srp_client_add_service('my-service-1', '_ipps._tcp', 12345)
self.simulator.go(2)
self.assertEqual(len(server.srp_server_get_services()), 1)
self.assertEqual(len(server.srp_server_get_hosts()), 1)
self.assertEqual(server.srp_server_get_host('my-host-1')['deleted'], 'false')
self.assertEqual(server.srp_server_get_service('my-service-1', '_ipps._tcp')['deleted'], 'false')
# Permanently removes the service instance.
client_1.srp_client_remove_host(remove_key=True)
self.simulator.go(2)
self.assertEqual(len(server.srp_server_get_services()), 0)
self.assertEqual(len(server.srp_server_get_hosts()), 0)
# Client 2 registers the same host & service instance name as Client 1.
client_2.srp_client_stop()
client_2.srp_client_clear_host()
client_2.srp_client_set_host_name('my-host-1')
client_2.srp_client_set_host_address('2001::2')
client_2.srp_client_start(server.get_addrs()[0], client_2.get_srp_server_port())
client_2.srp_client_add_service('my-service-1', '_ipps._tcp', 12345)
self.simulator.go(2)
# It is expected that client 2 will succeed because those names have been
# released by client 1.
self.assertEqual(len(server.srp_server_get_services()), 1)
self.assertEqual(len(server.srp_server_get_hosts()), 1)
self.assertEqual(server.srp_server_get_host('my-host-1')['deleted'], 'false')
self.assertEqual(server.srp_server_get_service('my-service-1', '_ipps._tcp')['deleted'], 'false')
if __name__ == '__main__':
unittest.main()
| 42.45
| 108
| 0.676426
|
9a3b64e364bfe2acc776d3761f10f9ed04ff1561
| 1,209
|
py
|
Python
|
src/gamesbyexample/numeralsystems.py
|
spp2/PythonStdioGames
|
7edc6a07ef816a44579800e773f30217541971fa
|
[
"MIT"
] | null | null | null |
src/gamesbyexample/numeralsystems.py
|
spp2/PythonStdioGames
|
7edc6a07ef816a44579800e773f30217541971fa
|
[
"MIT"
] | null | null | null |
src/gamesbyexample/numeralsystems.py
|
spp2/PythonStdioGames
|
7edc6a07ef816a44579800e773f30217541971fa
|
[
"MIT"
] | null | null | null |
"""Numeral System Counters, by Al Sweigart al@inventwithpython.com
Shows equivalent numbers in decimal, hexadecimal, and binary.
This and other games are available at https://nostarch.com/XX
Tags: tiny, math"""
__version__ = 0
print('''Numeral System Counters, by Al Sweigart al@inventwithpython.com
This program shows you equivalent numbers in decimal (base 10),
hexadecimal (base 16), and binary (base 2) numeral systems.
(Ctrl-C to quit.)
''')
while True:
response = input('Enter the starting number (e.g. 0) > ')
if response == '':
response = '0' # Start at 0 by default.
break
if response.isdecimal():
break
print('Please enter a number.')
start = int(response)
while True:
response = input('Enter how many numbers to display (e.g. 1000) > ')
if response == '':
response = '1000' # Display 1000 numbers by default.
break
if response.isdecimal():
break
print('Please enter a number.')
amount = int(response)
for number in range(start, start + amount): # Main program loop.
hexNumber = hex(number)[2:].upper()
binNumber = bin(number)[2:]
print('DEC:', number, ' HEX:', hexNumber, ' BIN:', binNumber)
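# For number 10, this prints roughly: DEC: 10    HEX: A    BIN: 1010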
| 30.225
| 72
| 0.659222
|
ac4aa2cd5ea6e55db5b751566f5ae13feb28a20c
| 10,847
|
py
|
Python
|
python-package/lets_plot/geo_data/core.py
|
OLarionova-HORIS/lets-plot
|
89e30a574fe2de3da17186acdbe1cf427d66d87f
|
[
"MIT"
] | null | null | null |
python-package/lets_plot/geo_data/core.py
|
OLarionova-HORIS/lets-plot
|
89e30a574fe2de3da17186acdbe1cf427d66d87f
|
[
"MIT"
] | null | null | null |
python-package/lets_plot/geo_data/core.py
|
OLarionova-HORIS/lets-plot
|
89e30a574fe2de3da17186acdbe1cf427d66d87f
|
[
"MIT"
] | null | null | null |
from typing import Any, Union, List, Optional
import numpy as np
from pandas import Series
from .gis.geocoding_service import GeocodingService
from .gis.geometry import GeoPoint
from .gis.request import RequestBuilder, RequestKind
from .gis.response import Response, SuccessResponse
from .regions import Regions, _raise_exception, _to_level_kind, _to_scope
from .regions_builder import RegionsBuilder
from .type_assertion import assert_list_type
__all__ = [
'distance',
'regions_builder',
'regions',
'regions_country',
'regions_state',
'regions_county',
'regions_city',
'regions_xy',
]
UNITS_DICT = {
'mi': 3959,
'km': 6371
}
GEOFUNC_TYPES = {
'centroids': 'centroids',
'boundaries': 'boundaries',
'limits': 'limits',
'region': 'regions'
}
def _to_coords(lon: Optional[Union[float, Series, List[float]]], lat: Optional[Union[float, Series, List[float]]]) -> List[GeoPoint]:
if type(lon) != type(lat):
raise ValueError('lon and lat have different types')
if isinstance(lon, float):
return [GeoPoint(lon, lat)]
if isinstance(lon, Series):
lon = lon.tolist()
lat = lat.tolist()
if isinstance(lon, list):
assert_list_type(lon, float)
assert_list_type(lat, float)
return [GeoPoint(lo, la) for lo, la in zip(lon, lat)]
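# e.g. _to_coords([-99.25, -105.5], [31.25, 39.0]) ->
# [GeoPoint(-99.25, 31.25), GeoPoint(-105.5, 39.0)]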
def regions_xy(lon, lat, level, within=None):
request = RequestBuilder() \
.set_request_kind(RequestKind.reverse) \
.set_reverse_coordinates(_to_coords(lon, lat)) \
.set_level(_to_level_kind(level)) \
.set_reverse_scope(_to_scope(within)) \
.build()
response: Response = GeocodingService().do_request(request)
if not isinstance(response, SuccessResponse):
_raise_exception(response)
return Regions(response.level, response.features, False)
def regions_builder(level=None, request=None, within=None, highlights=False) -> RegionsBuilder:
"""
Create a RegionsBuilder object by level and request. An ambiguous request can be
refined with the where() method; build() creates a Regions object or shows details for an ambiguous result.
regions_builder(level, request, within)
Parameters
----------
level : ['country' | 'state' | 'county' | 'city' | None]
The level of administrative division. Default is 'state'.
request : [array | string | None]
Data can be filtered by full names at any level (only exact matching).
For 'state' level:
-'US-48' returns continental part of United States (48 states) in a compact form.
within : [array | string | Regions | None]
Data can be filtered by within name.
If within is array then request and within will be merged positionally (size should be equal).
If within is Regions then request will be searched in any of these regions.
'US-48' includes continental part of United States (48 states).
Returns
-------
RegionsBuilder object :
Note
-----
regions_builder() allows refining an ambiguous request with the where() method. Call build() to create a Regions object.
Examples
---------
.. jupyter-execute::
>>> from lets_plot.geo_data import *
>>> r = regions_builder(level='city', request=['moscow', 'york']).where('york', regions_state('New York')).build()
>>> r
"""
return RegionsBuilder(level, request, within, highlights)
def regions(level=None, request=None, within=None) -> Regions:
"""
Create a Regions class by level and request.
regions(level, request, within)
Parameters
----------
level : ['country' | 'state' | 'county' | 'city' | None]
The level of administrative division. None enables autodetection, falling back to 'state' in case of ambiguity.
request : [array | string | None]
Data can be filtered by full names at any level (only exact matching).
None with explicit level returns all corresponding regions, like all countries i.e. regions(level='country').
For 'state' level:
-'US-48' returns continental part of United States (48 states) in a compact form.
within : [array | string | Regions | None]
Data can be filtered by within name.
If within is array then request and within will be merged positionally (size should be equal).
If within is Regions then request will be searched in any of these regions.
'US-48' includes continental part of United States (48 states).
Returns
-------
Regions object :
Note
-----
regions() is used to get name and object id by level and request.
If the given names are not found, an exception will be thrown.
Examples
---------
.. jupyter-execute::
>>> from lets_plot.geo_data import *
>>> r = regions(level='country', request=['Germany', 'USA'])
>>> r
"""
return RegionsBuilder(level=level, request=request, scope=within).build()
def regions_country(request=None):
"""
Create a Regions class for country level by request.
regions_country(request)
Parameters
----------
request : [array | string | None]
Data can be filtered by full names at any level (only exact matching).
Returns
-------
Regions object :
Note
-----
regions_country() is used to get name and object id by request.
If the given names are not found, an exception will be thrown.
See also regions().
Examples
---------
.. jupyter-execute::
>>> from lets_plot.geo_data import *
>>> r_country = regions_country(request=['Germany', 'USA'])
>>> r_country
"""
return regions('country', request, None)
def regions_state(request=None, within=None):
"""
Create a Regions class for state level by request.
regions_state(request, within)
Parameters
----------
request : [array | string | None]
Data can be filtered by full names at any level (only exact matching).
For 'state' level:
-'US-48' returns continental part of United States (48 states) in a compact form.
within : [array | string | Regions | None]
Data can be filtered by within name.
If within is array then filter and within will be merged positionally (size should be equal).
If within is Regions then request will be searched in any of these regions.
'US-48' includes continental part of United States (48 states).
Returns
-------
Regions object :
Note
-----
regions_state() is used to get name and object id by request.
If the given names are not found, an exception will be thrown.
See also regions().
Examples
---------
.. jupyter-execute::
>>> from lets_plot.geo_data import *
>>> r_state = regions_state(request=['Texas', 'Iowa'], within='USA')
>>> r_state
"""
return regions('state', request, within)
def regions_county(request=None, within=None):
"""
Create a Regions class for county level by request.
regions_county(request, within)
Parameters
----------
request : [array | string | None]
Data can be filtered by full names at any level (only exact matching).
within : [array | string | Regions | None]
Data can be filtered by within name.
If within is array then request and within will be merged positionally (size should be equal).
If within is Regions then request will be searched in any of these regions.
'US-48' includes continental part of United States (48 states).
Returns
-------
Regions object :
Note
-----
regions_county() is used to get name and object id by request.
If the given names are not found, an exception will be thrown.
See also regions().
Examples
---------
.. jupyter-execute::
>>> from lets_plot.geo_data import *
>>> r_county = regions_county(request=['Calhoun County', 'Howard County'], within='Texas')
>>> r_county
"""
return regions('county', request, within)
def regions_city(request=None, within=None):
"""
Create a Regions class for city level by request.
regions_city(request, within)
Parameters
----------
request : [array | string | None]
Data can be filtered by full names at any level (only exact matching).
within : [array | string | Regions | None]
Data can be filtered by within name.
If within is array then request and within will be merged positionally (size should be equal).
If within is Regions then request will be searched in any of these regions.
'US-48' includes continental part of United States (48 states).
Returns
-------
Regions object :
Note
-----
regions_city() is used to get name and object id by request.
If the given names are not found, an exception will be thrown.
See also regions().
Examples
---------
.. jupyter-execute::
>>> from lets_plot.geo_data import *
>>> r_city = regions_city(request=['New York', 'Los Angeles'])
>>> r_city
"""
return regions('city', request, within)
def distance(lon0, lat0, lon1, lat1, units='km'):
"""
Calculate the distance between two points. Returns result in kilometers or miles.
distance(lon0, lat0, lon1, lat1, units)
Parameters
----------
lon0: number
Longitude coordinate of the first point.
lat0: number
Latitude coordinate of the first point.
lon1: number
Longitude coordinate of the second point.
lat1: number
Latitude coordinate of the second point.
units: [string | None]
The units in which the result will be obtained.
There are shorthands for values: 'mi'(miles), 'km'(kilometers).
Default is kilometers.
Returns
-------
object : float
Note
-----
distance() calculates the distance between two points.
Examples
---------
.. jupyter-execute::
>>> from lets_plot.geo_data import *
>>> dist = distance(-99.25061, 31.25044, -105.50083, 39.00027)
>>> dist
"""
return _calc_distance(lon0, lat0, lon1, lat1, units)
def _calc_distance(lon0, lat0, lon1, lat1, u):
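# Great-circle distance via the haversine formula:
# a = sin^2(dlat/2) + cos(lat0) * cos(lat1) * sin^2(dlon/2)
# c = 2 * asin(sqrt(a)); distance = R * c, with R taken from UNITS_DICT.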
r = _prepare_units(u)
lon0, lat0, lon1, lat1 = map(np.radians, [lon0, lat0, lon1, lat1])
dlon = lon1 - lon0
dlat = lat1 - lat0
a = np.sin(dlat / 2.0) ** 2 + np.cos(lat0) * np.cos(lat1) * np.sin(dlon / 2.0) ** 2
c = 2 * np.arcsin(np.sqrt(a))
return c * r
def _prepare_units(units: Any) -> float:
try:
return UNITS_DICT[units]
except KeyError:
raise ValueError('Wrong units: {}. The units can take the following values: '
'mi (miles), km (kilometers).'.format(units))
| 30.298883
| 133
| 0.637688
|
d0c5a912c387d5a7f17e50aaf78cd978dac19c71
| 6,529
|
py
|
Python
|
proxy/core/acceptor/executors.py
|
Flared/proxy.py
|
7199459c69a717bb55f932230ae9a39707430149
|
[
"BSD-3-Clause"
] | null | null | null |
proxy/core/acceptor/executors.py
|
Flared/proxy.py
|
7199459c69a717bb55f932230ae9a39707430149
|
[
"BSD-3-Clause"
] | null | null | null |
proxy/core/acceptor/executors.py
|
Flared/proxy.py
|
7199459c69a717bb55f932230ae9a39707430149
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import socket
import logging
import argparse
import threading
import multiprocessing
from multiprocessing import connection
from multiprocessing.reduction import send_handle
from typing import Any, Optional, List, Tuple
from .work import Work
from .remote import RemoteExecutor
from ..connection import TcpClientConnection
from ..event import EventQueue, eventNames
from ...common.flag import flags
from ...common.constants import DEFAULT_NUM_WORKERS, DEFAULT_THREADLESS
logger = logging.getLogger(__name__)
flags.add_argument(
'--threadless',
action='store_true',
default=DEFAULT_THREADLESS,
help='Default: ' + ('True' if DEFAULT_THREADLESS else 'False') + '. ' +
'Enabled by default on Python 3.8+ (mac, linux). ' +
'When disabled a new thread is spawned '
'to handle each client connection.',
)
flags.add_argument(
'--threaded',
action='store_true',
default=not DEFAULT_THREADLESS,
help='Default: ' + ('True' if not DEFAULT_THREADLESS else 'False') + '. ' +
'Disabled by default on Python < 3.8 and windows. ' +
'When enabled a new thread is spawned '
'to handle each client connection.',
)
flags.add_argument(
'--num-workers',
type=int,
default=DEFAULT_NUM_WORKERS,
help='Defaults to number of CPU cores.',
)
class ThreadlessPool:
"""Manages lifecycle of threadless pool and delegates work to them
using a round-robin strategy.
Example usage::
with ThreadlessPool(flags=...) as pool:
while True:
time.sleep(1)
If necessary, start multiple threadless pool with different
work classes.
"""
def __init__(
self,
flags: argparse.Namespace,
event_queue: Optional[EventQueue] = None,
) -> None:
self.flags = flags
self.event_queue = event_queue
# Threadless worker communication states
self.work_queues: List[connection.Connection] = []
self.work_pids: List[int] = []
self.work_locks: List[multiprocessing.synchronize.Lock] = []
# List of threadless workers
self._workers: List[RemoteExecutor] = []
self._processes: List[multiprocessing.Process] = []
def __enter__(self) -> 'ThreadlessPool':
self.setup()
return self
def __exit__(self, *args: Any) -> None:
self.shutdown()
@staticmethod
def delegate(
worker_pid: int,
work_queue: connection.Connection,
work_lock: multiprocessing.synchronize.Lock,
conn: socket.socket,
addr: Optional[Tuple[str, int]],
unix_socket_path: Optional[str] = None,
) -> None:
"""Utility method to delegate a work to threadless executor pool."""
with work_lock:
# Accepted client address is empty string for
# unix socket domain, avoid sending empty string
# for optimization.
if not unix_socket_path:
work_queue.send(addr)
send_handle(
work_queue,
conn.fileno(),
worker_pid,
)
conn.close()
@staticmethod
def start_threaded_work(
flags: argparse.Namespace,
conn: socket.socket,
addr: Optional[Tuple[str, int]],
event_queue: Optional[EventQueue] = None,
publisher_id: Optional[str] = None,
) -> Tuple[Work[TcpClientConnection], threading.Thread]:
"""Utility method to start a work in a new thread."""
work = flags.work_klass(
TcpClientConnection(conn, addr),
flags=flags,
event_queue=event_queue,
upstream_conn_pool=None,
)
# TODO: Keep reference to threads and join during shutdown.
# This will ensure connections are not abruptly closed on shutdown
# for threaded execution mode.
thread = threading.Thread(target=work.run)
thread.daemon = True
thread.start()
work.publish_event(
event_name=eventNames.WORK_STARTED,
event_payload={'fileno': conn.fileno(), 'addr': addr},
publisher_id=publisher_id or 'thread#{0}'.format(
thread.ident,
),
)
return (work, thread)
def setup(self) -> None:
"""Setup threadless processes."""
if self.flags.threadless:
for index in range(self.flags.num_workers):
self._start_worker(index)
logger.info(
'Started {0} threadless workers'.format(
self.flags.num_workers,
),
)
def shutdown(self) -> None:
"""Shutdown threadless processes."""
if self.flags.threadless:
self._shutdown_workers()
logger.info(
'Stopped {0} threadless workers'.format(
self.flags.num_workers,
),
)
def _start_worker(self, index: int) -> None:
"""Starts a threadless worker."""
self.work_locks.append(multiprocessing.Lock())
pipe = multiprocessing.Pipe()
self.work_queues.append(pipe[0])
w = RemoteExecutor(
iid=index,
work_queue=pipe[1],
flags=self.flags,
event_queue=self.event_queue,
)
self._workers.append(w)
p = multiprocessing.Process(target=w.run)
# p.daemon = True
self._processes.append(p)
p.start()
assert p.pid
self.work_pids.append(p.pid)
logger.debug('Started threadless#%d process#%d', index, p.pid)
def _shutdown_workers(self) -> None:
"""Pop a running threadless worker and clean it up."""
for index in range(self.flags.num_workers):
self._workers[index].running.set()
for _ in range(self.flags.num_workers):
pid = self.work_pids[-1]
self._processes.pop().join()
self._workers.pop()
self.work_pids.pop()
self.work_queues.pop().close()
logger.debug('Stopped threadless process#%d', pid)
self.work_locks = []
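
# Illustrative sketch (assumption, not from the upstream file): how an
# acceptor might round-robin accepted connections into the pool, using
# only the attributes defined above. `flags` and `listener` are assumed
# to come from the surrounding proxy.py bootstrap and are hypothetical
# here; this mirrors the class docstring's own usage example.
#
#     index = 0
#     with ThreadlessPool(flags=flags) as pool:
#         while True:
#             conn, addr = listener.accept()
#             ThreadlessPool.delegate(
#                 pool.work_pids[index],
#                 pool.work_queues[index],
#                 pool.work_locks[index],
#                 conn,
#                 addr,
#             )
#             index = (index + 1) % flags.num_workers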
| 32.004902 | 86 | 0.603768

f47690ae6bb55388533a42963e580b535d0479e2 | 6,268 | py | Python | src/pyrin/template/handlers/package.py | wilsonGmn/pyrin | 25dbe3ce17e80a43eee7cfc7140b4c268a6948e0 | ["BSD-3-Clause"] | null | null | null
# -*- coding: utf-8 -*-
"""
template handlers package module.
"""
import os
import pyrin.utils.path as path_utils
import pyrin.application.services as application_services
from pyrin.core.structs import DTO
from pyrin.template.decorators import template_handler
from pyrin.template.enumerations import TemplateCLIHandlersEnum
from pyrin.template.handlers.base import TemplateHandlerWithInterfaceInputBase
from pyrin.template.handlers.exceptions import InvalidPackagePathError, \
InvalidPackageClassNameError
class PackageTemplateHandlerBase(TemplateHandlerWithInterfaceInputBase):
"""
package template handler base class.
"""
def __init__(self, name, source):
"""
initializes an instance of PackageTemplateHandlerBase.
:param str name: name of the handler.
each handler must have a unique name.
:param str source: source directory of template files.
"""
self._package_path = None
self._package_full_path = None
self._package_name = None
self._package_full_name = None
self._package_title = None
self._package_alias = None
self._package_class_name = None
self._application_path = application_services.get_application_main_package_path()
self._working_directory = application_services.get_working_directory()
super().__init__(name, source)
def _show_interface(self, package_path=None, package_class_name=None):
"""
shows cli prompt to get inputs from user.
:param str package_path: the new package path. it must be a relative
path inside application main package path.
:param str package_class_name: the new package class name.
:raises InvalidPackagePathError: invalid package path error.
:raises InvalidPackageClassNameError: invalid package class name error.
"""
if package_path is None:
package_path = input('Please input the new package relative path: ')
if package_class_name is None:
package_class_name = input('Please input the new package class name: ')
self._set_attributes(package_path, package_class_name)
def _validate_inputs(self, package_path, package_class_name):
"""
validates the inputs to be used by this handler.
:param str package_path: the new package path. it must be a relative
path inside application main package path.
:param str package_class_name: the new package class name.
:raises InvalidPackagePathError: invalid package path error.
:raises InvalidPackageClassNameError: invalid package class name error.
"""
if package_path in (None, '') or package_path.isspace() or \
os.path.isabs(package_path):
raise InvalidPackagePathError('New package path is invalid.')
if package_class_name in (None, '') or package_class_name.isspace():
raise InvalidPackageClassNameError('New package class name is invalid.')
def _set_attributes(self, package_path, package_class_name):
"""
sets the required attributes based on given inputs.
:param str package_path: the new package path. it must be a relative
path inside application main package path.
:param str package_class_name: the new package class name.
:raises InvalidPackagePathError: invalid package path error.
:raises InvalidPackageClassNameError: invalid package class name error.
"""
self._validate_inputs(package_path, package_class_name)
self._package_path = package_path.rstrip(os.path.sep).rstrip(
os.path.altsep).replace(' ', '').lower()
self._package_full_path = os.path.abspath(os.path.join(
self._application_path, self._package_path))
self._package_title = ' '.join(self._package_path.split(os.path.sep)).lower().strip()
self._package_alias = '_'.join(self._package_path.split(os.path.sep)).lower().strip()
self._package_name = '.'.join(self._package_path.split(os.path.sep)).lower().strip()
self._package_full_name = path_utils.get_package_name(self._package_full_path,
self._working_directory)
self._package_class_name = package_class_name.replace(' ', '')
self._target = self._package_full_path
def _get_file_patterns(self):
"""
gets the file patterns that should be included in replacement operation.
:rtype: list[str]
"""
return ['.py']
def _get_data(self):
"""
gets the data required in template generation to replace in files.
:rtype: dict
"""
return DTO(PACKAGE_NAME=self._package_name,
PACKAGE_TITLE=self._package_title,
PACKAGE_ALIAS=self._package_alias,
PACKAGE_CLASS_NAME=self._package_class_name,
PACKAGE_FULL_NAME=self._package_full_name)
@template_handler()
class PackageTemplateHandler(PackageTemplateHandlerBase):
"""
package template handler class.
this template handler will be used to create new application packages.
"""
def __init__(self):
"""
initializes an instance of PackageTemplateHandler.
"""
pyrin_path = application_services.get_pyrin_main_package_path()
source = os.path.abspath(os.path.join(pyrin_path, 'template', 'files', 'package'))
super().__init__(TemplateCLIHandlersEnum.PACKAGE, source)
@template_handler()
class EmptyPackageTemplateHandler(PackageTemplateHandlerBase):
"""
empty package template handler class.
this template handler will be used to create new empty application packages.
"""
def __init__(self):
"""
initializes an instance of EmptyPackageTemplateHandler.
"""
pyrin_path = application_services.get_pyrin_main_package_path()
source = os.path.abspath(os.path.join(pyrin_path, 'template', 'files', 'empty_package'))
super().__init__(TemplateCLIHandlersEnum.EMPTY_PACKAGE, source)
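
# Illustrative sketch (assumption, not part of pyrin): a standalone
# reproduction of the string transforms in `_set_attributes` above,
# showing what a relative package path turns into. The input path is
# made up for illustration.
import os

demo_path = os.path.join('store', 'orders')   # hypothetical input
parts = demo_path.split(os.path.sep)
print(' '.join(parts))   # package title -> 'store orders'
print('_'.join(parts))   # package alias -> 'store_orders'
print('.'.join(parts))   # package name  -> 'store.orders'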
| 36.231214 | 96 | 0.674697

890a3cf095a862b0a2fe68526ab9d6e37f489ca2 | 321,689 | py | Python | Uncertainty/data/case-ln/case_ln_142.py | thanever/SOC | 9f30d1a9c7610a68de9c178a1170bdf1c8ca11d4 | ["MIT"] | null | null | null
from numpy import array
def case_ln_142():
ppc = {"version": '2'}
ppc["baseMVA"] = 100.0
ppc["bus"] = array([
[1.0, 1.0, 53.7196, 14.3252, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[3.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[4.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[5.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[6.0, 1.0, 10.7439, 3.9394, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[7.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[8.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[9.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 11.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[10.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 11.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[11.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[12.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[13.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[14.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[15.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[16.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[17.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[18.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[19.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[20.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[21.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[22.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[23.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[24.0, 2.0, 3.5813, 2.1488, 0.0, 0.0, 1.0, 1.0, 0.0, 6.3, 1.0, 1.1, 0.95, 0.6, 10 ],
[25.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[26.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[27.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[28.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[29.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[30.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 13.8, 1.0, 1.1, 0.95, 0.6, 10 ],
[31.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 13.8, 1.0, 1.1, 0.95, 0.6, 10 ],
[32.0, 2.0, 2.5069, 2.1488, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[33.0, 2.0, 2.1488, 2.1488, 0.0, 0.0, 1.0, 1.0, 0.0, 15.75, 1.0, 1.1, 0.95, 0.6, 10 ],
[34.0, 2.0, 2.1488, 2.1488, 0.0, 0.0, 1.0, 1.0, 0.0, 15.75, 1.0, 1.1, 0.95, 0.6, 10 ],
[35.0, 2.0, 5.0138, 2.1488, 0.0, 0.0, 1.0, 1.0, 0.0, 24.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[36.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[37.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[38.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[39.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[40.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[41.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[42.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[43.0, 1.0, 60.739, 2.2777, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[44.0, 1.0, 71.6262, 4.4838, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[45.0, 1.0, 71.6262, 4.4838, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[46.0, 1.0, 71.6262, 4.4838, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[47.0, 1.0, 71.6262, 4.4838, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[48.0, 1.0, 35.8131, 4.6629, 1e-06, -1e-06, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[49.0, 1.0, 74.4912, 4.6629, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[50.0, 1.0, 14.8982, 4.6629, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[51.0, 1.0, 42.9757, 14.3252, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[52.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[53.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[54.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[55.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[56.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[57.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[58.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[59.0, 1.0, 71.6262, 4.4838, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[107.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[108.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[109.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[110.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[111.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[112.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[113.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[114.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[115.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[116.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[117.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[118.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[119.0, 1.0, 85.9514, 42.9757, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[120.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[121.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[122.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[123.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[307.0, 1.0, 0.0, 0.0, 0.0, -1.36054422, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[310.0, 1.0, 0.0, 0.0, 0.0, -1.36054422, 2.0, 1.0, 0.0, 500.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[315.0, 1.0, 0.0, 0.0, 0.0, -1.36054422, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[316.0, 1.0, 0.0, 0.0, 0.0, -1.36054422, 2.0, 1.0, 0.0, 500.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[482.0, 1.0, 0.0, 0.0, 0.0, -0.99173882, 2.0, 1.0, 0.0, 500.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[483.0, 1.0, 0.0, 0.0, 0.0, -0.99173882, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[484.0, 1.0, 0.0, 0.0, 0.0, -0.99173882, 2.0, 1.0, 0.0, 500.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[499.0, 1.0, 0.0, 0.0, 0.0, 0.0, 2.0, 1.0, 0.0, 500.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[500.0, 1.0, 0.0, 0.0, 0.0, 0.0, 2.0, 1.0, 0.0, 500.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[508.0, 1.0, 0.0, 0.0, 0.0, 0.0, 2.0, 1.0, 0.0, 500.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[539.0, 1.0, 0.0, 0.0, 0.0, -1.36054422, 2.0, 1.0, 0.0, 500.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[540.0, 1.0, 0.0, 0.0, 0.0, -1.36054422, 2.0, 1.0, 0.0, 500.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[541.0, 1.0, 0.0, 0.0, 0.0, -1.36054422, 2.0, 1.0, 0.0, 500.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[542.0, 1.0, 0.0, 0.0, 0.0, -1.36054422, 2.0, 1.0, 0.0, 500.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[552.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[553.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[556.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[557.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1418.0, 1.0, 100.2766, 28.6505, 5e-07, -5e-07, 2.0, 1.0, 0.0, 220.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[1454.0, 1.0, 49.422, 13.609, 5e-07, -5e-07, 2.0, 1.0, 0.0, 220.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[1473.0, 1.0, 116.7506, 21.4878, 5e-07, -5e-07, 2.0, 1.0, 0.0, 220.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[1545.0, 1.0, 46.557, 10.7439, 5e-07, -5e-07, 2.0, 1.0, 0.0, 220.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[1555.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1556.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1557.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1558.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1559.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1560.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1561.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1562.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1563.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1564.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1565.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1566.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1567.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1568.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1569.0, 1.0, 0.0, 0.0, 0.0, -0.5999988, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1570.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1571.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1572.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1573.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1574.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1575.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1576.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1577.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1578.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1579.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1580.0, 1.0, 0.0, 0.0, 0.0, -0.5999988, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1581.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1582.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1583.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1584.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1585.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1586.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1587.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1588.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1589.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1590.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1591.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1592.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1593.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1594.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1595.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1596.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1597.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1598.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1599.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1600.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1601.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1602.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1603.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1604.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1605.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1606.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1607.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1608.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1609.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 13.8, 1.0, 1.1, 0.95, 0.6, 10 ],
[1610.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1611.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1612.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 13.8, 1.0, 1.1, 0.95, 0.6, 10 ],
[1613.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1614.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1615.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1616.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1617.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1618.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1619.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1620.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1621.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1622.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1623.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1624.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1625.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1626.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1627.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1628.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1629.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 13.8, 1.0, 1.1, 0.95, 0.6, 10 ],
[1630.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1631.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1632.0, 2.0, 5.0138, 2.6072, 0.0, 0.0, 1.0, 1.0, 0.0, 13.8, 1.0, 1.1, 0.95, 0.6, 10 ],
[1633.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1634.0, 2.0, 5.0138, 2.6072, 0.0, 0.0, 1.0, 1.0, 0.0, 13.8, 1.0, 1.1, 0.95, 0.6, 10 ],
[1635.0, 1.0, 214.8785, 25.6493, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1641.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1642.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1643.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1644.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1645.0, 2.0, 3.5813, 2.1488, 0.0, 0.0, 1.0, 1.0, 0.0, 6.3, 1.0, 1.1, 0.95, 0.6, 10 ],
[1646.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1647.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1648.0, 2.0, 5.0138, 2.1488, 0.0, 0.0, 1.0, 1.0, 0.0, 24.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1649.0, 2.0, 2.5069, 2.1488, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1650.0, 2.0, 5.0138, 2.1488, 0.0, 0.0, 1.0, 1.0, 0.0, 24.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1651.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 15.7, 1.0, 1.1, 0.95, 0.6, 10 ],
[1652.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1653.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 15.7, 1.0, 1.1, 0.95, 0.6, 10 ],
[1654.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 15.7, 1.0, 1.1, 0.95, 0.6, 10 ],
[1655.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1656.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1657.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1658.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1659.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1660.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1661.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1662.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 15.75, 1.0, 1.1, 0.95, 0.6, 10 ],
[1663.0, 3.0, 42.9757, 7.8789, 0.0, 0.0, 1.0, 1.0, 0.0, 27.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1664.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 15.7, 1.0, 1.1, 0.95, 0.6, 10 ],
[1665.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 24.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1666.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 24.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1667.0, 2.0, 31.5871, 9.0249, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1668.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1669.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1670.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1671.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1672.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1673.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1674.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 15.7, 1.0, 1.1, 0.95, 0.6, 10 ],
[1675.0, 2.0, 11.2811, 3.7604, 0.0, 0.0, 1.0, 1.0, 0.0, 18.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1676.0, 2.0, 11.2811, 3.7604, 0.0, 0.0, 1.0, 1.0, 0.0, 18.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1677.0, 2.0, 11.2811, 4.1042, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1678.0, 2.0, 11.2811, 4.1042, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1679.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1680.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1681.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1682.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1683.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1684.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1685.0, 2.0, 6.7687, 3.0083, 0.0, 0.0, 1.0, 1.0, 0.0, 15.75, 1.0, 1.1, 0.95, 0.6, 10 ],
[1686.0, 2.0, 11.2811, 4.1042, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1687.0, 2.0, 11.2811, 4.1042, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1688.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1689.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1690.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 35.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1691.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 15.75, 1.0, 1.1, 0.95, 0.6, 10 ],
[1692.0, 2.0, 42.9757, 7.8789, 0.0, 0.0, 1.0, 1.0, 0.0, 27.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1693.0, 2.0, 12.8927, 4.3405, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1694.0, 2.0, 12.8927, 4.3405, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1695.0, 2.0, 12.8927, 4.3405, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1696.0, 2.0, 12.8927, 4.3405, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1697.0, 2.0, 21.4878, 6.4463, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1698.0, 2.0, 21.4878, 6.4463, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1699.0, 2.0, 21.4878, 6.4463, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1700.0, 2.0, 7.1626, 2.6072, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1701.0, 2.0, 7.1626, 2.6072, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1702.0, 2.0, 12.8927, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 15.75, 1.0, 1.1, 0.95, 0.6, 10 ],
[1703.0, 2.0, 12.8927, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 15.75, 1.0, 1.1, 0.95, 0.6, 10 ],
[1704.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 13.8, 1.0, 1.1, 0.95, 0.6, 10 ],
[1705.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 13.8, 1.0, 1.1, 0.95, 0.6, 10 ],
[1706.0, 2.0, 12.8927, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 16.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1707.0, 2.0, 15.0415, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1708.0, 2.0, 5.7301, 2.6072, 0.0, 0.0, 1.0, 1.0, 0.0, 13.8, 1.0, 1.1, 0.95, 0.6, 10 ],
[1709.0, 2.0, 5.7301, 2.6072, 0.0, 0.0, 1.0, 1.0, 0.0, 13.8, 1.0, 1.1, 0.95, 0.6, 10 ],
[1710.0, 2.0, 7.1626, 2.6072, 0.0, 0.0, 1.0, 1.0, 0.0, 18.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1711.0, 2.0, 12.8927, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1712.0, 2.0, 12.8927, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1713.0, 2.0, 10.7439, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1714.0, 2.0, 10.7439, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1715.0, 2.0, 10.7439, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1716.0, 2.0, 10.7439, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1717.0, 2.0, 34.3806, 6.4607, 0.0, 0.0, 1.0, 1.0, 0.0, 24.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1718.0, 2.0, 34.3806, 6.4607, 0.0, 0.0, 1.0, 1.0, 0.0, 24.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1719.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1720.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1721.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1722.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1723.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1724.0, 2.0, 7.1626, 2.6072, 0.0, 0.0, 1.0, 1.0, 0.0, 18.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1725.0, 2.0, 7.1626, 2.6072, 0.0, 0.0, 1.0, 1.0, 0.0, 18.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1726.0, 2.0, 7.1626, 2.6072, 0.0, 0.0, 1.0, 1.0, 0.0, 15.75, 1.0, 1.1, 0.95, 0.6, 10 ],
[1727.0, 2.0, 7.1626, 2.6072, 0.0, 0.0, 1.0, 1.0, 0.0, 15.75, 1.0, 1.1, 0.95, 0.6, 10 ],
[1728.0, 2.0, 15.0415, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1729.0, 2.0, 15.0415, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1730.0, 2.0, 8.5951, 2.6072, 0.0, 0.0, 1.0, 1.0, 0.0, 15.75, 1.0, 1.1, 0.95, 0.6, 10 ],
[1731.0, 2.0, 8.5951, 2.6072, 0.0, 0.0, 1.0, 1.0, 0.0, 15.75, 1.0, 1.1, 0.95, 0.6, 10 ],
[1732.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 15.75, 1.0, 1.1, 0.95, 0.6, 10 ],
[1733.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 15.75, 1.0, 1.1, 0.95, 0.6, 10 ],
[1734.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 15.75, 1.0, 1.1, 0.95, 0.6, 10 ],
[1735.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1736.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1737.0, 2.0, 8.5951, 2.6072, 0.0, 0.0, 1.0, 1.0, 0.0, 15.75, 1.0, 1.1, 0.95, 0.6, 10 ],
[1738.0, 2.0, 8.5951, 2.5785, 0.0, 0.0, 1.0, 1.0, 0.0, 15.75, 1.0, 1.1, 0.95, 0.6, 10 ],
[1739.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1740.0, 2.0, 15.0415, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1741.0, 2.0, 15.0415, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1742.0, 2.0, 15.0415, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1743.0, 2.0, 15.0415, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1744.0, 2.0, 15.0415, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 22.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1745.0, 2.0, 15.0415, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 22.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1746.0, 2.0, 78.7888, 25.0692, 0.0, 0.0, 1.0, 1.0, 0.0, 13.8, 1.0, 1.1, 0.95, 0.6, 10 ],
[1747.0, 2.0, 7.1626, 2.1488, 0.0, 0.0, 1.0, 1.0, 0.0, 13.8, 1.0, 1.1, 0.95, 0.6, 10 ],
[1748.0, 2.0, 30.083, 7.8144, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1749.0, 2.0, 30.083, 7.8144, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1750.0, 2.0, 30.083, 7.8144, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1751.0, 2.0, 30.083, 7.8144, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1752.0, 2.0, 15.0415, 3.9036, 0.0, 0.0, 1.0, 1.0, 0.0, 20.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1754.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1755.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1756.0, 1.0, 0.0, 0.0, 0.0, -0.5999988, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1757.0, 1.0, 0.0, 0.0, 0.0, -0.5999988, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1758.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1759.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1760.0, 1.0, 0.0, 0.0, 0.0, -0.5999988, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1761.0, 1.0, 0.0, 0.0, 0.0, -0.5999988, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1762.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1763.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1764.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1765.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1766.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1767.0, 1.0, 71.6262, 4.6772, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1768.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1769.0, 1.0, 71.6262, 4.4838, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1770.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1771.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1772.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1773.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1774.0, 1.0, 39.3944, 5.1428, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1775.0, 1.0, 71.6262, 4.4838, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1776.0, 1.0, 35.8131, 4.6772, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1777.0, 1.0, 60.8822, 14.3252, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1778.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1779.0, 1.0, 35.8131, 4.6772, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1780.0, 1.0, 107.4392, 15.944, 1e-06, -1e-06, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1781.0, 1.0, 39.3944, 5.1428, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1782.0, 1.0, 37.2456, 4.8634, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1783.0, 1.0, 37.2456, 4.8634, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1784.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1785.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1786.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1787.0, 1.0, 39.3944, 15.7578, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1788.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1789.0, 1.0, 0.0, 0.0, 0.0, -0.99173882, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1790.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1791.0, 1.0, 239.0451, 72.5644, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1792.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1793.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1794.0, 1.0, 28.6505, 7.1626, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1795.0, 1.0, 28.2279, 3.9967, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1796.0, 1.0, 71.6262, 24.3672, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1797.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1798.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1799.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1800.0, 1.0, 74.4912, 25.3413, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1801.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1802.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1803.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1804.0, 1.0, 52.3874, 31.4367, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1805.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1806.0, 1.0, 19.3606, -7.2916, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1807.0, 1.0, 71.6262, 14.3252, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1808.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1809.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1810.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1811.0, 1.0, 0.0, 0.0, 0.0, -2.40000384, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1812.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1813.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1814.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1815.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1816.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1817.0, 1.0, 7.091, 1.2176, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1818.0, 1.0, 58.9913, 8.9604, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1819.0, 1.0, 3.524, 0.881, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1820.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1821.0, 1.0, 41.2065, 9.2398, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1822.0, 1.0, 71.6262, 4.4838, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1823.0, 1.0, 35.8131, 24.3672, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1824.0, 1.0, 38.893, 6.7329, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1825.0, 1.0, 6.8045, 1.2176, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1826.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1827.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1828.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1829.0, 1.0, 171.9601, 35.8489, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1830.0, 1.0, 20.0553, 1.4325, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1831.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1832.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1833.0, 1.0, 78.7888, 25.7854, 1e-06, -1e-06, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1834.0, 1.0, 0.0, 0.0, 0.0, -1.4999925, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1835.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1836.0, 1.0, 34.1155, 9.7483, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1837.0, 1.0, 50.1884, -1.5185, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1838.0, 1.0, 5.3934, 1.2893, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1839.0, 1.0, 16.3308, 6.0882, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1840.0, 1.0, 44.3939, 9.0894, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1841.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1842.0, 1.0, 55.1521, 9.6122, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1843.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1844.0, 1.0, 21.4878, 24.3672, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1845.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1846.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1847.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1848.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1849.0, 1.0, 0.0, 0.0, 0.0, 5.74999045, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1850.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1851.0, 1.0, 0.0, 0.0, 0.0, -1.20000048, 1.0, 1.0, 0.0, 63.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1852.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1853.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1854.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1855.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1856.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1857.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1858.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1859.0, 1.0, 40.8269, 13.609, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1860.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1861.0, 1.0, 71.3611, 14.5974, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1862.0, 1.0, 0.0, 0.0, 0.0, 0.64800415, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1863.0, 1.0, 0.0, 0.0, 0.0, -3.8340098, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1864.0, 1.0, 0.0, 0.0, 0.0, -1.97550375, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1865.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1866.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1867.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1868.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1869.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1870.0, 1.0, 6.1598, 0.881, 0.0, 0.0, 2.0, 1.0, 0.0, 220.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[1871.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1872.0, 1.0, 0.0, 0.0, 0.0, -1.1999976, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1873.0, 1.0, 0.0, 0.0, 0.0, -1.1999976, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1874.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1875.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1876.0, 1.0, 0.0, 0.0, 0.0, -1.36054422, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1877.0, 1.0, 0.0, 0.0, 0.0, -1.7999964, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1878.0, 1.0, 0.0, 0.0, 0.0, -0.5999988, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1879.0, 1.0, 0.0, 0.0, 0.0, -0.5999988, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1880.0, 1.0, 0.0, 0.0, 0.0, 0.599988, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1881.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1882.0, 1.0, 0.0, 0.0, 0.0, -1.20000048, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1883.0, 1.0, 0.0, 0.0, 0.0, -1.36054422, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1884.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1885.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1886.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1887.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1888.0, 1.0, 8.4949, 1.2678, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1889.0, 1.0, 0.0, 0.0, 0.0, -0.6000024, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1890.0, 1.0, 0.0, 0.0, 0.0, -1.1999976, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1891.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1892.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1893.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1894.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1895.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1896.0, 1.0, 0.0, 0.0, 0.0, -1.36054422, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1897.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1898.0, 1.0, 0.0, 0.0, 0.0, -1.36054422, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1899.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1900.0, 1.0, 61.663, 4.2259, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1901.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1902.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1903.0, 1.0, 0.0, 0.0, 0.0, -1.36054422, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1904.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1905.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1906.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1907.0, 1.0, 62.3148, 15.5429, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1908.0, 1.0, 26.5017, 5.8733, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1909.0, 1.0, 40.612, 16.2448, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1910.0, 1.0, 50.1383, 17.5484, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1911.0, 1.0, 81.2241, 16.2448, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1912.0, 1.0, 38.2484, 9.956, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1913.0, 1.0, 89.6258, -2.5857, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1914.0, 1.0, 18.501, 5.9879, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1915.0, 1.0, 24.2669, 7.8001, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1916.0, 1.0, 39.3944, 17.8707, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1917.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1918.0, 1.0, 148.9824, 36.5007, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1919.0, 1.0, 47.2016, -30.1546, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1920.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1921.0, 1.0, 53.9846, 0.0, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1922.0, 1.0, 49.6799, 18.7016, 5e-07, -5e-07, 2.0, 1.0, 0.0, 220.0, 2.0, 1.1, 0.95, 0.6, 10 ],
[1923.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1924.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1925.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1926.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1927.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1928.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1929.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1930.0, 1.0, 0.0, 0.0, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1931.0, 1.0, 78.7888, 4.9279, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1932.0, 1.0, 41.8368, 15.1274, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1933.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1934.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1935.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1936.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1937.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1938.0, 1.0, 23.7799, 6.9477, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1939.0, 1.0, 119.3363, 18.5512, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1940.0, 1.0, 63.9622, 6.7329, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1941.0, 1.0, 74.9353, 18.1787, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1942.0, 1.0, 173.3353, 54.3643, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1943.0, 1.0, 42.9829, 7.3775, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1944.0, 1.0, 106.8949, 8.3444, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1945.0, 1.0, 40.612, 16.2448, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1946.0, 1.0, 112.2382, 17.4768, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1947.0, 1.0, 106.2932, 17.4052, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1948.0, 1.0, 136.6627, 45.5542, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1949.0, 1.0, 53.2899, -0.6446, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1950.0, 1.0, 116.2493, 31.6588, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1951.0, 1.0, 95.6496, 23.3143, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1952.0, 1.0, 4.9136, 0.8953, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1953.0, 1.0, 27.662, 8.545, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1954.0, 1.0, 95.2628, 13.609, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1955.0, 1.0, 71.6262, 4.6772, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1956.0, 1.0, 16.0443, 5.2287, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1957.0, 1.0, 0.0, 0.0, 0.0, -2.3999952, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1958.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1959.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1960.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1961.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1962.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1963.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 63.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1964.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 63.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1965.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1966.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1967.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1968.0, 1.0, 124.5149, 7.5924, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1969.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1970.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1971.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1972.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1973.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1974.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1975.0, 1.0, 0.0, 0.0, 0.0, -1.08843537, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1976.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1977.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1978.0, 1.0, 156.8756, 18.2503, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1979.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1980.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1981.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1982.0, 1.0, 13.3941, 4.799, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1983.0, 1.0, 34.3089, 15.1131, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1984.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1985.0, 1.0, 208.8619, 84.3183, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1986.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1987.0, 1.0, 0.0, 0.0, 0.0, -1.23967967, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1988.0, 1.0, 139.5994, 26.4301, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1989.0, 1.0, 53.0034, 18.6228, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1990.0, 1.0, 85.9586, 31.7519, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1991.0, 1.0, 112.7467, 44.2005, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1992.0, 1.0, 89.5327, 10.8872, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1993.0, 1.0, 39.466, 18.4795, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1994.0, 1.0, 83.731, 14.2536, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1995.0, 1.0, 76.9265, 24.5678, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1996.0, 1.0, 0.0, 0.0, 0.0, -2.999994, 1.0, 1.0, 0.0, 63.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1997.0, 1.0, 0.0, 0.0, 0.0, -1.7999964, 1.0, 1.0, 0.0, 63.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1998.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[1999.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2000.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2001.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2002.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2003.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2004.0, 1.0, 77.2846, 18.3363, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2005.0, 1.0, 27.0747, 4.5841, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2006.0, 1.0, 123.9133, 36.601, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2007.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2008.0, 1.0, 89.3178, 10.9516, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2009.0, 1.0, 0.0, 0.0, 0.0, -1.36054422, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2010.0, 1.0, 0.0, 0.0, 0.0, 13.8608871, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2011.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 63.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2012.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 63.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2013.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2014.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2015.0, 1.0, 98.7725, 3.3521, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2016.0, 1.0, 57.5803, 10.2784, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2017.0, 1.0, 0.0, 0.0, 0.0, 0.599988, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2018.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2019.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2020.0, 1.0, 33.3277, 10.3213, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2021.0, 1.0, 78.4736, 12.2123, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2022.0, 1.0, 0.0, 0.0, 0.0, 1.29600829, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2023.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2024.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2025.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2026.0, 1.0, 68.9044, 7.0194, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2027.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2028.0, 1.0, 128.4257, 21.5595, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2029.0, 1.0, 57.3009, 18.3363, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2030.0, 1.0, 80.2213, 2.1488, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2031.0, 1.0, 0.0, 0.0, 0.0, -0.9000009, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2032.0, 1.0, 0.0, 0.0, 0.0, -1.36054422, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2033.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2034.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2035.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2036.0, 1.0, 83.8742, 16.6173, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2037.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2038.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2039.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2040.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2041.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2042.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2043.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 63.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2044.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2045.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2046.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2047.0, 1.0, 93.0424, -12.8855, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2048.0, 1.0, 10.6508, 2.4568, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2049.0, 1.0, 0.0, 0.0, 0.0, -0.5999988, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2050.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2051.0, 1.0, 93.114, 14.3252, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2052.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2053.0, 1.0, 226.6968, 44.4082, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2054.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2055.0, 1.0, 0.0, 0.0, 0.0, -1.1999976, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2056.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2057.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2058.0, 1.0, 70.5661, 8.7241, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2059.0, 1.0, 59.0271, 11.453, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2060.0, 1.0, 174.6174, 60.7032, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2061.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2062.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2063.0, 1.0, 79.3618, 15.3996, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2064.0, 1.0, 39.9459, 8.1152, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2065.0, 1.0, 76.4251, 21.273, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2066.0, 1.0, 118.8994, 18.6228, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2067.0, 1.0, 111.6652, 21.6311, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2068.0, 1.0, 78.0725, 8.8816, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2069.0, 1.0, 142.2925, 26.5518, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2070.0, 1.0, 192.0297, 44.1217, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2071.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2072.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2073.0, 1.0, 97.3614, 42.5316, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2074.0, 1.0, 68.5462, 22.2757, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2075.0, 1.0, 133.9409, 34.0224, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2076.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2077.0, 1.0, 0.0, 0.0, 0.0, 0.900009, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2078.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 66.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2079.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2080.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2081.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2082.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2083.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2084.0, 1.0, 74.6345, 19.1242, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2085.0, 1.0, 39.8241, 14.9699, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2086.0, 1.0, 60.5957, 12.6778, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2087.0, 1.0, 101.9957, 31.3723, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2088.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2089.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2090.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2091.0, 1.0, 92.9708, -10.7153, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2092.0, 1.0, 99.7036, 32.8764, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2093.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2094.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2095.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2096.0, 1.0, 8.1224, 2.951, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2097.0, 1.0, 73.6317, 29.1519, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2098.0, 1.0, 70.122, 24.4245, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2099.0, 1.0, 73.1948, 15.9153, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2100.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2101.0, 1.0, 135.6743, 38.7641, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2102.0, 1.0, 162.3693, 56.9141, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2103.0, 1.0, 117.7892, 11.8971, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2104.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2105.0, 1.0, 236.2016, 75.9237, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2106.0, 1.0, 55.2739, 2.1273, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2107.0, 1.0, 57.3511, 19.9121, 1e-06, -1e-06, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2108.0, 1.0, 273.7552, 48.9207, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2109.0, 1.0, 216.311, 30.083, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2110.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2111.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2112.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2113.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2114.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2115.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2116.0, 2.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 10.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2117.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2118.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2119.0, 1.0, 23.7083, 0.0, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2120.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2121.0, 1.0, 275.7607, 63.031, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2122.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2123.0, 1.0, 88.5586, 27.0246, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2124.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2125.0, 1.0, 175.6202, 55.2309, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2126.0, 1.0, 218.03, 34.0224, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2127.0, 1.0, 114.4586, 31.0858, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2128.0, 1.0, 128.3469, 12.8139, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2129.0, 1.0, 11.6393, 4.6199, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2130.0, 1.0, 98.1278, 23.6366, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2131.0, 1.0, 0.5515, 1.7548, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2132.0, 1.0, 86.2665, 24.8614, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2133.0, 1.0, 154.426, 4.7273, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2134.0, 1.0, 64.4635, 16.474, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2135.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2136.0, 1.0, 0.0, 0.0, 0.0, -1.23967967, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2137.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2138.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2139.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2140.0, 1.0, 0.0, 0.0, 0.0, -1.36054422, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2141.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2142.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2143.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2144.0, 1.0, 0.0, 0.0, 0.0, -1.500015, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2145.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2146.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2147.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2148.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2149.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2150.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2151.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2152.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2153.0, 1.0, 98.4931, 31.8736, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2154.0, 1.0, 76.8549, 8.81, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2155.0, 1.0, 148.3378, 29.9397, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2156.0, 1.0, 49.5653, 11.4602, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2157.0, 1.0, 28.2207, 16.1875, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2158.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2159.0, 1.0, 37.3172, 8.3086, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2160.0, 1.0, 55.94, 16.1159, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2161.0, 1.0, 188.8066, 29.5816, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2162.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2163.0, 1.0, 132.8952, 16.166, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2164.0, 1.0, 85.8081, 7.3775, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2165.0, 1.0, 31.5155, 2.9367, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2166.0, 1.0, 126.7067, 31.0858, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2167.0, 1.0, 62.1715, 12.8927, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2168.0, 1.0, 75.981, 19.8118, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2169.0, 1.0, 145.5587, 11.1737, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2170.0, 1.0, 147.5499, 24.3529, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2171.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2172.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2173.0, 1.0, 114.2724, 27.4901, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2174.0, 1.0, 230.9944, 59.0916, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2175.0, 1.0, 156.5032, 58.3753, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2176.0, 1.0, 176.2004, 3.6529, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2177.0, 1.0, 148.7675, 24.138, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2178.0, 1.0, 182.2169, 53.1466, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2179.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2180.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2181.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2182.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2183.0, 1.0, 55.1951, 12.2194, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2184.0, 1.0, 91.6958, 14.225, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2185.0, 1.0, 92.1112, 35.4549, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2186.0, 1.0, 128.9271, 23.7799, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2187.0, 1.0, 157.5776, 35.455, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2188.0, 1.0, 156.5748, 37.9619, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2189.0, 1.0, 69.9071, 7.8072, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2190.0, 1.0, 102.2105, 7.2342, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2191.0, 1.0, 106.2932, 30.3695, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2192.0, 1.0, 128.569, -2.3852, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2193.0, 1.0, 174.0516, 20.7716, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2194.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2195.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2196.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2197.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2198.0, 1.0, 200.0089, 48.6413, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2199.0, 1.0, 42.8324, 11.6965, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2200.0, 1.0, 90.6572, 21.452, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2201.0, 1.0, 187.2738, 29.5744, 1e-07, -9.9e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2202.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2203.0, 1.0, 41.5432, 11.4602, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2204.0, 1.0, 189.8093, 66.6123, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2205.0, 1.0, 27.2179, 5.7301, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2206.0, 1.0, 112.7396, 42.8325, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2207.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2208.0, 1.0, 41.0418, 13.1076, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2209.0, 1.0, 119.9738, 47.4165, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2210.0, 1.0, 45.6975, 21.5595, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2211.0, 1.0, 51.356, 8.5235, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2212.0, 1.0, 55.2954, 16.0443, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2213.0, 1.0, 22.992, 6.2315, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2214.0, 1.0, 153.1367, 50.6397, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2215.0, 1.0, 82.0836, 23.0636, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2216.0, 1.0, 58.2321, 16.7605, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2217.0, 1.0, 230.6362, 75.5656, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2218.0, 1.0, 59.4497, 25.0692, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2219.0, 1.0, 35.8131, 4.6772, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2220.0, 1.0, 96.6237, 20.3418, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2221.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2222.0, 1.0, 105.7775, 15.5715, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2223.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2224.0, 1.0, 99.9185, 14.8266, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2225.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2226.0, 1.0, 130.3596, 59.4497, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2227.0, 1.0, 150.4149, 60.166, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2228.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2229.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2230.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 500.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2231.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2232.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2233.0, 1.0, 74.1331, 6.9477, 5e-07, -5e-07, 1.0, 1.0, 0.0, 220.0, 1.0, 1.1, 0.95, 0.6, 10 ],
[2234.0, 1.0, 0.0, 0.0, 0.0, 0.0, 2.0, 1.0, 0.0, 220.0, 2.0, 1.1, 0.95, 0.6, 10 ]
])
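# Generator data follows. This annotation is an addition, not part of the
# original case: the leading 21 columns appear to follow the standard
# PYPOWER/MATPOWER gen format (bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status,
# Pmax, Pmin, then the Pc1..apf block, which is all zero here). The seven
# trailing columns look like an export-specific extension of the format;
# their meaning is not documented in this file, so treat any reading of
# them as an assumption.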
ppc["gen"] = array([
[1634.0, 40.0, 44.7, 68.2, 0.0, 1.07, 100.0, 1.0, 110.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 129.41, 22.0, 33.0, 33.0, 44.0 ],
[1632.0, 60.0, 43.6, 68.2, 0.0, 1.07, 100.0, 0.0, 110.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 129.41, 22.0, 33.0, 33.0, 44.0 ],
[1629.0, 90.0, 40.8, 77.46, 0.0, 1.07, 100.0, 1.0, 125.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 147.06, 25.0, 37.5, 37.5, 50.0 ],
[1685.0, 154.8, 75.3, 80.0, 0.0, 1.07, 100.0, 1.0, 157.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 177.177, 31.4, 47.1, 47.1, 62.8 ],
[1706.0, 282.3, 96.3, 185.9, 0.0, 1.07, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.0, 60.0, 90.0, 90.0, 120.0 ],
[1747.0, 79.0, 23.2, 41.5, 0.0, 1.0, 100.0, 0.0, 75.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 88.8888, 15.0, 22.5, 22.5, 30.0 ],
[1746.0, 77.8, 18.4, 41.5, 0.0, 1.0, 100.0, 0.0, 75.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 88.8888, 15.0, 22.5, 22.5, 30.0 ],
[31.0, 100.0, 12.6, 62.0, 0.0, 1.0, 100.0, 1.0, 100.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 117.65, 20.0, 30.0, 30.0, 40.0 ],
[30.0, 100.0, 12.6, 62.0, 0.0, 1.0, 100.0, 0.0, 100.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 117.65, 20.0, 30.0, 30.0, 40.0 ],
[23.0, 49.5, 19.0, 62.0, 0.0, 1.0, 100.0, 0.0, 9.978, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[4.0, 7.1, 1.8, 62.0, 0.0, 1.0, 100.0, 0.0, 9.9803, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1666.0, 193.0, 107.7, 185.9, 0.0, 1.0, 100.0, 1.0, 367.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 411.7, 70.0, 105.0, 105.0, 140.0 ],
[1665.0, 264.8, 115.6, 185.9, 0.0, 1.0, 100.0, 1.0, 367.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 411.7, 70.0, 105.0, 105.0, 140.0 ],
[1745.0, 234.1, 26.6, 216.9, 0.0, 1.0, 100.0, 1.0, 350.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 411.76, 70.0, 105.0, 105.0, 140.0 ],
[1744.0, 231.6, 46.9, 216.9, 0.0, 1.02, 100.0, 1.0, 350.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 411.76, 70.0, 105.0, 105.0, 140.0 ],
[1743.0, 258.5, 46.6, 216.9, 0.0, 1.0, 100.0, 1.0, 350.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 411.76, 70.0, 105.0, 105.0, 140.0 ],
[1742.0, 263.3, 101.2, 216.9, 0.0, 1.02, 100.0, 1.0, 350.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 411.76, 70.0, 105.0, 105.0, 140.0 ],
[1664.0, 350.0, 34.0, 216.9, 0.0, 1.015, 100.0, 0.0, 350.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 411.76, 70.0, 105.0, 105.0, 140.0 ],
[26.0, 49.5, 19.0, 62.0, 0.0, 1.0, 100.0, 0.0, 18.5034, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[28.0, 49.5, 19.0, 62.0, 0.0, 1.0, 100.0, 0.0, 17.9407, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[19.0, 49.5, 19.0, 62.0, 0.0, 1.0, 100.0, 0.0, 13.2625, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1741.0, 283.9, 41.3, 216.9, 0.0, 1.0, 100.0, 1.0, 350.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 411.76, 70.0, 105.0, 105.0, 140.0 ],
[1740.0, 262.8, 32.8, 216.9, 0.0, 1.03, 100.0, 1.0, 350.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 411.76, 70.0, 105.0, 105.0, 140.0 ],
[1670.0, 219.8, 92.0, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1669.0, 299.8, 103.9, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1687.0, 297.4, 102.2, 185.9, 0.0, 1.01, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1686.0, 297.7, 86.4, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1729.0, 266.4, 133.3, 216.9, 0.0, 1.0, 100.0, 1.0, 350.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 411.76, 70.0, 105.0, 105.0, 140.0 ],
[1728.0, 225.0, 140.2, 216.9, 0.0, 1.0, 100.0, 1.0, 350.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 411.76, 70.0, 105.0, 105.0, 140.0 ],
[1696.0, 209.0, 112.0, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1695.0, 209.0, 89.0, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1690.0, 133.1, 0.0, 88.0, 0.0, 1.0, 100.0, 1.0, 26.7239, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1659.0, 22.2, -0.9, 62.0, 0.0, 1.0, 100.0, 1.0, 12.3483, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1738.0, 134.2, 51.3, 50.0, 0.0, 1.0, 100.0, 1.0, 200.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 235.29, 40.0, 60.0, 60.0, 80.0 ],
[1737.0, 155.4, 40.6, 50.0, 0.0, 1.0, 100.0, 1.0, 200.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 235.29, 40.0, 60.0, 60.0, 80.0 ],
[1707.0, 264.3, 28.2, 216.9, 0.0, 1.0, 100.0, 1.0, 350.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 411.76, 70.0, 105.0, 105.0, 140.0 ],
[1752.0, 254.3, 31.4, 216.9, 0.0, 1.0, 100.0, 1.0, 350.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 411.76, 70.0, 105.0, 105.0, 140.0 ],
[13.0, 90.0, 19.0, 62.0, 0.0, 1.0, 100.0, 0.0, 1.3183, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1703.0, 93.2, 0.0, 123.9, 0.0, 1.0, 100.0, 1.0, 150.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 176.0, 30.0, 45.0, 45.0, 60.0 ],
[1702.0, 144.4, 17.6, 123.9, 0.0, 1.0, 100.0, 0.0, 150.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 176.0, 30.0, 45.0, 45.0, 60.0 ],
[1704.0, 107.3, 0.0, 123.9, 0.0, 1.0, 100.0, 1.0, 150.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 176.0, 30.0, 45.0, 45.0, 60.0 ],
[1705.0, 107.7, 9.9, 123.9, 0.0, 1.0, 100.0, 1.0, 150.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 176.0, 30.0, 45.0, 45.0, 60.0 ],
[34.0, 30.0, 20.0, 35.0, 0.0, 1.003, 100.0, 1.0, 50.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 40.0, 6.0, 9.0, 9.0, 12.0 ],
[33.0, 30.0, 20.0, 35.0, 0.0, 1.0, 100.0, 1.0, 50.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 40.0, 6.0, 9.0, 9.0, 12.0 ],
[1678.0, 257.9, 99.5, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1677.0, 128.6, 88.6, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1655.0, 49.5, 0.0, 4.95, -0.0, 1.0, 100.0, 0.0, 1.1867, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 110.0, 19.8, 29.7, 29.7, 39.6 ],
[27.0, 48.0, 19.0, 62.0, 0.0, 1.0, 100.0, 0.0, 7.0768, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1657.0, 90.0, 19.0, 62.0, 0.0, 1.0, 100.0, 0.0, 2.7426, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1650.0, 1068.2, 202.5, 600.0, 0.0, 1.0, 100.0, 1.0, 1150.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 1278.0, 223.6, 335.4, 335.4, 447.2 ],
[1648.0, 1000.0, 300.0, 600.0, 0.0, 1.0, 100.0, 1.0, 1150.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 1277.778, 230.0, 345.0, 345.0, 460.0 ],
[35.0, 1118.0, 300.0, 600.0, 0.0, 1.0, 100.0, 0.0, 1150.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 1278.0, 223.6, 335.4, 335.4, 447.2 ],
[1682.0, 246.6, 95.4, 185.9, 0.0, 1.0, 100.0, 1.0, 330.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 388.0, 66.0, 99.0, 99.0, 132.0 ],
[1681.0, 275.9, 100.9, 185.9, 0.0, 1.0, 100.0, 1.0, 330.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 388.0, 66.0, 99.0, 99.0, 132.0 ],
[2116.0, 58.3, 2.4, 44.9, 0.0, 1.0, 100.0, 0.0, 72.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 85.294, 14.5, 21.75, 21.75, 29.0 ],
[2114.0, 67.9, 2.3, 44.9, 0.0, 1.0, 100.0, 0.0, 72.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 85.294, 14.5, 21.75, 21.75, 29.0 ],
[2113.0, 67.0, 4.7, 44.9, 0.0, 1.0, 100.0, 0.0, 72.5, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 85.294, 14.5, 21.75, 21.75, 29.0 ],
[2112.0, 32.2, 5.0, 5.0, 0.0, 1.0, 100.0, 0.0, 36.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 41.14, 7.2, 10.8, 10.8, 14.4 ],
[2110.0, 32.6, 5.4, 5.0, 0.0, 1.0, 100.0, 0.0, 36.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 41.14, 7.2, 10.8, 10.8, 14.4 ],
[1736.0, 30.2, 5.9, 20.0, 0.0, 1.0, 100.0, 0.0, 42.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 49.412, 8.4, 12.6, 12.6, 16.8 ],
[1735.0, 30.8, 6.3, 20.0, 0.0, 1.0, 100.0, 0.0, 42.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 49.412, 8.4, 12.6, 12.6, 16.8 ],
[1734.0, 200.0, 88.0, 123.9, 0.0, 1.0, 100.0, 0.0, 200.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 235.29, 40.0, 60.0, 60.0, 80.0 ],
[1733.0, 200.0, 123.9, 123.9, 0.0, 1.03, 100.0, 0.0, 200.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 235.29, 40.0, 60.0, 60.0, 80.0 ],
[1732.0, 130.3, 19.7, 123.9, 0.0, 1.0, 100.0, 0.0, 200.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 235.29, 40.0, 60.0, 60.0, 80.0 ],
[1694.0, 212.5, 27.6, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1693.0, 215.3, 38.5, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[25.0, 48.0, 19.0, 62.0, 0.0, 1.0, 100.0, 0.0, 2.1467, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1701.0, 472.5, 159.0, 290.6, 0.0, 1.03, 100.0, 1.0, 600.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 666.67, 120.0, 180.0, 180.0, 240.0 ],
[1700.0, 563.6, 210.1, 290.6, 0.0, 1.03, 100.0, 0.0, 600.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 666.67, 120.0, 180.0, 180.0, 240.0 ],
[1652.0, 50.0, 19.0, 62.0, 0.0, 1.0, 100.0, 0.0, 1.394, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1645.0, 50.0, 20.0, 60.0, 0.0, 1.03, 100.0, 1.0, 50.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 58.0, 10.0, 15.0, 15.0, 20.0 ],
[24.0, 50.0, 20.0, 60.0, 0.0, 1.03, 100.0, 0.0, 50.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 58.0, 10.0, 15.0, 15.0, 20.0 ],
[1656.0, 49.5, 0.0, 4.95, -0.0, 1.0, 100.0, 1.0, 4.3276, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 110.0, 19.8, 29.7, 29.7, 39.6 ],
[14.0, 49.5, 0.0, 4.95, -0.0, 1.0, 100.0, 0.0, 2.337, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 110.0, 19.8, 29.7, 29.7, 39.6 ],
[1679.0, 140.0, 9.6, 62.0, 0.0, 1.0, 100.0, 1.0, 10.8998, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[116.0, 99.0, 19.0, 62.0, 0.0, 1.0, 100.0, 0.0, 1.882, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[18.0, 99.0, 20.0, 62.0, 0.0, 1.0, 100.0, 0.0, 5.6466, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[17.0, 99.0, 20.0, 62.0, 0.0, 1.0, 100.0, 0.0, 4.3464, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[16.0, 99.0, 20.0, 62.0, 0.0, 1.0, 100.0, 0.0, 1.4959, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[15.0, 99.0, 20.0, 62.0, 0.0, 1.0, 100.0, 0.0, 1.3183, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1612.0, 80.6, 23.4, 62.0, 0.0, 1.0, 100.0, 1.0, 100.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 117.65, 20.0, 30.0, 30.0, 40.0 ],
[1609.0, 85.9, 28.5, 62.0, 0.0, 1.0, 100.0, 1.0, 100.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 117.65, 20.0, 30.0, 30.0, 40.0 ],
[1691.0, 100.8, 44.0, 123.9, 0.0, 1.0, 100.0, 1.0, 150.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 176.471, 30.0, 45.0, 45.0, 60.0 ],
[1662.0, 106.9, 43.8, 123.9, 0.0, 1.0, 100.0, 0.0, 150.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 176.471, 30.0, 45.0, 45.0, 60.0 ],
[1731.0, 119.9, 64.6, 123.9, 0.0, 1.0, 100.0, 1.0, 200.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 235.29, 40.0, 60.0, 60.0, 80.0 ],
[1730.0, 121.8, 59.9, 123.9, 0.0, 1.0, 100.0, 1.0, 200.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 235.29, 40.0, 60.0, 60.0, 80.0 ],
[1649.0, 200.0, 180.0, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[32.0, 200.0, 34.0, 216.9, 0.0, 1.015, 100.0, 1.0, 350.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 411.76, 70.0, 105.0, 105.0, 140.0 ],
[1651.0, 300.0, 166.0, 166.0, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 342.86, 60.0, 90.0, 90.0, 120.0 ],
[1653.0, 300.0, 166.0, 166.0, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 342.86, 60.0, 90.0, 90.0, 120.0 ],
[1654.0, 300.0, 166.0, 166.0, 0.0, 1.0, 100.0, 0.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 342.86, 60.0, 90.0, 90.0, 120.0 ],
[1674.0, 300.0, 166.0, 166.0, 0.0, 1.0, 100.0, 0.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 342.86, 60.0, 90.0, 90.0, 120.0 ],
[20.0, 49.5, 19.0, 62.0, 0.0, 1.0, 100.0, 0.0, 4.6309, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1668.0, 600.0, 283.0, 290.6, 0.0, 1.0, 100.0, 1.0, 600.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 666.67, 120.0, 180.0, 180.0, 240.0 ],
[1727.0, 200.0, 54.0, 130.1, 0.0, 0.98, 100.0, 0.0, 210.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 247.06, 42.0, 63.0, 63.0, 84.0 ],
[1726.0, 120.7, 61.9, 123.9, 0.0, 0.98, 100.0, 0.0, 200.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 235.29, 40.0, 60.0, 60.0, 80.0 ],
[1697.0, 450.0, 154.0, 290.6, 0.0, 1.0, 100.0, 1.0, 600.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 666.67, 120.0, 180.0, 180.0, 240.0 ],
[1643.0, 345.0, 100.0, 62.0, 0.0, 1.0, 100.0, 0.0, 100.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1725.0, 142.8, 36.0, 123.9, 0.0, 1.0, 100.0, 1.0, 200.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 235.29, 40.0, 60.0, 60.0, 80.0 ],
[1724.0, 138.7, 67.0, 123.9, 0.0, 1.0, 100.0, 1.0, 200.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 235.29, 40.0, 60.0, 60.0, 80.0 ],
[1710.0, 128.8, 69.5, 123.9, 0.0, 1.0, 100.0, 1.0, 200.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 235.294, 40.0, 60.0, 60.0, 80.0 ],
[1672.0, 184.5, 123.5, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1671.0, 181.3, 127.5, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1723.0, 34.9, 3.9, 20.0, 0.0, 1.0, 100.0, 0.0, 50.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 58.0, 10.0, 15.0, 15.0, 20.0 ],
[1722.0, 90.0, 1.0, 50.0, 0.0, 1.01, 100.0, 1.0, 90.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 100.0, 18.0, 27.0, 27.0, 36.0 ],
[1721.0, 90.0, 1.0, 50.0, 0.0, 1.0, 100.0, 0.0, 90.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 100.0, 18.0, 27.0, 27.0, 36.0 ],
[1720.0, 90.0, 1.0, 50.0, 0.0, 1.0, 100.0, 0.0, 90.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 100.0, 18.0, 27.0, 27.0, 36.0 ],
[1719.0, 90.0, 1.0, 50.0, 0.0, 1.0, 100.0, 0.0, 90.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 100.0, 18.0, 27.0, 27.0, 36.0 ],
[1646.0, 125.0, 40.0, 80.0, 0.0, 1.03, 100.0, 1.0, 125.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 177.177, 31.4, 47.1, 47.1, 62.8 ],
[1647.0, 125.0, 40.0, 80.0, 0.0, 1.03, 100.0, 1.0, 125.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 177.177, 31.4, 47.1, 47.1, 62.8 ],
[1676.0, 159.5, 85.5, 123.9, 0.0, 1.0, 100.0, 1.0, 200.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 235.29, 40.0, 60.0, 60.0, 80.0 ],
[1675.0, 159.5, 79.9, 123.9, 0.0, 1.0, 100.0, 1.0, 200.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 235.29, 40.0, 60.0, 60.0, 80.0 ],
[1718.0, 610.2, 90.7, 387.5, 0.0, 1.0, 100.0, 1.0, 800.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 888.89, 160.0, 240.0, 240.0, 320.0 ],
[1717.0, 574.5, 167.0, 387.5, 0.0, 1.0, 100.0, 1.0, 800.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 888.89, 160.0, 240.0, 240.0, 320.0 ],
[1692.0, 1004.3, 224.5, 484.0, 0.0, 1.0, 100.0, 1.0, 1000.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 1120.0, 201.6, 302.4, 302.4, 403.2 ],
[1663.0, 814.4, 190.8, 484.0, 0.0, 1.0, 100.0, 1.0, 1000.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 1120.0, 201.6, 302.4, 302.4, 403.2 ],
[1709.0, 105.1, 50.2, 77.46, 0.0, 1.03, 100.0, 1.0, 135.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 147.06, 27.0, 40.5, 40.5, 54.0 ],
[1708.0, 101.3, 47.1, 77.46, 0.0, 1.03, 100.0, 1.0, 135.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 147.06, 27.0, 40.5, 40.5, 54.0 ],
[5.0, 49.5, 19.0, 62.0, 0.0, 1.0, 100.0, 1.0, 9.7189, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[29.0, 49.5, 19.0, 62.0, 0.0, 1.0, 100.0, 0.0, 11.6859, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[2042.0, 39.5, 8.5, 20.0, 0.0, 1.0, 100.0, 0.0, 45.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 52.94, 9.0, 13.5, 13.5, 18.0 ],
[2040.0, 38.7, 4.5, 20.0, 0.0, 1.0, 100.0, 0.0, 45.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 52.94, 9.0, 13.5, 13.5, 18.0 ],
[2039.0, 39.0, 4.8, 20.0, 0.0, 1.0, 100.0, 0.0, 45.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 52.94, 9.0, 13.5, 13.5, 18.0 ],
[2037.0, 40.1, 6.6, 20.0, 0.0, 1.0, 100.0, 0.0, 45.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 52.94, 9.0, 13.5, 13.5, 18.0 ],
[1599.0, 50.0, 27.0, 20.0, 0.0, 1.0, 100.0, 0.0, 45.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 60.0, 10.0, 15.0, 15.0, 20.0 ],
[1597.0, 50.0, 27.0, 20.0, 0.0, 1.0, 100.0, 0.0, 45.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 60.0, 10.0, 15.0, 15.0, 20.0 ],
[1661.0, 99.0, 19.0, 62.0, 0.0, 1.0, 100.0, 0.0, 9.987, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1699.0, 597.1, 168.2, 290.6, 0.0, 1.0, 100.0, 1.0, 600.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 666.67, 120.0, 180.0, 180.0, 240.0 ],
[1698.0, 551.0, 167.2, 290.6, 0.0, 1.0, 100.0, 0.0, 600.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 666.67, 120.0, 180.0, 180.0, 240.0 ],
[1714.0, 213.5, 57.0, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1713.0, 235.0, 71.0, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1716.0, 222.7, 53.2, 185.9, 0.0, 1.0, 100.0, 0.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1715.0, 202.3, 59.3, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1680.0, 20.6, 6.6, 4.95, -0.0, 1.0, 100.0, 1.0, 10.271, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 49.5, 9.9, 14.85, 14.85, 19.8 ],
[1658.0, 99.0, 19.0, 62.0, 0.0, 1.0, 100.0, 0.0, 18.9532, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[21.0, 49.5, 19.0, 62.0, 0.0, 1.0, 100.0, 0.0, 11.5618, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1667.0, 594.9, 157.8, 290.6, 0.0, 1.03, 100.0, 1.0, 600.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 666.67, 120.0, 180.0, 180.0, 240.0 ],
[1673.0, 600.0, 137.0, 290.6, 0.0, 1.03, 100.0, 1.0, 600.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 666.67, 120.0, 180.0, 180.0, 240.0 ],
[1712.0, 256.7, 92.1, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1711.0, 256.7, 75.7, 185.9, 0.0, 1.0, 100.0, 1.0, 300.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 352.94, 60.0, 90.0, 90.0, 120.0 ],
[1749.0, 564.0, 103.0, 290.6, 0.0, 1.0, 100.0, 1.0, 600.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 666.67, 120.0, 180.0, 180.0, 240.0 ],
[1748.0, 543.0, 116.0, 290.6, 0.0, 1.0, 100.0, 0.0, 600.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 666.67, 120.0, 180.0, 180.0, 240.0 ],
[1684.0, 235.0, 80.0, 185.9, 0.0, 1.0, 100.0, 1.0, 330.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 388.0, 66.0, 99.0, 99.0, 132.0 ],
[1683.0, 234.4, 74.8, 185.9, 0.0, 1.0, 100.0, 1.0, 330.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 388.0, 66.0, 99.0, 99.0, 132.0 ],
[22.0, 49.5, 19.0, 62.0, 0.0, 1.0, 100.0, 1.0, 11.5618, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1660.0, 99.0, 19.0, 62.0, 0.0, 1.0, 100.0, 0.0, 20.4252, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1689.0, 114.9, -7.7, 62.0, 0.0, 1.0, 100.0, 1.0, 6.5922, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[117.0, 99.0, 15.0, 62.0, 0.0, 1.0, 100.0, 0.0, 20.7189, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[110.0, 99.0, 15.0, 62.0, 0.0, 1.0, 100.0, 0.0, 13.0313, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[108.0, 99.0, 15.0, 62.0, 0.0, 1.0, 100.0, 0.0, 9.151, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1688.0, 91.2, -3.3, 62.0, 0.0, 1.0, 100.0, 1.0, 4.3841, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[118.0, 99.0, 15.0, 62.0, 0.0, 1.0, 100.0, 0.0, 2.2621, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[111.0, 50.0, 10.0, 62.0, 0.0, 1.0, 100.0, 0.0, 0.0276, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[107.0, 50.0, 10.0, 62.0, 0.0, 1.0, 100.0, 0.0, 0.2203, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1.0, 0.95, 117.65, 19.8, 29.7, 29.7, 39.6 ],
[1751.0, 497.9, 119.0, 290.6, 0.0, 1.0, 100.0, 0.0, 600.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 666.67, 120.0, 180.0, 180.0, 240.0 ],
[1750.0, 506.0, 142.0, 290.6, 0.0, 1.0, 100.0, 1.0, 600.0, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0.95, 666.67, 120.0, 180.0, 180.0, 240.0 ]
])
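# Branch data follows. This annotation is an addition, not part of the
# original case: the leading 13 columns appear to follow the standard
# PYPOWER/MATPOWER branch format (from bus, to bus, r, x, b, rateA, rateB,
# rateC, tap ratio, shift angle, status, angmin, angmax); the final column
# is an export-specific extra field of unknown meaning. Assuming this
# module ultimately returns the assembled ppc dict in the usual PYPOWER
# case convention, the case could then be solved with, for example,
# pypower.api.runpf(ppc); that wrapper lies outside this excerpt, so this
# usage note is an assumption about the surrounding code.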
ppc["branch"] = array([
[1418.0, 2021.0, 0.000709, 0.03936, 0.0061, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[541.0, 2024.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[540.0, 2024.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1545.0, 1418.0, 0.00764, 0.040964, 0.06498, 70.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1545.0, 1418.0, 0.007179, 0.042257, 0.064288, 70.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1545.0, 2021.0, 0.0124, 0.0812, 0.1232, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[542.0, 1960.0, 0.001528, 0.02064, 2.0724, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[539.0, 1960.0, 0.00172, 0.02296, 2.21372, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2234.0, 2233.0, 0.0, 0.187, 0.281, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1870.0, 1871.0, 0.0055, 0.2, 0.3, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1821.0, 1804.0, 0.0017, 0.0122, 0.03806, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1821.0, 1804.0, 0.0017, 0.0122, 0.03806, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1821.0, 1913.0, 0.002785, 0.020342, 0.06345, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1821.0, 1913.0, 0.002804, 0.020317, 0.063616, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2194.0, 2193.0, 0.0007, 0.0031, 0.0, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2194.0, 2193.0, 0.0007, 0.0031, 0.0, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1869.0, 2170.0, 0.0, 0.0001, 0.0002, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2232.0, 2231.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2232.0, 1962.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2232.0, 1988.0, 0.00046, 0.003737, 0.012788, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2232.0, 1988.0, 0.000424, 0.003818, 0.01291, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2232.0, 1993.0, 0.001928, 0.011229, 0.034974, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2232.0, 1993.0, 0.001775, 0.011229, 0.034426, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2232.0, 1824.0, 0.00242, 0.01694, 0.049586, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2232.0, 1824.0, 5e-06, 3.5e-05, 2.4e-05, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2232.0, 1839.0, 0.000545, 0.004212, 0.013316, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2232.0, 1839.0, 0.000541, 0.004268, 0.013416, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1966.0, 1965.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1966.0, 1961.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1966.0, 2034.0, 0.000436, 0.005137, 0.500594, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1763.0, 2099.0, 0.004241, 0.030126, 0.085066, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2192.0, 1782.0, 0.002004, 0.011367, 0.016964, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2192.0, 1840.0, 0.001859, 0.011245, 0.03521, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2192.0, 1840.0, 0.001995, 0.011437, 0.033768, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1794.0, 2208.0, 0.002049, 0.019073, 0.054854, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1794.0, 2026.0, 0.004879, 0.030837, 0.09544, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1796.0, 2220.0, 0.001408, 0.006842, 0.024408, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1796.0, 2220.0, 0.001394, 0.006874, 0.024286, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2000.0, 1999.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2000.0, 1998.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2000.0, 2153.0, 0.008206, 0.048173, 0.133258, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2000.0, 2153.0, 0.007348, 0.042683, 0.114282, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2000.0, 2152.0, 0.007455, 0.049655, 0.13954, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2000.0, 1776.0, 0.007141, 0.033921, 0.09508, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2000.0, 2065.0, 0.0017, 0.0076, 0.0198, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2000.0, 2065.0, 0.0018, 0.00704, 0.0182, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2000.0, 2004.0, 0.0041, 0.0196, 0.0546, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2000.0, 1989.0, 0.005358, 0.0248, 0.0503, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2000.0, 1989.0, 0.004066, 0.021045, 0.057736, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2000.0, 2036.0, 0.0139, 0.0491, 0.1352, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2000.0, 1931.0, 0.001403, 0.007678, 0.020786, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2003.0, 2002.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2003.0, 2001.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2003.0, 115.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2003.0, 1970.0, 0.000812, 0.015612, 1.68775, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2003.0, 1972.0, 0.000816, 0.015984, 1.68775, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2003.0, 1789.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2003.0, 483.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[115.0, 109.0, 0.001236, 0.013293, 1.480528, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2191.0, 1837.0, 0.001635, 0.012705, 0.037662, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2191.0, 1818.0, 0.01022, 0.042629, 0.06611, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2226.0, 2210.0, 0.001173, 0.005248, 0.008748, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2226.0, 2190.0, 0.00036, 0.0073, 0.0134, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2189.0, 2188.0, 0.0023, 0.0078, 0.0138, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2189.0, 1907.0, 0.002424, 0.014193, 0.040774, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2189.0, 2187.0, 0.007996, 0.039339, 0.110062, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2186.0, 2217.0, 0.0055, 0.0238, 0.0364, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2186.0, 1956.0, 0.002, 0.01, 0.016, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2186.0, 2185.0, 0.0028, 0.0141, 0.0216, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2219.0, 2218.0, 0.002676, 0.015582, 0.050366, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2219.0, 2218.0, 0.002791, 0.015447, 0.050366, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2221.0, 1796.0, 0.001819, 0.009567, 0.03228, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2221.0, 1796.0, 0.00179, 0.009574, 0.03228, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2221.0, 2219.0, 0.001167, 0.006646, 0.023698, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2221.0, 2219.0, 0.001154, 0.006607, 0.023536, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2221.0, 2215.0, 0.0029, 0.0172, 0.0498, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2221.0, 2215.0, 0.003, 0.0174, 0.0496, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2221.0, 1947.0, 0.00434, 0.02042, 0.09428, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2221.0, 2216.0, 0.0005, 0.00293, 0.008814, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2221.0, 2216.0, 0.0005, 0.00293, 0.008814, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2221.0, 1938.0, 0.001983, 0.0125, 0.038, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2221.0, 2217.0, 0.0026, 0.0159, 0.045, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2221.0, 2217.0, 0.0025, 0.0156, 0.04604, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2221.0, 1956.0, 0.001996, 0.015004, 0.049722, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2221.0, 1956.0, 0.001942, 0.015223, 0.048658, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2221.0, 2214.0, 0.00705, 0.0366, 0.0638, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1970.0, 122.0, 0.004241, 0.030126, 0.085066, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1970.0, 2032.0, 0.001038, 0.010782, 0.99978, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1972.0, 112.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1972.0, 1970.0, 1e-05, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1972.0, 1971.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1972.0, 2034.0, 0.000863, 0.008857, 0.583716, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[122.0, 121.0, 0.000863, 0.008857, 0.583716, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1898.0, 1970.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1898.0, 122.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1898.0, 120.0, 0.001351, 0.015445, 1.51142, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1896.0, 1972.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1896.0, 1897.0, 0.001355, 0.017948, 1.76, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2184.0, 2169.0, 0.002551, 0.012, 0.032826, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2184.0, 2169.0, 0.002288, 0.012288, 0.051244, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2203.0, 2134.0, 0.0149, 0.0858, 0.1412, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2203.0, 1949.0, 0.0105, 0.05925, 0.0525, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2203.0, 2208.0, 0.00447, 0.02537, 0.03784, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2183.0, 2222.0, 0.001446, 0.009469, 0.030074, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2212.0, 1473.0, 0.0218, 0.0638, 0.066, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2212.0, 1831.0, 0.004731, 0.023671, 0.047954, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2212.0, 2097.0, 0.003778, 0.017949, 0.05031, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2212.0, 2182.0, 0.0035, 0.0205, 0.0556, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2212.0, 2182.0, 0.007552, 0.0302, 0.046742, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2212.0, 1909.0, 0.004017, 0.028224, 0.081516, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2181.0, 57.0, 1e-06, 1e-06, 2e-06, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2181.0, 2209.0, 0.0143, 0.075, 0.1148, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2181.0, 2180.0, 0.0006, 0.0032, 0.005, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2181.0, 2179.0, 0.0052, 0.0259, 0.038, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1770.0, 1912.0, 0.0004, 0.003044, 0.009322, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1770.0, 1912.0, 0.0004, 0.003044, 0.009322, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1770.0, 2155.0, 0.000856, 0.006515, 0.019094, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1770.0, 2155.0, 0.000856, 0.006515, 0.019094, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1770.0, 2224.0, 0.00164, 0.012482, 0.036582, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1770.0, 2224.0, 0.00164, 0.012482, 0.036582, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1770.0, 2030.0, 0.001344, 0.010229, 0.02998, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1770.0, 2030.0, 0.001344, 0.010229, 0.02998, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1770.0, 1940.0, 0.001313, 0.009985, 0.029266, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1770.0, 1940.0, 0.001313, 0.009985, 0.029266, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1772.0, 1771.0, 0.000697, 0.008904, 0.966246, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1772.0, 1771.0, 0.000697, 0.008904, 0.966246, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1944.0, 42.0, 0.003347, 0.019091, 0.05291, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1944.0, 1888.0, 0.00452, 0.021267, 0.06035, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1944.0, 1888.0, 0.0033, 0.021, 0.061034, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[40.0, 2157.0, 0.002254, 0.015419, 0.044362, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1988.0, 1985.0, 0.0004, 0.0018, 0.0044, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1988.0, 1985.0, 0.0004, 0.0018, 0.0044, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1988.0, 2193.0, 0.0003, 0.0017, 0.004, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1988.0, 2193.0, 0.0003, 0.0025, 0.005, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1988.0, 2090.0, 0.0019, 0.0086, 0.0214, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1988.0, 2087.0, 0.0008, 0.0055, 0.0142, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1924.0, 2226.0, 0.002291, 0.017079, 0.050654, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1924.0, 2226.0, 0.00258, 0.018126, 0.05235, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1924.0, 1856.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1924.0, 2227.0, 0.004044, 0.029321, 0.090328, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1924.0, 2227.0, 0.003984, 0.029357, 0.09127, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1924.0, 2074.0, 0.001113, 0.006391, 0.02179, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1924.0, 2074.0, 0.001088, 0.006441, 0.021698, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1813.0, 1928.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1812.0, 1924.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1928.0, 1970.0, 0.0012, 0.015315, 1.662034, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1928.0, 1972.0, 0.0012, 0.015315, 1.662034, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1928.0, 1855.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1928.0, 1790.0, 0.0005, 0.009109, 0.977482, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1928.0, 1790.0, 0.000499, 0.009108, 0.977482, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1928.0, 2034.0, 0.000494, 0.009033, 0.96659, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1928.0, 2024.0, 0.000363, 0.006412, 0.672766, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1912.0, 2155.0, 0.000721, 0.003805, 0.023416, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2177.0, 2175.0, 0.0018, 0.0107, 0.0208, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2177.0, 2175.0, 0.0013, 0.0109, 0.0364, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2177.0, 2174.0, 0.003659, 0.01587, 0.045896, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2177.0, 2176.0, 0.001, 0.004, 0.0076, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2177.0, 2176.0, 0.0009, 0.0039, 0.00888, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2173.0, 2171.0, 0.0049, 0.0203, 0.0352, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2173.0, 2172.0, 0.0014, 0.0089, 0.0272, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1810.0, 1939.0, 0.000764, 0.005558, 0.06534, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1810.0, 2202.0, 0.001198, 0.009194, 0.095348, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2171.0, 2168.0, 0.002645, 0.016233, 0.122918, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2171.0, 1829.0, 0.000831, 0.007075, 0.049208, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2171.0, 2169.0, 0.0006, 0.0048, 0.0144, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2171.0, 2169.0, 0.0007, 0.005, 0.0146, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2171.0, 1941.0, 0.0005, 0.003, 0.0076, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1809.0, 2218.0, 0.000453, 0.005, 0.0074, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1809.0, 2218.0, 0.000453, 0.005, 0.0074, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[53.0, 1909.0, 0.003648, 0.013602, 0.02284, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[55.0, 1909.0, 0.003648, 0.013602, 0.02284, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[36.0, 1831.0, 0.001722, 0.010968, 0.017098, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2167.0, 1982.0, 0.0036, 0.0317, 0.0886, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2167.0, 1983.0, 0.00206, 0.01115, 0.01946, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2162.0, 1908.0, 0.000426, 0.002537, 0.00866, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2162.0, 1908.0, 0.00045, 0.002581, 0.008058, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2162.0, 2161.0, 0.001, 0.006138, 0.017238, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2162.0, 2161.0, 0.001, 0.00539, 0.01767, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1925.0, 1794.0, 0.004382, 0.027697, 0.085722, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1925.0, 1794.0, 0.003049, 0.028391, 0.081652, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1925.0, 1887.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1925.0, 2166.0, 0.003412, 0.01859, 0.035532, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1925.0, 2209.0, 0.005598, 0.030473, 0.051208, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1925.0, 2209.0, 0.005475, 0.032322, 0.077422, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1925.0, 1908.0, 0.005469, 0.034514, 0.10096, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1925.0, 1908.0, 0.005539, 0.034934, 0.100658, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1925.0, 2164.0, 0.00228, 0.015838, 0.046554, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1925.0, 2208.0, 0.005808, 0.044554, 0.131736, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1925.0, 2026.0, 0.014736, 0.08342, 0.159408, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1927.0, 1928.0, 0.001024, 0.01164, 1.045364, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1927.0, 1928.0, 0.00083, 0.011237, 1.038556, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1927.0, 1886.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1927.0, 1814.0, 0.00049, 0.005109, 0.49856, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2166.0, 2164.0, 0.0019, 0.0094, 0.0118, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2166.0, 2165.0, 0.0011, 0.006921, 0.0214, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2166.0, 2165.0, 0.001254, 0.006957, 0.020732, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2166.0, 1783.0, 0.018061, 0.104849, 0.16225, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2166.0, 2163.0, 0.02, 0.128, 0.184, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1841.0, 1925.0, 0.002005, 0.015458, 0.048382, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1841.0, 1925.0, 0.001952, 0.015406, 0.048262, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2160.0, 1842.0, 0.009545, 0.050416, 0.0775, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2160.0, 1910.0, 0.001505, 0.00955, 0.029252, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2159.0, 2156.0, 0.0024, 0.0141, 0.0394, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2159.0, 2156.0, 0.002467, 0.012564, 0.036174, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2159.0, 2158.0, 0.0036, 0.0224, 0.0614, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2159.0, 2157.0, 0.0066, 0.0357, 0.056, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2159.0, 2157.0, 0.0066, 0.0357, 0.066724, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1906.0, 2156.0, 0.001131, 0.010327, 0.03263, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1906.0, 2156.0, 0.00134, 0.010137, 0.032934, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2155.0, 2232.0, 0.002, 0.011176, 0.022224, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2155.0, 2232.0, 0.002, 0.011176, 0.022224, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2155.0, 2154.0, 0.000957, 0.004942, 0.015, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2155.0, 1940.0, 0.0013, 0.0068, 0.06552, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[45.0, 1995.0, 0.007107, 0.034738, 0.060772, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[45.0, 1995.0, 0.004876, 0.023832, 0.041692, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[45.0, 2185.0, 0.002149, 0.010502, 0.018372, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[45.0, 2185.0, 0.00157, 0.007675, 0.013426, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2188.0, 2228.0, 0.0032, 0.0124, 0.033, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2188.0, 2228.0, 0.003, 0.0143, 0.0408, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2153.0, 2152.0, 0.0053, 0.0319, 0.0654, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1987.0, 2003.0, 0.00057, 0.005567, 0.51967, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2151.0, 2150.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2151.0, 2149.0, 0.0003, 0.0024, 0.0064, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2151.0, 2149.0, 0.0003, 0.0024, 0.0064, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2148.0, 2147.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2148.0, 2146.0, 0.0003, 0.0024, 0.0062, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2145.0, 2143.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2145.0, 2142.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2145.0, 2141.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2145.0, 2144.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2142.0, 1987.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2142.0, 2139.0, 0.0016, 0.0178, 1.672, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2142.0, 2140.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2141.0, 2138.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2137.0, 2142.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2137.0, 2141.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2137.0, 2135.0, 0.0015, 0.0181, 1.6626, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2137.0, 2136.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1807.0, 2106.0, 0.001225, 0.00965, 0.029664, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2156.0, 51.0, 0.00113, 0.008562, 0.02454, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2156.0, 51.0, 0.001024, 0.007755, 0.022224, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2156.0, 2130.0, 0.008293, 0.046318, 0.129332, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2175.0, 2207.0, 0.001095, 0.007076, 0.019756, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2175.0, 2207.0, 0.001116, 0.007079, 0.019756, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2175.0, 1784.0, 0.000787, 0.004344, 0.014244, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2175.0, 1784.0, 0.000787, 0.004344, 0.014244, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1947.0, 2220.0, 0.000603, 0.003376, 0.009118, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1947.0, 2220.0, 0.000475, 0.00314, 0.009422, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2209.0, 2134.0, 0.0137, 0.0773, 0.1374, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2209.0, 2208.0, 0.00517, 0.0294, 0.04392, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2094.0, 1791.0, 0.000869, 0.007208, 0.024548, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2094.0, 1791.0, 0.000738, 0.007235, 0.024668, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2094.0, 1990.0, 0.001151, 0.007729, 0.026286, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2094.0, 1990.0, 0.000871, 0.007813, 0.026216, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2094.0, 48.0, 0.005823, 0.027349, 0.07467, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2094.0, 48.0, 0.005823, 0.027349, 0.07467, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2094.0, 1842.0, 0.001531, 0.010085, 0.030386, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2094.0, 1842.0, 0.001531, 0.010085, 0.030386, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2094.0, 2228.0, 0.007567, 0.040931, 0.114362, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2094.0, 2228.0, 0.006829, 0.035599, 0.10737, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2094.0, 2228.0, 0.010092, 0.044787, 0.083766, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2094.0, 1.0, 0.006166, 0.027296, 0.045504, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1937.0, 1792.0, 0.0, 1e-06, 0.0, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1937.0, 2133.0, 0.00124, 0.008152, 0.014254, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1937.0, 2014.0, 0.002055, 0.016456, 0.05077, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1937.0, 2014.0, 0.002055, 0.016456, 0.05077, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1937.0, 1774.0, 0.005207, 0.03944, 0.113034, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1792.0, 2123.0, 0.00124, 0.01052, 0.018254, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1792.0, 2014.0, 0.002055, 0.016456, 0.05077, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1792.0, 1774.0, 0.005207, 0.03944, 0.113034, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1901.0, 1913.0, 0.0037, 0.0294, 0.085666, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1802.0, 1913.0, 0.002304, 0.015628, 0.04459, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2152.0, 2132.0, 0.002, 0.0066, 0.0096, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2152.0, 2131.0, 0.002, 0.0084, 0.0176, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2152.0, 2131.0, 0.0027, 0.009, 0.0144, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1820.0, 1821.0, 0.003241, 0.020126, 0.057066, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[59.0, 1804.0, 0.0, 0.0001, 0.0, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[58.0, 1804.0, 0.0, 0.0001, 0.0, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2227.0, 2226.0, 0.0006, 0.00225, 0.007, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2227.0, 2226.0, 0.0006, 0.00225, 0.007, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2227.0, 1955.0, 0.000528, 0.005104, 0.00836, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2227.0, 1955.0, 0.000528, 0.005104, 0.00836, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2216.0, 2214.0, 0.0072, 0.0325, 0.047, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1854.0, 2128.0, 0.00069, 0.004434, 0.014444, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1854.0, 2198.0, 0.002688, 0.016159, 0.048504, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1854.0, 2172.0, 0.000758, 0.004368, 0.015356, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1854.0, 2172.0, 0.000706, 0.004367, 0.015052, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2200.0, 1943.0, 0.0003, 0.0029, 0.00475, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2010.0, 557.0, 1e-06, 0.0001, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2010.0, 556.0, 1e-06, 0.0001, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2010.0, 553.0, 1e-06, 0.0001, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2010.0, 552.0, 1e-06, 0.0001, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2010.0, 2009.0, 0.0, 0.0001, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2130.0, 51.0, 0.006325, 0.047909, 0.137306, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2130.0, 2156.0, 0.006231, 0.047431, 0.139012, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2130.0, 2129.0, 0.008403, 0.052574, 0.08514, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2130.0, 2129.0, 0.008106, 0.03814, 0.0886, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2128.0, 1840.0, 0.001822, 0.010859, 0.032462, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2211.0, 2210.0, 0.0043, 0.0204, 0.0302, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[46.0, 1925.0, 0.007438, 0.056343, 0.161476, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[46.0, 2166.0, 0.005702, 0.043196, 0.123798, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[46.0, 1783.0, 0.005678, 0.043008, 0.12326, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2210.0, 1910.0, 0.004774, 0.033037, 0.094882, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2127.0, 2225.0, 0.0016, 0.0087, 0.0092, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2127.0, 1824.0, 0.002094, 0.01628, 0.048262, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1837.0, 43.0, 0.002851, 0.021598, 0.0619, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1837.0, 43.0, 0.002851, 0.021598, 0.0619, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1837.0, 3.0, 0.007298, 0.023277, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1826.0, 1827.0, 0.002963, 0.017781, 0.051432, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2168.0, 2172.0, 0.001353, 0.007979, 0.09775, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2126.0, 2177.0, 0.001083, 0.006426, 0.017174, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2125.0, 2133.0, 0.001, 0.0066, 0.01932, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2125.0, 2133.0, 0.0011, 0.0066, 0.0216, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2125.0, 2124.0, 0.001048, 0.007655, 0.021428, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2125.0, 2124.0, 0.001064, 0.007566, 0.02158, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1806.0, 1968.0, 0.004027, 0.025987, 0.06444, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1806.0, 1968.0, 0.006024, 0.031897, 0.07314, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[41.0, 1777.0, 0.002361, 0.01109, 0.030276, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[41.0, 1777.0, 0.002361, 0.01109, 0.030276, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[41.0, 2036.0, 0.001453, 0.011009, 0.031552, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[41.0, 2036.0, 0.001453, 0.011009, 0.031552, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[41.0, 1817.0, 0.002715, 0.020567, 0.058944, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[41.0, 1817.0, 0.002715, 0.020567, 0.058944, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[54.0, 2064.0, 0.003648, 0.013602, 0.02284, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1800.0, 1944.0, 0.00362, 0.02356, 0.070238, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1800.0, 1944.0, 0.00362, 0.02356, 0.070238, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1917.0, 1978.0, 0.001756, 0.012722, 0.039038, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1917.0, 1978.0, 0.001756, 0.012768, 0.039174, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2193.0, 2232.0, 0.00036, 0.00247, 0.008304, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2193.0, 2232.0, 0.00036, 0.002473, 0.008404, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1793.0, 1831.0, 0.004018, 0.02119, 0.031322, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1952.0, 1951.0, 0.00445, 0.02678, 0.0424, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1834.0, 1973.0, 0.001166, 0.01489, 1.616022, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1834.0, 1897.0, 0.000188, 0.003424, 0.356704, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1834.0, 1897.0, 0.000184, 0.003403, 0.358824, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1834.0, 1897.0, 0.000222, 0.003421, 0.351524, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1858.0, 1859.0, 0.0011, 0.0097, 0.030288, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1858.0, 1859.0, 0.0011, 0.0097, 0.030288, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2174.0, 2126.0, 0.0016, 0.0111, 0.0326, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2174.0, 2126.0, 0.002435, 0.013008, 0.039056, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2174.0, 2121.0, 0.0012, 0.0051, 0.017, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2174.0, 2182.0, 0.01269, 0.070386, 0.213056, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2174.0, 2120.0, 0.0205, 0.0676, 0.291, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2174.0, 44.0, 0.005062, 0.023775, 0.064912, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2015.0, 2196.0, 0.0006, 0.0031, 0.0436, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1861.0, 2196.0, 0.0006, 0.0031, 0.0436, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2118.0, 1780.0, 0.014222, 0.06951, 0.121602, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2118.0, 1780.0, 0.014222, 0.06951, 0.121602, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2116.0, 2115.0, 0.0, 0.0001, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2114.0, 2115.0, 0.0, 0.0001, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2113.0, 2115.0, 0.0, 0.0001, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2119.0, 1924.0, 0.024837, 0.137353, 0.21539, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2119.0, 2118.0, 0.0018, 0.0039, 0.0067, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2119.0, 1780.0, 0.013636, 0.077335, 0.11541, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2119.0, 1780.0, 0.013636, 0.077335, 0.11541, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2119.0, 2117.0, 0.00714, 0.021, 0.0326, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2119.0, 1992.0, 0.015847, 0.094112, 0.149088, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2119.0, 1992.0, 0.0163, 0.097, 0.1432, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1977.0, 1927.0, 0.000918, 0.012759, 1.2575, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1977.0, 1927.0, 0.000926, 0.012736, 1.256638, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1977.0, 1883.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1977.0, 1976.0, 0.001129, 0.015209, 1.424948, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1977.0, 1902.0, 0.000146, 0.001874, 0.18991, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1977.0, 1903.0, 0.000172, 0.001884, 0.195408, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1780.0, 1992.0, 0.004254, 0.024125, 0.036002, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1780.0, 1992.0, 0.004254, 0.024125, 0.036002, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1975.0, 1977.0, 0.001129, 0.015209, 0.142494, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1975.0, 1974.0, 0.0, 0.0001, 0.0, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2112.0, 2111.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2110.0, 2111.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2109.0, 1844.0, 0.002676, 0.015397, 0.031688, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2109.0, 2207.0, 0.0017, 0.0107, 0.0284, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2109.0, 2207.0, 0.0006, 0.0105, 0.0286, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2109.0, 1769.0, 0.003999, 0.030444, 0.089226, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2109.0, 1769.0, 0.003999, 0.030444, 0.089226, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2109.0, 2005.0, 0.0016, 0.0048, 0.1224, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2109.0, 2204.0, 0.001983, 0.011962, 0.03345, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2109.0, 2108.0, 0.0017, 0.0091, 0.0272, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2109.0, 2108.0, 0.002178, 0.011857, 0.128572, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2107.0, 1948.0, 0.01167, 0.052547, 0.12149, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2107.0, 1953.0, 0.0086, 0.0528, 0.15631, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2106.0, 1948.0, 0.004412, 0.025837, 0.072956, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2106.0, 1921.0, 0.0041, 0.0339, 0.104598, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2106.0, 2105.0, 0.005559, 0.034409, 0.034118, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2106.0, 2105.0, 0.006452, 0.030781, 0.04556, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2202.0, 1939.0, 0.001728, 0.014502, 0.11525, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2202.0, 1939.0, 0.001774, 0.014573, 0.113328, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2202.0, 2200.0, 0.000613, 0.004558, 0.02771, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2202.0, 2200.0, 0.000609, 0.004555, 0.027656, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2202.0, 1943.0, 0.000486, 0.004698, 0.007696, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2202.0, 1943.0, 0.000486, 0.004698, 0.007696, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2202.0, 1874.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2202.0, 2223.0, 0.00323, 0.013, 0.04, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2202.0, 2223.0, 0.00323, 0.013, 0.04, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2202.0, 2199.0, 0.00423, 0.0233, 0.06904, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2202.0, 2199.0, 0.002383, 0.018144, 0.053178, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2202.0, 2201.0, 0.000809, 0.006324, 0.084454, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2202.0, 2201.0, 0.0008, 0.0063, 0.01612, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1976.0, 1875.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1976.0, 1974.0, 1e-05, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1976.0, 1897.0, 0.001027, 0.013427, 1.31672, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1976.0, 1897.0, 0.001027, 0.013427, 1.31672, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1976.0, 1926.0, 0.00054, 0.007314, 0.736074, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1974.0, 1973.0, 0.001798, 0.017107, 0.320912, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1984.0, 2153.0, 0.0013, 0.0098, 0.0296, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1984.0, 2153.0, 0.0013, 0.0098, 0.0298, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2104.0, 2119.0, 0.0099, 0.035083, 0.048204, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2012.0, 2011.0, 0.043836, 0.178923, 0.032564, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2102.0, 1930.0, 0.00553, 0.029104, 0.081816, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2102.0, 1930.0, 0.003466, 0.018151, 0.05141, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2102.0, 2101.0, 0.0019, 0.012, 0.0332, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2102.0, 2100.0, 0.0098, 0.0256, 0.0, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2146.0, 2149.0, 0.0, 1e-06, 2e-06, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2146.0, 2075.0, 0.004, 0.0362, 0.0958, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2146.0, 2098.0, 0.0042, 0.0213, 0.0612, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2146.0, 2098.0, 0.00376, 0.021467, 0.060712, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2146.0, 1931.0, 0.005604, 0.031448, 0.087188, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2149.0, 2099.0, 0.0023, 0.0112, 0.03, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2149.0, 2099.0, 0.0026, 0.013, 0.03, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2149.0, 1915.0, 0.001405, 0.006673, 0.0208, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2149.0, 1915.0, 0.001368, 0.00666, 0.020638, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2103.0, 1806.0, 0.009481, 0.05461, 0.09703, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2103.0, 1942.0, 0.00216, 0.01062, 0.0171, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2103.0, 1942.0, 0.00216, 0.01062, 0.0171, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2103.0, 1915.0, 0.002927, 0.011569, 0.03306, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2103.0, 1915.0, 0.002199, 0.011585, 0.0324, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1936.0, 2069.0, 0.001533, 0.01167, 0.03418, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1936.0, 2069.0, 0.001405, 0.01136, 0.03412, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1938.0, 2217.0, 0.000413, 0.002459, 0.0076, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[52.0, 2098.0, 0.003648, 0.013602, 0.02284, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1948.0, 1838.0, 0.004812, 0.029932, 0.088632, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1948.0, 1838.0, 0.004831, 0.030014, 0.0893, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1948.0, 2105.0, 0.004686, 0.03165, 0.96246, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1948.0, 2105.0, 0.004761, 0.03174, 0.945046, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2097.0, 2182.0, 0.0012, 0.0056, 0.0108, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1959.0, 1876.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2164.0, 2179.0, 0.0053, 0.0326, 0.0446, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2134.0, 2096.0, 0.0064, 0.061, 0.0914, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1949.0, 1795.0, 0.001026, 0.009918, 0.016246, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1949.0, 1795.0, 0.001026, 0.009918, 0.016246, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1949.0, 2211.0, 0.00437, 0.0184, 0.0161, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1788.0, 2098.0, 0.008655, 0.03852, 0.0579, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2187.0, 1991.0, 0.00095, 0.00498, 0.008738, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2187.0, 1842.0, 0.001028, 0.005377, 0.008848, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2187.0, 1842.0, 0.001367, 0.007231, 0.011618, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2187.0, 1774.0, 0.000967, 0.008013, 0.027288, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2187.0, 1774.0, 0.000967, 0.008013, 0.027288, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1778.0, 1948.0, 0.001734, 0.013202, 0.038696, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1778.0, 1948.0, 0.001734, 0.013202, 0.038696, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1778.0, 2105.0, 0.00244, 0.018575, 0.05444, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1778.0, 2105.0, 0.00244, 0.018575, 0.05444, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2093.0, 2092.0, 0.0021, 0.009, 0.0162, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2093.0, 2092.0, 0.0021, 0.0092, 0.0164, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2122.0, 2091.0, 0.0018, 0.0107, 0.0316, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2122.0, 1.0, 0.0025, 0.01318, 0.01978, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2090.0, 2089.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2090.0, 2088.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2090.0, 1993.0, 0.001073, 0.006678, 0.020362, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2090.0, 1993.0, 0.001068, 0.006721, 0.020362, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2090.0, 2087.0, 0.0007, 0.004, 0.0106, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2090.0, 2087.0, 0.0007, 0.004, 0.0106, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2090.0, 2086.0, 0.0014, 0.0061, 0.0178, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2090.0, 2086.0, 0.0015, 0.0062, 0.0178, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2088.0, 2092.0, 0.000577, 0.004153, 0.012844, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2088.0, 2092.0, 0.000577, 0.004153, 0.013046, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2088.0, 2084.0, 0.0085, 0.0302, 0.0566, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2088.0, 2084.0, 0.0085, 0.0393, 0.0566, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2088.0, 2085.0, 0.0019, 0.0104, 0.0164, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2088.0, 2085.0, 0.0016, 0.008, 0.022, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2088.0, 1779.0, 0.001312, 0.009985, 0.029266, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2088.0, 1779.0, 0.001312, 0.009985, 0.029266, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2088.0, 1859.0, 0.002117, 0.014224, 0.044428, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2088.0, 1859.0, 0.014442, 0.014442, 0.04484, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2083.0, 2082.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2083.0, 2135.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2083.0, 2139.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2083.0, 1771.0, 0.000327, 0.00455, 0.448486, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2135.0, 1966.0, 0.000205, 0.002384, 0.23393, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2135.0, 1966.0, 0.000168, 0.00234, 0.237148, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2135.0, 2081.0, 0.0006, 0.0071, 0.697466, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2080.0, 2135.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2080.0, 2139.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2080.0, 2079.0, 0.0007, 0.0071, 0.6752, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1767.0, 1795.0, 0.0007, 0.003549, 0.011358, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1767.0, 1795.0, 0.0007, 0.003549, 0.011358, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[114.0, 109.0, 0.001236, 0.013293, 1.480528, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[114.0, 1786.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[113.0, 112.0, 0.001641, 0.01764, 1.964682, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[113.0, 1786.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1785.0, 2205.0, 0.001323, 0.013531, 0.041808, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1785.0, 2205.0, 0.001323, 0.013531, 0.041808, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1785.0, 2084.0, 9.8e-05, 0.001366, 0.134654, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1785.0, 2084.0, 9.8e-05, 0.001366, 0.134654, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1785.0, 119.0, 0.003842, 0.035772, 0.102888, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1785.0, 119.0, 0.003842, 0.035772, 0.102888, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1929.0, 1932.0, 0.00352, 0.01739, 0.027392, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2099.0, 2075.0, 0.0075, 0.0333, 0.0862, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2099.0, 1932.0, 0.000571, 0.003917, 0.011298, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2099.0, 1932.0, 0.000625, 0.004002, 0.011024, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2198.0, 2192.0, 0.005799, 0.044143, 0.129376, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2198.0, 2192.0, 0.005799, 0.044143, 0.129376, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2198.0, 2197.0, 0.000333, 0.001914, 0.010434, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2198.0, 2197.0, 0.000335, 0.001915, 0.010716, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2198.0, 2195.0, 0.000709, 0.004256, 0.014632, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2198.0, 2196.0, 0.001161, 0.006866, 0.02572, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1934.0, 1933.0, 0.006777, 0.036325, 0.099522, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1766.0, 2098.0, 0.004241, 0.030126, 0.085066, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1968.0, 1948.0, 0.007335, 0.040468, 0.132678, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1968.0, 1948.0, 0.007335, 0.040468, 0.132678, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2123.0, 1986.0, 0.0014, 0.008, 0.012, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2123.0, 2133.0, 0.0024, 0.0152, 0.0254, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2123.0, 2133.0, 0.0028, 0.0165, 0.0256, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2123.0, 2122.0, 0.0014, 0.008, 0.0134, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2123.0, 2122.0, 0.0007, 0.0052, 0.0224, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2123.0, 2021.0, 0.012484, 0.069281, 0.11486, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2132.0, 2131.0, 0.0015, 0.0066, 0.012, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2178.0, 2191.0, 0.006813, 0.043, 0.06108, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2178.0, 1818.0, 0.001267, 0.006536, 0.0117, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2178.0, 1818.0, 0.001185, 0.006504, 0.010946, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[12.0, 1679.0, 0.0, 1e-05, 0.0, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[12.0, 116.0, 0.0, 1e-05, 0.0, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[11.0, 18.0, 0.0, 1e-05, 0.0, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[11.0, 17.0, 0.0, 1e-05, 0.0, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[11.0, 16.0, 0.0, 1e-05, 0.0, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[11.0, 15.0, 0.0, 1e-05, 0.0, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1857.0, 51.0, 0.002531, 0.019174, 0.05495, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1857.0, 2156.0, 0.003173, 0.027163, 0.078504, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1982.0, 1911.0, 0.004746, 0.035379, 0.105292, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1918.0, 1917.0, 0.00248, 0.01851, 0.055088, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1918.0, 1917.0, 0.002438, 0.01845, 0.055446, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1918.0, 2202.0, 0.001864, 0.014205, 0.044768, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1918.0, 2202.0, 0.001869, 0.014081, 0.044908, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1914.0, 2107.0, 0.0036, 0.019, 0.051544, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1914.0, 2058.0, 0.0061, 0.0313, 0.0847, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1914.0, 1953.0, 0.0113, 0.0675, 0.199492, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[49.0, 2171.0, 0.001603, 0.012145, 0.034808, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[49.0, 2169.0, 0.001099, 0.008326, 0.023862, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2218.0, 2185.0, 0.001653, 0.010407, 0.0294, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1849.0, 1966.0, 0.000152, 0.001935, 0.20991, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1849.0, 1966.0, 0.000124, 0.001938, 0.209752, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1849.0, 1848.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1849.0, 1847.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1849.0, 1846.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1849.0, 1845.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2074.0, 2233.0, 0.0045, 0.0226, 0.0614, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2172.0, 2198.0, 0.003409, 0.020465, 0.11888, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2172.0, 1829.0, 0.000246, 0.001611, 0.03219, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2172.0, 1829.0, 0.000222, 0.001538, 0.032516, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2172.0, 1867.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2172.0, 1865.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2172.0, 1840.0, 0.002366, 0.01494, 0.043588, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2172.0, 2073.0, 0.001, 0.0068, 0.0192, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2172.0, 2073.0, 0.001, 0.0072, 0.0196, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2172.0, 2169.0, 0.0016, 0.008, 0.0176, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2172.0, 2169.0, 0.002, 0.0121, 0.0176, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1973.0, 1868.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1973.0, 1866.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1973.0, 1897.0, 0.0014, 0.0163, 1.604962, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1973.0, 1926.0, 0.000371, 0.004039, 0.2452, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1797.0, 2221.0, 0.002538, 0.018658, 0.057658, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1797.0, 1947.0, 0.000244, 0.001883, 0.006854, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1797.0, 1947.0, 0.000319, 0.001779, 0.007006, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1797.0, 1947.0, 0.000316, 0.001744, 0.006838, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1797.0, 2216.0, 0.0032, 0.01325, 0.0247, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1797.0, 2220.0, 0.000283, 0.001786, 0.007918, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1797.0, 2220.0, 0.000276, 0.001786, 0.00784, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1797.0, 1823.0, 0.006105, 0.032408, 0.092494, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1797.0, 1823.0, 0.006105, 0.032408, 0.092494, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1797.0, 2214.0, 0.00572, 0.02325, 0.0247, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1799.0, 1970.0, 0.000271, 0.002947, 0.303246, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1799.0, 1798.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1799.0, 1897.0, 0.000631, 0.009242, 0.194064, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1799.0, 1969.0, 9.4e-05, 0.000882, 0.09577, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1798.0, 1972.0, 0.00026, 0.00296, 0.303556, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1798.0, 1897.0, 0.000581, 0.009148, 0.197, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1798.0, 1969.0, 9.5e-05, 0.000894, 0.096712, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1776.0, 2066.0, 0.000748, 0.003551, 0.009954, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1776.0, 2066.0, 0.000748, 0.003551, 0.009954, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2013.0, 1806.0, 0.004027, 0.025987, 0.06444, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2013.0, 1819.0, 0.000878, 0.008242, 0.022352, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2013.0, 1819.0, 0.001401, 0.008357, 0.023872, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2069.0, 1930.0, 0.003186, 0.016051, 0.046862, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2069.0, 1930.0, 0.003638, 0.018825, 0.052778, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2069.0, 1942.0, 0.001495, 0.008215, 0.023988, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2069.0, 1932.0, 0.003694, 0.020963, 0.05775, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2095.0, 1991.0, 0.0038, 0.0265, 0.0452, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2095.0, 1774.0, 0.002207, 0.016799, 0.049234, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2095.0, 1774.0, 0.002207, 0.016799, 0.049234, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2206.0, 1954.0, 0.000436, 0.003126, 0.010554, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2206.0, 1954.0, 0.00048, 0.003156, 0.010722, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2206.0, 2205.0, 0.0035, 0.0208, 0.0568, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2154.0, 2232.0, 0.001636, 0.007686, 0.020984, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2154.0, 2232.0, 0.001636, 0.007686, 0.020984, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2154.0, 1824.0, 0.001747, 0.011028, 0.02, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2068.0, 2174.0, 0.0053, 0.0356, 0.1608, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1995.0, 2127.0, 0.002277, 0.013038, 0.02106, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1995.0, 2185.0, 0.009767, 0.035062, 0.048936, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1995.0, 2185.0, 0.005959, 0.032066, 0.049696, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1819.0, 2062.0, 0.003176, 0.015785, 0.043182, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1819.0, 1953.0, 0.004039, 0.022981, 0.066948, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1775.0, 1817.0, 0.00056, 0.004262, 0.012492, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1775.0, 1817.0, 0.00056, 0.004262, 0.012492, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2067.0, 2004.0, 0.0011, 0.0053, 0.0164, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2067.0, 2066.0, 0.0035, 0.01357, 0.0193, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2205.0, 2130.0, 0.005, 0.0289, 0.081, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2205.0, 2130.0, 0.003152, 0.02578, 0.0731, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1920.0, 2177.0, 0.002603, 0.021498, 0.07278, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1920.0, 2177.0, 0.002582, 0.021425, 0.0731, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1920.0, 1919.0, 0.001405, 0.011326, 0.219716, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1920.0, 1919.0, 0.00139, 0.011124, 0.22341, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1920.0, 2156.0, 0.005768, 0.043001, 0.127542, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1920.0, 2156.0, 0.005768, 0.043001, 0.127542, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1920.0, 2175.0, 0.002549, 0.017938, 0.059848, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1920.0, 2175.0, 0.002488, 0.01794, 0.059848, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1920.0, 2126.0, 0.002403, 0.02124, 0.071276, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1920.0, 2126.0, 0.002353, 0.021196, 0.072128, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1920.0, 1833.0, 0.003269, 0.018545, 0.027674, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1920.0, 1833.0, 0.003269, 0.018545, 0.027674, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1920.0, 1833.0, 0.003269, 0.018545, 0.027674, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1920.0, 1832.0, 0.000607, 0.004514, 0.015152, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1920.0, 2.0, 0.000607, 0.004504, 0.015044, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1960.0, 1790.0, 0.000544, 0.007352, 0.76844, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1960.0, 1790.0, 0.000544, 0.007352, 0.76844, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1960.0, 1786.0, 0.000733, 0.009358, 1.015624, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1960.0, 1786.0, 0.000733, 0.009358, 1.015624, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1960.0, 123.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1960.0, 2079.0, 0.000508, 0.0044, 0.4396, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1960.0, 2081.0, 0.000464, 0.00536, 0.5338, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[123.0, 1959.0, 0.000968, 0.01148, 1.1461, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1978.0, 2183.0, 0.0019, 0.0102, 0.0276, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1978.0, 1888.0, 0.0035, 0.0221, 0.064074, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1978.0, 1888.0, 0.0036, 0.0222, 0.064304, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2121.0, 2071.0, 0.0028, 0.0171, 0.0458, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[37.0, 2149.0, 0.001399, 0.00713, 0.021124, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1791.0, 2187.0, 0.000547, 0.004293, 0.012496, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1791.0, 2187.0, 0.000564, 0.003571, 0.010164, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2087.0, 2203.0, 0.01588, 0.0793, 0.1166, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1840.0, 1782.0, 0.002004, 0.011367, 0.016964, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1888.0, 42.0, 0.001897, 0.010818, 0.029982, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2065.0, 2064.0, 0.0047, 0.0232, 0.0596, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2065.0, 1825.0, 0.010653, 0.057707, 0.104974, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2182.0, 1831.0, 0.006864, 0.041913, 0.08442, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2182.0, 2097.0, 0.001925, 0.009143, 0.02563, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2182.0, 2120.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2182.0, 44.0, 0.007721, 0.036266, 0.099012, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2120.0, 1454.0, 0.0152, 0.069, 0.1232, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2120.0, 2068.0, 0.0076, 0.0355, 0.1318, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2120.0, 2124.0, 0.0107, 0.0548, 0.1562, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2120.0, 2063.0, 0.0078, 0.0253, 0.08, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1958.0, 2230.0, 0.000968, 0.01148, 1.2124, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1765.0, 2212.0, 0.004241, 0.030126, 0.085066, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1765.0, 1909.0, 0.009008, 0.044028, 0.077024, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2062.0, 2102.0, 0.0019, 0.0088, 0.0194, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2062.0, 2102.0, 0.0016, 0.0072, 0.021, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2062.0, 2102.0, 0.001246, 0.007242, 0.0218, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2062.0, 1942.0, 0.0066, 0.03245, 0.0523, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2062.0, 2061.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2062.0, 2058.0, 0.0101, 0.0509, 0.141, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2062.0, 2060.0, 0.0013, 0.0092, 0.025, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2062.0, 2060.0, 0.00201, 0.01179, 0.0338, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2062.0, 2059.0, 0.0034, 0.01617, 0.044, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2062.0, 1953.0, 0.0025, 0.014, 0.036, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2062.0, 1953.0, 0.0025, 0.014, 0.036, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2057.0, 2003.0, 0.001561, 0.014418, 1.393376, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2057.0, 2141.0, 0.000512, 0.008616, 0.84623, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2057.0, 2010.0, 0.000932, 0.01154, 1.07545, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2057.0, 2009.0, 0.001, 0.0116, 1.0912, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2057.0, 2140.0, 0.0007, 0.008796, 0.873706, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2057.0, 2056.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2207.0, 2206.0, 0.00062, 0.00339, 0.00774, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2207.0, 2206.0, 0.00054, 0.00357, 0.00774, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2207.0, 2205.0, 0.003, 0.0161, 0.0416, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2207.0, 2054.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2207.0, 2052.0, 1e-05, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2207.0, 2018.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2207.0, 1784.0, 0.00052, 0.00287, 0.00941, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2207.0, 1784.0, 0.00052, 0.00287, 0.00941, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2207.0, 2053.0, 0.0015, 0.0078, 0.022, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2052.0, 2051.0, 0.0013, 0.0078, 0.0226, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2079.0, 315.0, 0.0, 0.0001, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2079.0, 2050.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2079.0, 2019.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2079.0, 2081.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2079.0, 2230.0, 0.000544, 0.007352, 0.76844, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2081.0, 307.0, 0.0, 0.0001, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2081.0, 2230.0, 0.00054, 0.00738, 0.766086, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2124.0, 2187.0, 0.00126, 0.007397, 0.019756, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2124.0, 1916.0, 0.000818, 0.0061, 0.001808, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2124.0, 1916.0, 0.000818, 0.0061, 0.001808, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2124.0, 6.0, 0.000717, 0.002597, 0.003648, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2124.0, 2121.0, 0.002019, 0.0095, 0.046, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2124.0, 2014.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2124.0, 2006.0, 0.0087, 0.0339, 0.2008, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2124.0, 1774.0, 0.001156, 0.006379, 0.020912, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2124.0, 1774.0, 0.001156, 0.006379, 0.020912, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2014.0, 2174.0, 0.0026, 0.0129, 0.0374, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2014.0, 2174.0, 0.0023, 0.0129, 0.0374, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2014.0, 2121.0, 0.002312, 0.016324, 0.04676, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2014.0, 2063.0, 0.0081, 0.0314, 0.0662, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2230.0, 1773.0, 0.000279, 0.003874, 0.381812, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2230.0, 1773.0, 0.000279, 0.003874, 0.381812, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2230.0, 2229.0, 0.000612, 0.007548, 0.76969, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2230.0, 2229.0, 0.000684, 0.007548, 0.761836, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2230.0, 2024.0, 0.000436, 0.006384, 0.62015, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2230.0, 2024.0, 0.00044, 0.00638, 0.6202, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2230.0, 2024.0, 0.00044, 0.00638, 0.6202, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2071.0, 2070.0, 0.0004, 0.0025, 0.0666, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2071.0, 2070.0, 0.0003, 0.0013, 0.0666, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2071.0, 2108.0, 0.0025, 0.0133, 0.0396, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1769.0, 1844.0, 0.003178, 0.024071, 0.068986, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1769.0, 1844.0, 0.003178, 0.024071, 0.068986, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1773.0, 2024.0, 0.000296, 0.004117, 0.40581, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1773.0, 2024.0, 0.000296, 0.004117, 0.40581, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1843.0, 1954.0, 0.000196, 0.001444, 0.005702, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1843.0, 1954.0, 0.00017, 0.001475, 0.00593, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1990.0, 1781.0, 0.002351, 0.017893, 0.052442, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1990.0, 1781.0, 0.002515, 0.019148, 0.05612, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1990.0, 1791.0, 0.001184, 0.005796, 0.016876, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1990.0, 1791.0, 0.000773, 0.005178, 0.014792, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1990.0, 2091.0, 0.002873, 0.014873, 0.026988, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1990.0, 2091.0, 0.001843, 0.012695, 0.028906, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2092.0, 1949.0, 0.000576, 0.005568, 0.00912, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2075.0, 1776.0, 0.003123, 0.014847, 0.041616, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2075.0, 1776.0, 0.003123, 0.014847, 0.041616, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2075.0, 2066.0, 0.003, 0.0162, 0.0458, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2075.0, 2066.0, 0.003, 0.0162, 0.0458, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1909.0, 1831.0, 0.000425, 0.002347, 0.007694, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1909.0, 1831.0, 0.000425, 0.002347, 0.007694, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2004.0, 2000.0, 0.0043, 0.0189, 0.0516, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[50.0, 1894.0, 0.007438, 0.037376, 0.062508, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[50.0, 1894.0, 0.007438, 0.037376, 0.062508, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2180.0, 2166.0, 0.011111, 0.065754, 0.098978, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2180.0, 2134.0, 0.0056, 0.0304, 0.0504, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2131.0, 2000.0, 0.0109, 0.0472, 0.1306, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2131.0, 2064.0, 0.00604, 0.037441, 0.111652, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2131.0, 2064.0, 0.006511, 0.037267, 0.111562, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2131.0, 2065.0, 0.015, 0.0413, 0.0936, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2048.0, 2047.0, 0.0049, 0.021, 0.034, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2048.0, 2214.0, 0.0132, 0.0474, 0.074, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1913.0, 2153.0, 0.0017, 0.0122, 0.03806, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1913.0, 2153.0, 0.0017, 0.0123, 0.038104, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1913.0, 2132.0, 0.0015, 0.0104, 0.03276, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1913.0, 2132.0, 0.0014, 0.0105, 0.03257, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1850.0, 2204.0, 0.0007, 0.003549, 0.011358, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1850.0, 2204.0, 0.00068, 0.003595, 0.011282, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1935.0, 1934.0, 0.00093, 0.005165, 0.014484, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2046.0, 2010.0, 0.00011, 0.0016, 0.157, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2046.0, 2010.0, 0.000112, 0.001608, 0.1727, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2046.0, 2045.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2045.0, 2010.0, 0.00011, 0.0016, 0.157, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2044.0, 2045.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2058.0, 1933.0, 0.001967, 0.011025, 0.032296, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2058.0, 1934.0, 0.00524, 0.028022, 0.078426, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2084.0, 1779.0, 0.003284, 0.025003, 0.07328, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2084.0, 1779.0, 0.003284, 0.025003, 0.07328, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2195.0, 2196.0, 0.0006, 0.0034, 0.016282, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1764.0, 1831.0, 4.9e-05, 0.000287, 0.001824, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[56.0, 2153.0, 0.003648, 0.013602, 0.02284, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2042.0, 2041.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2040.0, 2041.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2039.0, 2038.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2037.0, 2038.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2006.0, 1769.0, 0.005199, 0.039577, 0.115992, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2028.0, 1907.0, 0.001632, 0.014674, 0.046224, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2028.0, 1955.0, 1e-06, 1e-05, 0.0, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2028.0, 2228.0, 0.0022, 0.016793, 0.049218, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1805.0, 2064.0, 0.004105, 0.025004, 0.073654, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1989.0, 2075.0, 0.002775, 0.01195, 0.031086, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1989.0, 2075.0, 0.002042, 0.009724, 0.0056, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2036.0, 1777.0, 0.001686, 0.01625, 0.028548, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2036.0, 1776.0, 0.002319, 0.017657, 0.05175, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2036.0, 1776.0, 0.002319, 0.017657, 0.05175, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2158.0, 2159.0, 0.003785, 0.035893, 0.102126, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2158.0, 1832.0, 0.003733, 0.026363, 0.08693, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2158.0, 2.0, 0.003679, 0.026454, 0.08693, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2063.0, 2068.0, 0.0013, 0.0076, 0.1, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2085.0, 1949.0, 0.001026, 0.009918, 0.016246, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2060.0, 2101.0, 0.001194, 0.006769, 0.02107, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2060.0, 2101.0, 0.00123, 0.00755, 0.0216, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1828.0, 1827.0, 0.002291, 0.013129, 0.037544, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1894.0, 1951.0, 0.000967, 0.005386, 0.015858, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1894.0, 1951.0, 0.00083, 0.005543, 0.015894, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1894.0, 1800.0, 0.0032, 0.0256, 0.050238, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1894.0, 1800.0, 0.0032, 0.0256, 0.050238, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1894.0, 1952.0, 0.0053, 0.0287, 0.043366, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1894.0, 1888.0, 0.0046, 0.0265, 0.07574, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1894.0, 1888.0, 0.0049, 0.0281, 0.076512, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1894.0, 1893.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1894.0, 1891.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1894.0, 2047.0, 0.003, 0.0182, 0.052822, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1894.0, 2047.0, 0.003, 0.0183, 0.052868, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1894.0, 1827.0, 0.000858, 0.005166, 0.015054, 10.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1894.0, 1827.0, 0.000914, 0.005525, 0.01506, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1897.0, 1895.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1897.0, 1892.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[120.0, 1897.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2047.0, 1917.0, 0.006735, 0.04502, 0.1218, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2047.0, 1978.0, 0.005, 0.0273, 0.0742, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2047.0, 2048.0, 0.011661, 0.047648, 0.068356, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2047.0, 2163.0, 0.0157, 0.0776, 0.1892, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1762.0, 1921.0, 0.004241, 0.030126, 0.085066, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2225.0, 1912.0, 0.0035, 0.0199, 0.055758, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2225.0, 2167.0, 0.0014, 0.0093, 0.02272, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2225.0, 2167.0, 0.0026, 0.0129, 0.0206, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2225.0, 2224.0, 0.0008, 0.00608, 0.018, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2225.0, 2224.0, 0.0007, 0.0061, 0.01778, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2225.0, 1982.0, 0.004371, 0.036771, 0.102082, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2225.0, 1911.0, 0.000587, 0.005466, 0.015722, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2225.0, 1911.0, 0.001272, 0.011845, 0.034066, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2225.0, 1995.0, 0.0032, 0.0166, 0.0476, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2225.0, 2035.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2225.0, 1980.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2225.0, 1983.0, 0.005, 0.0147, 0.0374, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2034.0, 1966.0, 0.000356, 0.005065, 0.51967, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2034.0, 2003.0, 0.00121, 0.01355, 1.2482, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2034.0, 1772.0, 0.000317, 0.00405, 0.439468, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2034.0, 1772.0, 0.000309, 0.004298, 0.42362, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2034.0, 2033.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2034.0, 1981.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2034.0, 2032.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2034.0, 1771.0, 0.000759, 0.010812, 1.0325, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[121.0, 2034.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1801.0, 2131.0, 0.0037, 0.0294, 0.085666, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2220.0, 2170.0, 0.000467, 0.004897, 0.015144, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2220.0, 2170.0, 0.000467, 0.0049, 0.015136, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2030.0, 1940.0, 0.000667, 0.003612, 0.055194, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2204.0, 1844.0, 0.001053, 0.007978, 0.022864, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2204.0, 1844.0, 0.001053, 0.007978, 0.022864, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2204.0, 2206.0, 0.0023, 0.0127, 0.033, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2233.0, 1992.0, 0.0055, 0.0269, 0.044, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2233.0, 1871.0, 0.0055, 0.0269, 0.044, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2233.0, 2190.0, 0.0017, 0.0128, 0.0398, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2233.0, 2228.0, 0.001919, 0.010339, 0.029802, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2233.0, 2228.0, 0.003985, 0.013988, 0.035304, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2223.0, 2169.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2223.0, 2222.0, 0.003, 0.0199, 0.0546, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2223.0, 2222.0, 0.002477, 0.015386, 0.086506, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1946.0, 2124.0, 0.002181, 0.012442, 0.034482, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1946.0, 1769.0, 0.004399, 0.033488, 0.098148, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2213.0, 2212.0, 0.00872, 0.0415, 0.0603, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1823.0, 1822.0, 0.001557, 0.008831, 0.013178, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1823.0, 1822.0, 0.001557, 0.008831, 0.013178, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1992.0, 47.0, 0.008124, 0.030296, 0.05087, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1992.0, 1871.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[38.0, 1921.0, 0.005421, 0.030248, 0.044896, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1832.0, 2.0, 0.0, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2199.0, 2163.0, 0.012972, 0.060245, 0.0882, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2029.0, 1825.0, 0.002794, 0.015736, 0.030542, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2029.0, 1825.0, 0.002779, 0.016037, 0.030802, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2029.0, 2004.0, 0.0061, 0.0282, 0.0736, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2029.0, 119.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2161.0, 2165.0, 0.002758, 0.017246, 0.05042, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2161.0, 2165.0, 0.00281, 0.017192, 0.050784, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2190.0, 1955.0, 0.0015, 0.005, 0.008, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2059.0, 1933.0, 0.007141, 0.03759, 0.110426, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2059.0, 2060.0, 0.001137, 0.007726, 0.021632, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2066.0, 1777.0, 0.008535, 0.047552, 0.135966, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2066.0, 2036.0, 0.0277, 0.0546, 0.1086, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2066.0, 1817.0, 0.001193, 0.008897, 0.028558, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2066.0, 1817.0, 0.001271, 0.008926, 0.028726, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2214.0, 1822.0, 0.001297, 0.008265, 0.028008, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2214.0, 2048.0, 0.004664, 0.019059, 0.027342, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2228.0, 2188.0, 0.0032, 0.0124, 0.033, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2228.0, 47.0, 0.002432, 0.009068, 0.015226, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2228.0, 1907.0, 0.000749, 0.006419, 0.019036, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2228.0, 1907.0, 0.000404, 0.006082, 0.019234, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2228.0, 48.0, 0.002281, 0.010715, 0.029254, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2228.0, 48.0, 0.002281, 0.010715, 0.029254, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2228.0, 2028.0, 0.003431, 0.018104, 0.05278, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2228.0, 2028.0, 0.002438, 0.018489, 0.053282, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2228.0, 2025.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2024.0, 1790.0, 0.000393, 0.006763, 0.725106, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2024.0, 2139.0, 0.0012, 0.0095, 0.8706, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2024.0, 2034.0, 0.0009, 0.0131, 1.2058, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2024.0, 2023.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2024.0, 1771.0, 0.00041, 0.005233, 0.567852, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2024.0, 1771.0, 0.000362, 0.005035, 0.496268, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1816.0, 2003.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1816.0, 1899.0, 0.00067, 0.01333, 1.33542, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1815.0, 2003.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1815.0, 1899.0, 0.00067, 0.01333, 1.33542, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1923.0, 1807.0, 0.004043, 0.031502, 0.092992, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1923.0, 1837.0, 0.00419, 0.032116, 0.097538, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1923.0, 1837.0, 0.003923, 0.032344, 0.097258, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1923.0, 2106.0, 0.005601, 0.039221, 0.120638, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1923.0, 2106.0, 0.00442, 0.04115, 0.118408, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1923.0, 1921.0, 0.008033, 0.074789, 0.215092, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1923.0, 1968.0, 8.3e-05, 0.001479, 0.004712, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1923.0, 1968.0, 6.2e-05, 0.001495, 0.004682, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1923.0, 2178.0, 0.001489, 0.009279, 0.019006, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1923.0, 2178.0, 0.0019, 0.008904, 0.019006, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1923.0, 1818.0, 0.000639, 0.003844, 0.011098, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1923.0, 1818.0, 0.000629, 0.00385, 0.011346, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1899.0, 2136.0, 0.000834, 0.010243, 0.944442, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1899.0, 2144.0, 0.000915, 0.009985, 0.950792, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1899.0, 500.0, 0.00067, 0.01333, 1.33542, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1899.0, 499.0, 0.00067, 0.01333, 1.33542, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1836.0, 1968.0, 0.001023, 0.007793, 0.02284, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1836.0, 1968.0, 0.001023, 0.007793, 0.02284, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1835.0, 1899.0, 3.5e-05, 0.000554, 0.01563, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1768.0, 2160.0, 0.000808, 0.00615, 0.018024, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1768.0, 2160.0, 0.000808, 0.00615, 0.018024, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1768.0, 1795.0, 0.002839, 0.021615, 0.06335, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1768.0, 1795.0, 0.002839, 0.021615, 0.06335, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1768.0, 2210.0, 0.001992, 0.015161, 0.044434, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1768.0, 2210.0, 0.002895, 0.022041, 0.0646, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1768.0, 1844.0, 0.002519, 0.019179, 0.056212, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1768.0, 1994.0, 0.002367, 0.013057, 0.042808, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1768.0, 1994.0, 0.001992, 0.015161, 0.044434, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1768.0, 1910.0, 0.001432, 0.010899, 0.031942, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1768.0, 1910.0, 0.001432, 0.010899, 0.031942, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2196.0, 2008.0, 0.002104, 0.008588, 0.01563, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2196.0, 2016.0, 0.002104, 0.008588, 0.01563, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2196.0, 1852.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1926.0, 1853.0, 1e-06, 1e-06, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1830.0, 2159.0, 0.005669, 0.029498, 0.084286, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1830.0, 1831.0, 0.005312, 0.030531, 0.088372, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1830.0, 1831.0, 0.005391, 0.030252, 0.088402, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1830.0, 2097.0, 0.003948, 0.020204, 0.05813, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1983.0, 1950.0, 0.0012, 0.0116, 0.019, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2086.0, 2030.0, 0.00086, 0.004229, 0.012674, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2185.0, 2217.0, 0.0024, 0.0101, 0.0152, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2027.0, 1947.0, 0.000579, 0.003409, 0.008058, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2027.0, 1947.0, 0.000579, 0.00341, 0.00806, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2027.0, 1822.0, 0.003665, 0.023351, 0.069198, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1860.0, 1956.0, 0.000192, 0.001612, 0.007754, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1860.0, 1956.0, 0.00019, 0.001612, 0.008058, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[39.0, 2146.0, 0.005056, 0.02051, 0.02918, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1994.0, 2160.0, 0.003787, 0.015066, 0.02744, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1994.0, 1844.0, 0.006343, 0.034897, 0.072984, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1994.0, 2088.0, 0.003409, 0.018265, 0.06, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1994.0, 2088.0, 0.00339, 0.018097, 0.06, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1774.0, 2125.0, 0.000519, 0.002865, 0.009394, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1774.0, 2125.0, 0.000519, 0.002865, 0.009394, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2053.0, 2051.0, 1e-05, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1900.0, 2196.0, 0.00048, 0.0046, 0.0076, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2091.0, 1781.0, 0.000508, 0.003865, 0.011328, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2091.0, 1787.0, 0.000211, 0.000705, 0.03415, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2091.0, 1.0, 0.0, 1e-06, 2e-06, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1.0, 1781.0, 0.00044, 0.003349, 0.009814, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1.0, 1787.0, 0.000216, 0.000738, 0.035304, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1803.0, 2153.0, 0.004651, 0.032568, 0.093178, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1905.0, 2129.0, 0.004099, 0.034324, 0.09695, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1904.0, 2129.0, 0.004105, 0.025004, 0.073654, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2108.0, 2124.0, 0.004633, 0.02824, 0.08162, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2108.0, 1769.0, 0.003559, 0.027095, 0.07941, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2108.0, 1769.0, 0.003559, 0.027095, 0.07941, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2108.0, 1945.0, 0.00096, 0.00928, 0.0152, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1941.0, 1829.0, 0.001096, 0.005395, 0.043434, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2021.0, 2020.0, 0.00781, 0.0352, 0.0262, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2021.0, 2091.0, 0.014, 0.0727, 0.110892, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2163.0, 1783.0, 0.004747, 0.036136, 0.10591, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2163.0, 2026.0, 0.0123, 0.0679, 0.104, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1902.0, 1903.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1859.0, 2204.0, 0.0049, 0.0288, 0.08016, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[2222.0, 1917.0, 0.002438, 0.01471, 0.04222, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1950.0, 2215.0, 0.00095, 0.005619, 0.018094, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1950.0, 2215.0, 0.001591, 0.007644, 0.012924, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1950.0, 2218.0, 0.003325, 0.02037, 0.03325, 100.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[316.0, 315.0, 0.001572, 0.02166, 3.44616, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[310.0, 307.0, 0.001592, 0.021628, 3.43046, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1922.0, 1921.0, 0.0055, 0.0332, 0.048824, 100.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[482.0, 1789.0, 0.001904, 0.030428, 2.94106, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[484.0, 483.0, 0.001926, 0.030303, 2.93952, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[508.0, 1899.0, 0.001544, 0.016148, 1.54645, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[508.0, 1899.0, 0.00134, 0.014248, 1.32665, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[508.0, 482.0, 0.0, 0.0001, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[508.0, 484.0, 0.0, 0.0001, 0.0, 400.0, 0.0,0.0,0.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[500.0, 508.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[499.0, 508.0, 0.0, 1e-05, 0.0, 400.0, 0.0,0.0,0.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1685.0, 1869.0, 0.00131, 0.072778, 0.0027, 180.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1706.0, 1985.0, 0.0003, 0.019557, 0.0, 360.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1642.0, 1763.0, 0.002379, 0.1292, 0.0029, 100.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1747.0, 2181.0, 0.0047, 0.1573, 0.0, 400.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1746.0, 2181.0, 0.0047, 0.156, 0.0, 400.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[31.0, 57.0, 0.0047, 0.1573, 0.0, 400.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[30.0, 57.0, 0.0047, 0.1573, 0.0, 400.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[23.0, 40.0, 0.002828, 0.1393, 0.0011, 100.0, 0.0,0.0,0.940909, 0.0,1.0,-30.0, 30.0, 0.1 ],
[4.0, 3.0, 0.002083, 0.116667, 0.00156, 120.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1666.0, 1810.0, 0.000508, 0.037, 0.004284, 420.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1665.0, 1810.0, 0.000507, 0.036952, 0.003864, 420.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1745.0, 2171.0, 0.000585, 0.034067, 0.006103, 436.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1744.0, 2171.0, 0.000585, 0.034067, 0.061027, 436.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1743.0, 2171.0, 0.000526, 0.030275, 0.00981, 418.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1742.0, 2171.0, 0.000526, 0.030275, 0.00981, 418.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1664.0, 1809.0, 0.0012, 0.074111, 0.0018, 180.0, 0.0,0.0,1.097727, 0.0,0.0,-30.0, 30.0, 0.1 ],
[26.0, 53.0, 0.00297, 0.137, 0.0027, 100.0, 0.0,0.0,1.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[28.0, 55.0, 0.00297, 0.137, 0.0027, 100.0, 0.0,0.0,1.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[19.0, 36.0, 0.00297, 0.137, 0.0027, 100.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1741.0, 2162.0, 0.0006, 0.0345, 0.0, 418.0, 0.0,0.0,1.04545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1740.0, 2162.0, 0.0006, 0.0343, 0.0, 418.0, 0.0,0.0,1.04545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1670.0, 1841.0, 0.000544, 0.037838, 0.0148, 400.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1669.0, 1841.0, 0.000544, 0.037838, 0.0148, 370.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1687.0, 1906.0, 0.000791, 0.048433, 0.0033, 370.0, 0.0,0.0,1.04545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1686.0, 1906.0, 0.000791, 0.048433, 0.0033, 370.0, 0.0,0.0,1.04545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1729.0, 1986.0, 0.000659, 0.043486, 0.00189, 430.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1728.0, 2122.0, 0.000659, 0.043486, 0.00189, 430.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1696.0, 1937.0, 0.000802, 0.048833, 0.0051, 370.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1695.0, 1792.0, 0.000802, 0.048833, 0.0051, 370.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1690.0, 1901.0, 0.002669, 0.136, 0.0009, 100.0, 0.0,0.0,1.00625, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1659.0, 1802.0, 0.002379, 0.1292, 0.0029, 100.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1739.0, 2152.0, 0.0041, 0.0942, 0.0, 400.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1738.0, 2152.0, 0.001394, 0.0686, 0.005, 240.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1737.0, 2152.0, 0.002018, 0.0757, 0.00184, 240.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1707.0, 2152.0, 0.000659, 0.066286, 0.00819, 430.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1752.0, 2152.0, 0.000659, 0.041543, 0.00945, 430.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[13.0, 1820.0, 0.003265, 0.139, 0.00076, 120.0, 0.0,0.0,0.940909, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1703.0, 1984.0, 0.001884, 0.093333, 4.5e-05, 180.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1702.0, 1984.0, 0.001871, 0.093333, 4.5e-05, 180.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1704.0, 1984.0, 0.001876, 0.093333, 4.5e-05, 180.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1705.0, 1984.0, 0.001867, 0.093333, 4.5e-05, 180.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[34.0, 59.0, 0.0064, 0.1807, 0.0, 75.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[33.0, 58.0, 0.0064, 0.1807, 0.0, 75.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1678.0, 1854.0, 0.000769, 0.050067, 0.00276, 370.0, 0.0,0.0,1.04545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1677.0, 1854.0, 0.000762, 0.0499, 0.00276, 370.0, 0.0,0.0,1.04545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1655.0, 1826.0, 0.000959, 0.192917, 0.00084, 120.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[27.0, 54.0, 0.00297, 0.137, 0.0027, 100.0, 0.0,0.0,1.0, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1657.0, 1793.0, 0.00298, 0.1364, 0.0013, 120.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1650.0, 1834.0, 7e-06, 0.00569, 0.01386, 1260.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1648.0, 1834.0, 7e-06, 0.00569, 0.01386, 1260.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[35.0, 1834.0, 7e-06, 0.00569, 0.01386, 1260.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1682.0, 1858.0, 0.000527, 0.04415, 0.0034, 400.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1681.0, 1858.0, 0.000527, 0.04415, 0.0034, 400.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2115.0, 2118.0, 0.0029, 0.0762, 0.0, 300.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2111.0, 2117.0, 0.0045, 0.1801, 0.0, 90.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2104.0, 2012.0, 0.005505, 0.199524, 0.001512, 63.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1736.0, 2104.0, 0.006292, 0.268, 0.00075, 50.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1735.0, 2104.0, 0.006204, 0.268, 0.00075, 50.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1734.0, 2149.0, 0.002101, 0.056458, 0.014304, 240.0, 0.0,0.0,1.1, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1733.0, 2149.0, 0.001332, 0.059167, 0.008592, 240.0, 0.0,0.0,1.1, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1732.0, 2149.0, 0.001465, 0.057917, 0.009744, 240.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1694.0, 1936.0, 0.000531, 0.036378, 0.00407, 370.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1693.0, 1936.0, 0.000531, 0.036378, 0.00407, 370.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[25.0, 52.0, 0.00297, 0.137, 0.0027, 100.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1701.0, 1959.0, 0.000326, 0.0237, 0.0072, 720.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1700.0, 1959.0, 0.000326, 0.0237, 0.0072, 720.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1652.0, 1788.0, 0.003869, 0.14, 0.002, 100.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1645.0, 1767.0, 0.0115, 0.2541, 0.0, 400.0, 0.0,0.0,1.025, 0.0,1.0,-30.0, 30.0, 0.1 ],
[24.0, 1767.0, 0.0115, 0.2541, 0.0, 400.0, 0.0,0.0,1.025, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1656.0, 1929.0, 0.002209, 0.100333, 2.4e-05, 120.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[14.0, 1929.0, 0.002431, 0.116667, 6e-05, 120.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1644.0, 1766.0, 0.002379, 0.1292, 0.0029, 100.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[12.0, 1857.0, 0.000929, 0.054167, 0.00648, 240.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[11.0, 1857.0, 0.000948, 0.054167, 0.00648, 240.0, 0.0,0.0,1.09773, 0.0,1.0,-30.0, 30.0, 0.1 ],
[11.0, 1857.0, 0.003124, 0.133, 0.0022, 100.0, 0.0,0.0,1.04546, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1691.0, 2013.0, 0.004251, 0.1313, 0.0015, 180.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1662.0, 2013.0, 0.001786, 0.099067, 0.003675, 180.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1731.0, 2095.0, 0.001658, 0.068, 0.0046, 240.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1730.0, 2095.0, 0.001598, 0.0681, 0.004, 240.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1649.0, 1775.0, 0.000575, 0.044846, 0.003081, 390.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[32.0, 1775.0, 0.000575, 0.044846, 0.003081, 390.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1651.0, 1814.0, 0.0006, 0.0441, 0.0, 400.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1653.0, 1814.0, 0.0006, 0.0441, 0.0, 400.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1654.0, 1814.0, 0.0006, 0.0441, 0.0, 400.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1674.0, 1814.0, 0.0006, 0.0441, 0.0, 400.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[20.0, 37.0, 0.002851, 0.13, 0.00066, 100.0, 0.0,0.0,1.05852, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1668.0, 2182.0, 0.0029, 0.0694, 0.0107, 720.0, 0.0,0.0,1.04545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1727.0, 2120.0, 0.000367, 0.023333, 0.0321, 260.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1726.0, 2120.0, 0.000367, 0.023333, 0.0321, 260.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1697.0, 1958.0, 0.000117, 0.023367, 0.01176, 720.0, 0.0,0.0,1.04545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1643.0, 1765.0, 0.002379, 0.1292, 0.0029, 100.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1725.0, 2071.0, 0.0013, 0.0643, 0.0, 240.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1724.0, 2071.0, 0.0013, 0.0643, 0.0, 240.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1710.0, 2071.0, 0.0013, 0.0643, 0.0, 240.0, 0.0,0.0,1.06818, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1672.0, 1843.0, 0.000575, 0.044846, 0.003081, 390.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1671.0, 1843.0, 0.000575, 0.044846, 0.003081, 390.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1723.0, 2011.0, 0.005759, 0.207937, 0.001512, 32.0, 0.0,0.0,1.0375, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1722.0, 2180.0, 0.004, 0.119, 0.0, 400.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1721.0, 2180.0, 0.004, 0.119, 0.0, 400.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1720.0, 2180.0, 0.004, 0.119, 0.0, 400.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1719.0, 2180.0, 0.0054, 0.116, 0.0, 400.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1676.0, 1850.0, 0.000178, 0.053846, 0.0, 260.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1675.0, 1850.0, 0.000178, 0.053846, 0.0, 260.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1718.0, 2045.0, 0.000218, 0.01863, 0.0, 120.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1717.0, 2046.0, 0.000218, 0.01827, 0.0, 400.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1692.0, 2045.0, 0.000175, 0.015526, 0.013338, 400.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1663.0, 2045.0, 0.000175, 0.015526, 0.013338, 400.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1709.0, 2195.0, 0.001558, 0.08475, 0.00336, 160.0, 0.0,0.0,1.04545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1708.0, 2195.0, 0.001879, 0.088667, 0.00435, 160.0, 0.0,0.0,1.04545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[5.0, 1764.0, 0.002083, 0.116667, 0.00156, 120.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[29.0, 56.0, 0.002914, 0.127, 0.0012, 100.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2038.0, 2096.0, 0.0022, 0.114, 0.0, 120.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1661.0, 1805.0, 0.00297, 0.137, 0.0027, 100.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1699.0, 2229.0, 0.000375, 0.022667, 0.00294, 720.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1698.0, 2229.0, 0.001028, 0.046333, 0.0054, 720.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1714.0, 2158.0, 0.0008, 0.0461, 0.0, 370.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1713.0, 2158.0, 0.0008, 0.0463, 0.0, 370.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1716.0, 2229.0, 0.0008, 0.0451, 0.0, 370.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1715.0, 2229.0, 0.0007, 0.0411, 0.0, 370.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1680.0, 1828.0, 0.002439, 0.111755, 0.000752, 120.0, 0.0,0.0,0.988943, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1641.0, 1762.0, 0.003175, 0.1308, 0.00239, 100.0, 0.0,0.0,1.05852, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1658.0, 1801.0, 0.00297, 0.137, 0.0027, 100.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[21.0, 38.0, 0.00297, 0.137, 0.0027, 100.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1667.0, 1836.0, 0.000318, 0.02355, 0.00108, 720.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1673.0, 1835.0, 0.000328, 0.023833, 0.00168, 720.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1712.0, 2027.0, 0.0006, 0.0348, 0.0, 400.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1711.0, 2027.0, 0.0006, 0.0348, 0.0, 400.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1749.0, 1969.0, 0.000223, 0.0195, 0.004392, 720.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1748.0, 1969.0, 0.000228, 0.019319, 0.004248, 720.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1684.0, 1860.0, 0.000526, 0.037775, 0.0028, 400.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1683.0, 1860.0, 0.000528, 0.0378, 0.00236, 400.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[22.0, 39.0, 0.000706, 0.0772, 0.00092, 100.0, 0.0,0.0,1.05852, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1660.0, 1803.0, 0.003032, 0.14, 0.0013, 100.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1689.0, 1905.0, 0.00297, 0.137, 0.0027, 100.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[117.0, 1905.0, 0.002828, 0.141, 1e-05, 100.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[110.0, 1905.0, 0.002841, 0.141, 1e-05, 100.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[108.0, 1905.0, 0.002828, 0.141, 1e-05, 100.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1688.0, 1904.0, 0.00297, 0.137, 0.0027, 100.0, 0.0,0.0,1.075, 0.0,1.0,-30.0, 30.0, 0.1 ],
[118.0, 1904.0, 0.00297, 0.137, 0.0027, 100.0, 0.0,0.0,1.075, 0.0,1.0,-30.0, 30.0, 0.1 ],
[111.0, 1904.0, 0.00297, 0.137, 0.0027, 100.0, 0.0,0.0,1.075, 0.0,1.0,-30.0, 30.0, 0.1 ],
[107.0, 1904.0, 0.00297, 0.137, 0.0027, 50.0, 0.0,0.0,1.075, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1751.0, 1902.0, 0.000223, 0.0195, 0.004176, 720.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1750.0, 1902.0, 0.000219, 0.019278, 0.00432, 720.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2194.0, 1633.0, 0.002, 0.0983, 0.0, 150.0, 0.0,0.0,1.04545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1635.0, 1633.0, 0.0014, 0.0563, 0.0, 150.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1634.0, 1633.0, 0.0009, -0.003, 0.0, 75.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2194.0, 1631.0, 0.002, 0.0997, 0.0, 150.0, 0.0,0.0,1.04545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1635.0, 1631.0, 0.0014, 0.0567, 0.0, 150.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1632.0, 1631.0, 0.0008, -0.0033, 0.0, 75.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2194.0, 1628.0, 0.001271, 0.096333, 0.00115, 150.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1630.0, 1628.0, 0.001185, 0.057, 0.00115, 150.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1629.0, 1628.0, 0.001033, -0.005, 0.00115, 75.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1965.0, 1587.0, 6.7e-05, 0.018139, 0.00103533, 1002.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2231.0, 1587.0, 5.6e-05, -0.00171, 0.00103533, 1002.0, 0.0,0.0,1.09773, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1964.0, 1587.0, 0.000397, 0.03773, 0.00103533, 270.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1961.0, 1586.0, 6.4e-05, 0.01821, 0.00103533, 1002.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1962.0, 1586.0, 5.9e-05, -0.00176, 0.00103533, 1002.0, 0.0,0.0,1.09773, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1963.0, 1586.0, 0.000397, 0.037788, 0.00103533, 270.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2002.0, 1627.0, 8.6e-05, 0.01918, 0.0, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1999.0, 1627.0, 8.8e-05, -0.00199, 0.0, 750.0, 0.0,0.0,1.07159, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1997.0, 1627.0, 0.000652, 0.04874, 0.0, 240.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2001.0, 1626.0, 8.6e-05, 0.01918, 0.0, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1998.0, 1626.0, 8.8e-05, -0.00199, 0.0, 750.0, 0.0,0.0,1.07159, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1996.0, 1626.0, 0.000652, 0.04874, 0.0, 240.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1970.0, 1592.0, 6.6e-05, 0.018757, 0.00120233, 1002.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2221.0, 1592.0, 5.9e-05, -0.00301, 0.00120233, 1002.0, 0.0,0.0,1.07159, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1864.0, 1592.0, 0.000397, 0.038328, 0.00120233, 330.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1972.0, 1591.0, 6.6e-05, 0.018757, 0.00126933, 1002.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2221.0, 1591.0, 5.9e-05, -0.00301, 0.00126933, 1002.0, 0.0,0.0,1.07159, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1863.0, 1591.0, 0.000397, 0.038328, 0.00126933, 330.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1772.0, 1556.0, 9.1e-05, 0.02099, 0.0, 10000.0, 0.0,0.0,1.05, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1770.0, 1556.0, 6.7e-05, -0.00349, 0.0, 1000.0, 0.0,0.0,1.04546, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1759.0, 1556.0, 0.00037, 0.03445, 0.0, 330.0, 0.0,0.0,0.954545, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1772.0, 1555.0, 9.1e-05, 0.02099, 0.0, 10000.0, 0.0,0.0,1.05, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1770.0, 1555.0, 6.7e-05, -0.00349, 0.0, 1000.0, 0.0,0.0,1.04546, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1758.0, 1555.0, 0.00037, 0.03445, 0.0, 330.0, 0.0,0.0,0.954545, 0.0,0.0,-30.0, 30.0, 0.1 ],
[1855.0, 1584.0, 8.3e-05, 0.021439, 0.0, 400.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1856.0, 1584.0, 6.5e-05, -0.00326, 0.0, 400.0, 0.0,0.0,1.07159, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1957.0, 1584.0, 0.000454, 0.038229, 0.0, 400.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1813.0, 1570.0, 7.8e-05, 0.018807, 0.001336, 1002.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1812.0, 1570.0, 5.7e-05, -0.00212, 0.001336, 1002.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1811.0, 1570.0, 0.000428, 0.033328, 0.001336, 300.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1886.0, 1573.0, 6.3e-05, 0.018623, 0.00153633, 1002.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1887.0, 1573.0, 6.3e-05, -0.00257, 0.00153633, 1002.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1884.0, 1573.0, 0.000381, 0.035269, 0.00153633, 300.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1927.0, 1578.0, 5.8e-05, 0.017275, 0.002004, 1002.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1925.0, 1578.0, 6.9e-05, -0.00173, 0.002004, 1002.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1885.0, 1578.0, 0.000349, 0.039152, 0.002004, 300.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2143.0, 1624.0, 0.000125, 0.02587, 0.0, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2150.0, 1624.0, 9.2e-05, -0.00513, 0.0, 750.0, 0.0,0.0,1.07273, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1625.0, 1624.0, 0.000505, 0.04532, 0.0, 240.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2138.0, 1622.0, 0.000228, 0.02372, 0.0, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2147.0, 1622.0, 0.000123, -0.00264, 0.0, 750.0, 0.0,0.0,1.06818, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1623.0, 1622.0, 0.000586, 0.02816, 0.0, 240.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1790.0, 1564.0, 9.6e-05, 0.0209, 0.002, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2094.0, 1564.0, 7.9e-05, -0.00277, 0.002, 750.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1565.0, 1564.0, 0.000524, 0.052407, 0.002, 240.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1790.0, 1563.0, 9.6e-05, 0.0209, 0.002, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2094.0, 1563.0, 7.9e-05, -0.00277, 0.002, 750.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1565.0, 1563.0, 0.000524, 0.052407, 0.002, 240.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2152.0, 1619.0, 0.00085, 0.01, 0.0, 400.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1621.0, 1619.0, 0.0048, 0.1195, 0.0, 400.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1620.0, 1619.0, 0.0027, 0.1195, 0.0, 400.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1875.0, 1590.0, 8e-05, 0.01881, 0.0, 1002.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1874.0, 1590.0, 0.00277, -0.00232, 0.0, 1002.0, 0.0,0.0,1.04545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1873.0, 1590.0, 0.0004, 0.037, 0.0, 330.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1974.0, 1572.0, 8e-06, 0.018685, 0.00153333, 1000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2202.0, 1572.0, -1e-05, -0.0033, 0.00153333, 10000.0, 0.0,0.0,1.01932, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1872.0, 1572.0, 0.000442, 0.039535, 0.00153333, 300.0, 0.0,0.0,0.978409, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2082.0, 1618.0, 0.000117, 0.02364, 0.00205, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2089.0, 1618.0, 4.2e-05, -0.00236, 0.00205, 750.0, 0.0,0.0,1.07159, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2078.0, 1618.0, 0.000345, 0.031, 0.00205, 240.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2083.0, 1617.0, 6.6e-05, 0.022113, 0.001075, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2088.0, 1617.0, 9e-05, -0.00185, 0.001075, 750.0, 0.0,0.0,1.07159, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2077.0, 1617.0, 0.000509, 0.047513, 0.001075, 240.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2080.0, 1616.0, 0.000115, 0.022847, 0.00225, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2088.0, 1616.0, 0.000118, -0.00186, 0.00225, 750.0, 0.0,0.0,1.07159, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2076.0, 1616.0, 0.000507, 0.03022, 0.00225, 240.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1786.0, 1562.0, 9.1e-05, 0.02099, 0.0, 10000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1785.0, 1562.0, 6.7e-05, -0.00349, 0.0, 1000.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1755.0, 1562.0, 0.00037, 0.03445, 0.0, 330.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1786.0, 1561.0, 9.1e-05, 0.02099, 0.0, 10000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1785.0, 1561.0, 6.7e-05, -0.00349, 0.0, 1000.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1754.0, 1561.0, 0.00037, 0.03445, 0.0, 330.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1868.0, 1615.0, 0.000105, 0.01782, 0.003375, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1867.0, 1615.0, 5.8e-05, -0.00247, 0.003375, 750.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2072.0, 1615.0, 0.000494, 0.030927, 0.003375, 240.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1866.0, 1614.0, 7.9e-05, 0.019153, 0.00145, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1865.0, 1614.0, 6.4e-05, -0.00314, 0.00145, 750.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2007.0, 1614.0, 0.000335, 0.030553, 0.00145, 240.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1799.0, 1568.0, 7.8e-05, 0.018079, 0.001336, 1002.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1797.0, 1568.0, 4.9e-05, -0.00241, 0.001336, 1002.0, 0.0,0.0,1.07159, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1569.0, 1568.0, 0.000403, 0.038458, 0.001336, 300.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1798.0, 1566.0, 7.4e-05, 0.018598, 0.001837, 1002.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1797.0, 1566.0, 5.3e-05, -0.00316, 0.001837, 1002.0, 0.0,0.0,1.07159, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1567.0, 1566.0, 0.000378, 0.039316, 0.001837, 300.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2013.0, 1611.0, 0.001709, 0.13125, 0.000972, 120.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1613.0, 1611.0, 0.001024, 0.070417, 0.000972, 120.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1612.0, 1611.0, 0.001075, -0.00625, 0.000972, 120.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2013.0, 1608.0, 0.0021, 0.1588, 0.000972, 120.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1610.0, 1608.0, 0.0012, 0.0852, 0.000972, 120.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1609.0, 1608.0, 0.0013, 0.0063, 0.000972, 120.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1960.0, 1585.0, 7.3e-05, 0.018815, 0.00096667, 1000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1920.0, 1585.0, 6e-05, -0.00139, 0.00096667, 1000.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1881.0, 1585.0, 0.000405, 0.037565, 0.00096667, 300.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[123.0, 1583.0, 7.4e-05, 0.018955, 0.00096667, 1000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1920.0, 1583.0, 6.1e-05, -0.00145, 0.00096667, 1000.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1808.0, 1583.0, 0.000406, 0.037395, 0.00096667, 300.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2056.0, 1607.0, 8.6e-05, 0.012, 0.0, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2061.0, 1607.0, 8.4e-05, 0.0052, 0.0, 750.0, 0.0,0.0,1.07045, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2055.0, 1607.0, 0.00064, 0.0098, 0.0, 240.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2057.0, 1588.0, 8.2e-05, 0.01899, 0.0, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2062.0, 1588.0, 9.5e-05, 0.00187, 0.0, 750.0, 0.0,0.0,1.07045, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1967.0, 1588.0, 0.000595, 0.04896, 0.0, 240.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2050.0, 1606.0, 0.000124, 0.026467, 0.003, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2054.0, 1606.0, 8.8e-05, -0.00659, 0.003, 750.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2049.0, 1606.0, 0.000433, 0.03668, 0.003, 240.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2019.0, 1605.0, 6.9e-05, 0.01806, 0.000725, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2018.0, 1605.0, 8.7e-05, -0.00197, 0.000725, 750.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2017.0, 1605.0, 0.000344, 0.03106, 0.000725, 240.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2081.0, 1576.0, 5.9e-05, 0.017137, 0.0009, 1000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2052.0, 1576.0, 7.4e-05, -0.0013, 0.0009, 1000.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1880.0, 1576.0, 0.000392, 0.036947, 0.0009, 330.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2230.0, 1604.0, 8.3e-05, 0.019047, 0.001425, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2124.0, 1604.0, 6.1e-05, -0.00317, 0.001425, 750.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1878.0, 1604.0, 0.000339, 0.031247, 0.001425, 240.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2230.0, 1582.0, 6e-05, 0.017225, 0.00096667, 1000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2014.0, 1582.0, 7.3e-05, -0.00129, 0.00096667, 1000.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1877.0, 1582.0, 0.000392, 0.036925, 0.00096667, 330.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1773.0, 1558.0, 9.1e-05, 0.02099, 0.0, 10000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1769.0, 1558.0, 6.7e-05, -0.00349, 0.0, 1000.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1761.0, 1558.0, 0.00037, 0.03445, 0.0, 330.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1773.0, 1557.0, 9.1e-05, 0.02099, 0.0, 10000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1769.0, 1557.0, 6.7e-05, -0.00349, 0.0, 1000.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1760.0, 1557.0, 0.00037, 0.03445, 0.0, 330.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1787.0, 8.0, 0.000881, 0.085611, 0.000444, 180.0, 0.0,0.0,1.0625, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1646.0, 8.0, 0.000767, -0.00617, 0.000444, 180.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[10.0, 8.0, 9.1e-05, 0.051056, 0.000444, 90.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1787.0, 7.0, 0.000881, 0.085611, 0.000444, 180.0, 0.0,0.0,1.0625, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1647.0, 7.0, 0.000767, -0.00617, 0.000444, 180.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[9.0, 7.0, 9.1e-05, 0.051056, 0.000444, 90.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2046.0, 1603.0, 0.0, 0.04475, 0.0, 400.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1935.0, 1603.0, 0.0, -0.00462, 0.0, 400.0, 0.0,0.0,1.0725, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2043.0, 1603.0, 0.0, 0.07026, 0.0, 400.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2096.0, 1601.0, 0.0018, 0.1243, 0.0, 400.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1602.0, 1601.0, 0.0015, 0.0698, 0.0, 400.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2041.0, 1601.0, 0.0014, -0.0077, 0.0, 400.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2179.0, 1598.0, 0.0063, 0.2671, 0.0, 400.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1600.0, 1598.0, 0.0058, 0.1401, 0.0, 400.0, 0.0,0.0,1.04545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1599.0, 1598.0, 0.003, -0.0097, 0.0, 400.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2179.0, 1596.0, 0.0063, 0.2652, 0.0, 400.0, 0.0,0.0,1.1, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1600.0, 1596.0, 0.0059, 0.1419, 0.0, 400.0, 0.0,0.0,1.04545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1597.0, 1596.0, 0.0028, -0.0079, 0.0, 400.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1895.0, 1575.0, 9.1e-05, 0.02099, 0.0, 10000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1893.0, 1575.0, 6.7e-05, -0.00349, 0.0, 1000.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1890.0, 1575.0, 0.00037, 0.03445, 0.0, 330.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1892.0, 1574.0, 9.1e-05, 0.02099, 0.0, 1000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1891.0, 1574.0, 6.7e-05, -0.00349, 0.0, 1000.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1889.0, 1574.0, 0.00037, 0.03445, 0.0, 330.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2033.0, 1595.0, 8.5e-05, 0.01857, 0.00183333, 1000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2035.0, 1595.0, 4.7e-05, -0.00287, 0.00183333, 1000.0, 0.0,0.0,1.09773, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2031.0, 1595.0, 0.000426, 0.03594, 0.00183333, 300.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1981.0, 1593.0, 7.3e-05, 0.0163, 0.001, 1000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1980.0, 1593.0, 5.4e-05, -0.001, 0.001, 1000.0, 0.0,0.0,1.09773, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1979.0, 1593.0, 0.000377, 0.03705, 0.001, 330.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2023.0, 1594.0, 0.000116, 0.018433, 0.002075, 750.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2025.0, 1594.0, 7.4e-05, -0.00326, 0.002075, 750.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2022.0, 1594.0, 0.000476, 0.032887, 0.002075, 240.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2024.0, 1589.0, 6.4e-05, 0.016337, 0.00120233, 1002.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2228.0, 1589.0, 6.3e-05, -0.0024, 0.00120233, 1002.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1862.0, 1589.0, 0.000244, 0.030978, 0.00120233, 330.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1899.0, 1581.0, 8.5e-05, 0.018221, 0.001275, 750.0, 0.0,0.0,1.072, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1923.0, 1581.0, 8.5e-05, -0.00243, 0.001275, 750.0, 0.0,0.0,1.07159, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1879.0, 1581.0, -9e-05, 0.041486, 0.001275, 240.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1899.0, 1579.0, 8.4e-05, 0.018087, 0.00135, 750.0, 0.0,0.0,1.072, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1923.0, 1579.0, 8.4e-05, -0.00222, 0.00135, 750.0, 0.0,0.0,1.07159, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1580.0, 1579.0, -8e-05, 0.04158, 0.00135, 240.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1771.0, 1560.0, 9.1e-05, 0.02099, 0.0, 10000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1768.0, 1560.0, 6.7e-05, -0.00349, 0.0, 1000.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1757.0, 1560.0, 0.00037, 0.03445, 0.0, 330.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1771.0, 1559.0, 9.1e-05, 0.02099, 0.0, 10000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1768.0, 1559.0, 6.7e-05, -0.00349, 0.0, 1000.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1756.0, 1559.0, 0.00037, 0.03445, 0.0, 330.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1853.0, 1571.0, 6.1e-05, 0.01713, 0.00126667, 1000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1852.0, 1571.0, 7.3e-05, -0.00142, 0.00126667, 1000.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1851.0, 1571.0, 0.000408, 0.0376, 0.00126667, 330.0, 0.0,0.0,1.0, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1926.0, 1577.0, 5e-05, 0.01767, 0.00133333, 1000.0, 0.0,0.0,1.05, 0.0,1.0,-30.0, 30.0, 0.1 ],
[2196.0, 1577.0, 7e-05, -0.00193, 0.00133333, 1000.0, 0.0,0.0,1.04546, 0.0,1.0,-30.0, 30.0, 0.1 ],
[1882.0, 1577.0, 0.000396, 0.03757, 0.00133333, 330.0, 0.0,0.0,0.954545, 0.0,1.0,-30.0, 30.0, 0.1 ]
])
ppc["gencost"] = array([
[2.0, 0.0, 0.0, 3.0, 0.0, 44.0, 0.0, 66.0, 33.0, 52.8, 26.4 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 44.0, 0.0, 66.0, 33.0, 52.8, 26.4 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 50.0, 0.0, 75.0, 37.5, 60.0, 30.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 62.8, 0.0, 94.2, 47.1, 75.36, 37.68 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 120.0, 0.0, 180.0, 90.0, 144.0, 72.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 30.0, 0.0, 45.0, 22.5, 36.0, 18.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 30.0, 0.0, 45.0, 22.5, 36.0, 18.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 40.0, 0.0, 60.0, 30.0, 48.0, 24.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 40.0, 0.0, 60.0, 30.0, 48.0, 24.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 140.0, 0.0, 210.0, 105.0, 168.0, 84.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 140.0, 0.0, 210.0, 105.0, 168.0, 84.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 140.0, 0.0, 210.0, 105.0, 168.0, 84.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 140.0, 0.0, 210.0, 105.0, 168.0, 84.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 140.0, 0.0, 210.0, 105.0, 168.0, 84.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 140.0, 0.0, 210.0, 105.0, 168.0, 84.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 120.0, 0.0, 180.0, 90.0, 144.0, 72.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 120.0, 0.0, 180.0, 90.0, 144.0, 72.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 140.0, 0.0, 210.0, 105.0, 168.0, 84.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 140.0, 0.0, 210.0, 105.0, 168.0, 84.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 80.0, 0.0, 120.0, 60.0, 96.0, 48.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 80.0, 0.0, 120.0, 60.0, 96.0, 48.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 140.0, 0.0, 210.0, 105.0, 168.0, 84.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 140.0, 0.0, 210.0, 105.0, 168.0, 84.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 400.0, 0.0, 600.0, 300.0, 480.0, 240.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 400.0, 0.0, 600.0, 300.0, 480.0, 240.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 120.0, 0.0, 180.0, 90.0, 144.0, 72.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 120.0, 0.0, 180.0, 90.0, 144.0, 72.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 64.0, 0.0, 96.0, 48.0, 76.8, 38.4 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 64.0, 0.0, 96.0, 48.0, 76.8, 38.4 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 64.0, 0.0, 96.0, 48.0, 76.8, 38.4 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 29.0, 0.0, 43.5, 21.75, 34.8, 17.4 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 29.0, 0.0, 43.5, 21.75, 34.8, 17.4 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 29.0, 0.0, 43.5, 21.75, 34.8, 17.4 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 14.4, 0.0, 21.6, 10.8, 17.28, 8.64 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 14.4, 0.0, 21.6, 10.8, 17.28, 8.64 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 16.8, 0.0, 25.2, 12.6, 20.16, 10.08 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 16.8, 0.0, 25.2, 12.6, 20.16, 10.08 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 80.0, 0.0, 120.0, 60.0, 96.0, 48.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 80.0, 0.0, 120.0, 60.0, 96.0, 48.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 80.0, 0.0, 120.0, 60.0, 96.0, 48.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 240.0, 0.0, 360.0, 180.0, 288.0, 144.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 240.0, 0.0, 360.0, 180.0, 288.0, 144.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 48.0, 0.0, 72.0, 36.0, 57.6, 28.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 48.0, 0.0, 72.0, 36.0, 57.6, 28.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 40.0, 0.0, 60.0, 30.0, 48.0, 24.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 40.0, 0.0, 60.0, 30.0, 48.0, 24.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 60.0, 0.0, 90.0, 45.0, 72.0, 36.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 60.0, 0.0, 90.0, 45.0, 72.0, 36.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 80.0, 0.0, 120.0, 60.0, 96.0, 48.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 80.0, 0.0, 120.0, 60.0, 96.0, 48.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 120.0, 0.0, 180.0, 90.0, 144.0, 72.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 120.0, 0.0, 180.0, 90.0, 144.0, 72.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 120.0, 0.0, 180.0, 90.0, 144.0, 72.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 120.0, 0.0, 180.0, 90.0, 144.0, 72.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 240.0, 0.0, 360.0, 180.0, 288.0, 144.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 84.0, 0.0, 126.0, 63.0, 100.8, 50.4 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 80.0, 0.0, 120.0, 60.0, 96.0, 48.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 240.0, 0.0, 360.0, 180.0, 288.0, 144.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 39.6, 0.0, 59.4, 29.7, 47.52, 23.76 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 80.0, 0.0, 120.0, 60.0, 96.0, 48.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 80.0, 0.0, 120.0, 60.0, 96.0, 48.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 80.0, 0.0, 120.0, 60.0, 96.0, 48.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 20.0, 0.0, 30.0, 15.0, 24.0, 12.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 36.0, 0.0, 54.0, 27.0, 43.2, 21.6 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 36.0, 0.0, 54.0, 27.0, 43.2, 21.6 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 36.0, 0.0, 54.0, 27.0, 43.2, 21.6 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 36.0, 0.0, 54.0, 27.0, 43.2, 21.6 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 62.8, 0.0, 94.2, 47.1, 75.36, 37.68 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 62.8, 0.0, 94.2, 47.1, 75.36, 37.68 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 320.0, 0.0, 480.0, 240.0, 384.0, 192.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 320.0, 0.0, 480.0, 240.0, 384.0, 192.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 403.2, 0.0, 604.8, 302.4, 483.84, 241.92 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 403.2, 0.0, 604.8, 302.4, 483.84, 241.92 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 54.0, 0.0, 81.0, 40.5, 64.8, 32.4 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 54.0, 0.0, 81.0, 40.5, 64.8, 32.4 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 18.0, 0.0, 27.0, 13.5, 21.6, 10.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 18.0, 0.0, 27.0, 13.5, 21.6, 10.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 18.0, 0.0, 27.0, 13.5, 21.6, 10.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 18.0, 0.0, 27.0, 13.5, 21.6, 10.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 20.0, 0.0, 30.0, 15.0, 24.0, 12.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 20.0, 0.0, 30.0, 15.0, 24.0, 12.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 240.0, 0.0, 360.0, 180.0, 288.0, 144.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 240.0, 0.0, 360.0, 180.0, 288.0, 144.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 120.0, 0.0, 180.0, 90.0, 144.0, 72.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 120.0, 0.0, 180.0, 90.0, 144.0, 72.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 120.0, 0.0, 180.0, 90.0, 144.0, 72.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 120.0, 0.0, 180.0, 90.0, 144.0, 72.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 240.0, 0.0, 360.0, 180.0, 288.0, 144.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 240.0, 0.0, 360.0, 180.0, 288.0, 144.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 120.0, 0.0, 180.0, 90.0, 144.0, 72.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 120.0, 0.0, 180.0, 90.0, 144.0, 72.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 240.0, 0.0, 360.0, 180.0, 288.0, 144.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 240.0, 0.0, 360.0, 180.0, 288.0, 144.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 208.0, 0.0, 312.0, 156.0, 249.6, 124.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 12.0, 6.0, 9.6, 4.8 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 240.0, 0.0, 360.0, 180.0, 288.0, 144.0 ],
[2.0, 0.0, 0.0, 3.0, 0.0, 240.0, 0.0, 360.0, 180.0, 288.0, 144.0 ]
])
return ppc
| 134.260851
| 201
| 0.340705
|
e4a657793f9f05a77c5e81587237f08dc7c7295f
| 1,406
|
py
|
Python
|
code/tools/chgdef.py
|
jgresula/jagpdf
|
6c36958b109e6522e6b57d04144dd83c024778eb
|
[
"MIT"
] | 54
|
2015-02-16T14:25:16.000Z
|
2022-03-16T07:54:25.000Z
|
code/tools/chgdef.py
|
jgresula/jagpdf
|
6c36958b109e6522e6b57d04144dd83c024778eb
|
[
"MIT"
] | null | null | null |
code/tools/chgdef.py
|
jgresula/jagpdf
|
6c36958b109e6522e6b57d04144dd83c024778eb
|
[
"MIT"
] | 30
|
2015-03-05T08:52:25.000Z
|
2022-02-17T13:49:15.000Z
|
#!/usr/bin/env python
# Copyright (c) 2005-2009 Jaroslav Gresula
#
# Distributed under the MIT license (See accompanying file
# LICENSE.txt or copy at http://jagpdf.org/LICENSE.txt)
#
import re
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
def _rex( text ):
return re.compile( text, re.MULTILINE | re.DOTALL )
# -- cpp head --------------------
c_head_txt="""// $(Copyright)
// $(License)
//
// \x24Id\x24
"""
c_head_rex="""^// \$\(Copyright\)
// \$\(License\)
//
// \$Id[^\n]*\$$
"""
c_head_repl='(?:^//.*?\n)+|^/\*.*?\*/\n+'
# -- cpp tail --------------------
c_tail_txt="/** EOF @file */\n"
c_tail_rex="^/\*\* EOF @file \*/\n+\Z"
c_tail_repl=""
# -- python head --------------------
py_head_txt="""#!/usr/bin/env python
#
# $(Copyright)
# $(License)
#
# \x24Id\x24
"""
py_head_rex="""#!/usr/bin/env python
#
# \$\(Copyright\)
# \$\(License\)
#
# \$Id[^\n]*\$$
"""
py_head_repl='(?:^#.*?\n)+'
cfg = {}
cfg['*.cpp'] = Bunch( head_t=c_head_txt, head_r=_rex(c_head_rex),\
tail_t=c_tail_txt, tail_r = _rex(c_tail_rex),\
head_repl_r=_rex(c_head_repl), tail_repl_r=_rex(c_tail_repl) )
cfg['*.h']=cfg['*.cpp']
cfg['*.py'] = Bunch( head_t=py_head_txt, head_r=_rex(py_head_rex),\
tail_t=None, tail_r = None,\
head_repl_r=_rex(py_head_repl), tail_repl_r=None )
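# A minimal usage sketch (not part of the original script; the function name
# and workflow are assumptions): strip a recognized header with head_repl_r,
# then prepend the canonical head_t template for that file pattern.
def _example_update_header(text, pattern='*.py'):
    b = cfg[pattern]
    stripped = b.head_repl_r.sub('', text, count=1)
    return b.head_t + stripped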
| 14.494845
| 84
| 0.536984
|
f0c268bbb2e7118901b53b23d5d2938d10a2dbd8
| 106
|
py
|
Python
|
esp8266/MicroPython ESP8266/Default files/boot.py
|
DocVaughan/CRAWLAB-Code-Snippets
|
90c946bef0fbe37401f822d58ce5a6b3c5349616
|
[
"BSD-3-Clause"
] | 12
|
2015-03-03T18:32:03.000Z
|
2021-03-13T18:50:37.000Z
|
esp8266/MicroPython ESP8266/Default files/boot.py
|
DocVaughan/CRAWLAB-Code-Snippets
|
90c946bef0fbe37401f822d58ce5a6b3c5349616
|
[
"BSD-3-Clause"
] | null | null | null |
esp8266/MicroPython ESP8266/Default files/boot.py
|
DocVaughan/CRAWLAB-Code-Snippets
|
90c946bef0fbe37401f822d58ce5a6b3c5349616
|
[
"BSD-3-Clause"
] | 7
|
2017-01-20T20:31:54.000Z
|
2021-12-28T16:52:48.000Z
|
# This file is executed on every boot (including wake-boot from deepsleep)
import webrepl
webrepl.start()
| 26.5
| 74
| 0.792453
|
94c06786dc42937e7f59dd295823ee08c90d1756
| 289
|
py
|
Python
|
Section 5/5.2/code.py
|
PacktPublishing/-Data-Wrangling-with-Python-3.x
|
0273db00752a05e347e149d68decb45ed4778d2c
|
[
"MIT"
] | 16
|
2019-02-07T09:17:11.000Z
|
2022-01-30T12:38:47.000Z
|
Section 5/5.2/code.py
|
PacktPublishing/-Data-Wrangling-with-Python-3.x
|
0273db00752a05e347e149d68decb45ed4778d2c
|
[
"MIT"
] | 1
|
2019-07-06T15:36:28.000Z
|
2019-07-06T15:36:28.000Z
|
Section 5/5.2/code.py
|
PacktPublishing/-Data-Wrangling-with-Python-3.x
|
0273db00752a05e347e149d68decb45ed4778d2c
|
[
"MIT"
] | 15
|
2019-02-12T09:46:03.000Z
|
2022-01-04T16:54:28.000Z
|
import pandas as pd
dataset = pd.read_csv('employees.csv')
dataset.dropna(axis = 0, inplace = True)
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(dataset['Gender'])
gender_encoded = le.transform(dataset['Gender'])
dataset['Gender_encoded'] = gender_encoded
| 24.083333
| 48
| 0.768166
|
51ad27efa118b1e7ab8939a8d37af56debee4587
| 894
|
py
|
Python
|
styles.py
|
imphatic/source-crawler
|
9c7f2d95e3f232367d9b6243dc2c9897a9d09442
|
[
"MIT"
] | null | null | null |
styles.py
|
imphatic/source-crawler
|
9c7f2d95e3f232367d9b6243dc2c9897a9d09442
|
[
"MIT"
] | null | null | null |
styles.py
|
imphatic/source-crawler
|
9c7f2d95e3f232367d9b6243dc2c9897a9d09442
|
[
"MIT"
] | null | null | null |
class Styles:
# Colors
colors = {
'background': 'black',
'foreground': 'white'
}
# object styles
object_styles = {
'label': {
'bg': colors['background'],
'fg': colors['foreground'],
'font': 'none 12 bold',
'text': 'Add Label Text'
},
'entry': {
'width': 30,
'bg': colors['background'],
'fg': colors['foreground']
},
'text': {
'height': 20,
'width': 60,
'bg': colors['background'],
'fg': colors['foreground']
}
}
def __new__(cls, style_name, other_styles=None):
cls.style_name = style_name
default_styles = cls.object_styles[style_name].copy()
if other_styles:
default_styles.update(other_styles)
return default_styles
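# Illustrative usage (hypothetical call site): __new__ deliberately returns a
# plain dict rather than a Styles instance, so defaults merge with overrides:
#     Styles('label', {'text': 'Username:'})  # label defaults + custom text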
| 21.804878
| 61
| 0.474273
|
d37bfd935849cc1a730ce5de39bc9e380ac2b2bb
| 7,612
|
py
|
Python
|
Helper.py
|
RodiiHernandezz/ZendaJS
|
7091716d35c70442897ac47841352865018661fb
|
[
"MIT"
] | null | null | null |
Helper.py
|
RodiiHernandezz/ZendaJS
|
7091716d35c70442897ac47841352865018661fb
|
[
"MIT"
] | null | null | null |
Helper.py
|
RodiiHernandezz/ZendaJS
|
7091716d35c70442897ac47841352865018661fb
|
[
"MIT"
] | null | null | null |
# ***
# * Copyright (C) Rodolfo Herrera Hernandez. All rights reserved.
# * Licensed under the MIT license. See LICENSE file in the project root
# * for full license information.
# *
# * =+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# *
# * For related information - https://github.com/codewithrodi/ZendaJS/
# *
# * =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# ****/
from os import system
from time import sleep
import sys, platform
# Just tested in Linux Mint
kSupportedOperativeSystems = ['Darwin', 'Linux']
kOperativeSystem = platform.system()
kRequiredPackages = {
'Build': {
'Linux': [
'sudo apt-get install make',
'sudo apt-get install python3.8-dev',
'sudo apt-get install g++',
            'sudo apt-get install python3',
'sudo apt-get install gtk+-3.0',
'sudo apt-get install webkit2gtk-4.0'
],
'Darwin': [
'brew install make',
'brew install gcc',
'brew install python'
]
},
'Dependencies': {
'Linux': [
'sudo apt-get install python3-pip',
            'pip install python3-tk',
'pip install python3.7-tk'
],
'Darwin': [
'brew install python-tk'
]
}
}
def FinishScript() -> None:
print('\n\n * Remember drink water <3\n\n')
def ClearScreen() -> None:
if kOperativeSystem == 'Windows':
system('cls')
else:
system('clear')
def ContinueOrExit() -> None:
    Choice = input('\n * Do you want to continue? [Y/n] > ')
    if Choice.lower() != 'y':
        FinishScript()
        sys.exit(0)
def InstallPackages(PackageList = 'Dependencies') -> None:
kPackageList = kRequiredPackages[PackageList][kOperativeSystem]
for kCommand in kPackageList:
print(f' * Installing <{kCommand}>\n')
system(kCommand)
ClearScreen()
def SupportedOperativeSystemOrExit() -> None:
    if kOperativeSystem not in kSupportedOperativeSystems:
        print(f' * Error, unsupported OS <{kOperativeSystem}>.')
        FinishScript()
        sys.exit(0)
else:
print(f'\n > Compatible <{kOperativeSystem}>')
def InstallationSuccess() -> None:
print('''\
* Installation completed
- Remember to follow the instructions you have in
- the Github repository, because you have to
- execute a series of commands so that you can
- install Zenda correctly on your system
- along with its required packages.
* If you have mistakes...
- Mail => contact@codewithrodi.com
- Website => https://codewithrodi.com/
- Repository => https://github.com/codewithrodi/ZendaJS/
- If you are installing Zenda remember to correctly follow the
- instructions indicated in the Github repository.
> Remember drink water :3.''')
def DisplayPackagesToInstall(PackageList = 'Dependencies') -> None:
print('\n * Packages\n')
for kPackage in kRequiredPackages[PackageList][kOperativeSystem]:
print(f' -> {kPackage}')
print('\n > Installing packages in the next 7 seconds...')
sleep(7)
ClearScreen()
try:
kScriptCallArguments = sys.argv
ClearScreen()
if '--runtime.dependencies' in kScriptCallArguments:
print('''\
* ZendaJS -> Install
- Welcome to the installation of Zenda in your machine, next the
- necessary packages will be installed so that Zenda works
- correctly in your computer.
* Attention
- There are processes that you must execute yourself from the
- console, we cannot do everything from the programming
- language, in the same way if you do not carry out these
- steps, they are in the README.md of Github so that you
- can execute them and finish the installation of Zenda.
* Now what?
- Then you must decide if you want to install Zenda on
- your computer or not, for this you must enter (y) or (Y)
- in case you do not want to install and want to finish the
- installer use (n) or (N) or any other character.''')
ContinueOrExit()
ClearScreen()
print('''\
* Perfect
- Then the program will check the operating
- system, if it is compatible you can
- continue with the installation.\n''')
SupportedOperativeSystemOrExit()
DisplayPackagesToInstall('Dependencies')
InstallPackages('Dependencies')
InstallationSuccess()
elif '--build.dependencies' in kScriptCallArguments:
print('''\
* Zenda -> Installer packages required to build the software.
> Warning: This is a beta version not fully tested.
-> What will happen next?
- Next, the necessary packages will be installed to
- compile Zenda, depending on your operating
- system, different packages will be installed, not all
- operating systems are supported, therefore a check
- will be made before so that if it is not
- compatible the execution of the program will end.
-> Why compile?
- It is likely that if you want to alter the source
- code of Zenda, you will need to compile, but you
- will also need to compile in case you are on a
- system whose kernel is different from Linux, ZendaJS
- comes with a ready executable, which you can integrate
- into your operating system from Easy way using the
- commands that you find in Github, but it is likely
- that you will get errors if you are on MacOS, to
- solve these errors you need to compile the program.''')
ContinueOrExit()
ClearScreen()
print('''\
* Perfect
- Then the program will check the operating
- system, if it is compatible you can
- continue with the installation.\n''')
SupportedOperativeSystemOrExit()
DisplayPackagesToInstall('Build')
InstallPackages('Build')
InstallationSuccess()
else:
print('''\
* Error running Helper script
- To use the helper you need to specify a command which will execute
- a series of actions for you, for example if you want to install
- the dependencies that Zenda needs to be able to execute all its
- functionalities without problems you should write the following
- in your command line "python3 Helper.py --runtime.deps", as you
- as you will see the command you are indicating is" --runtime.deps"
- which is captured in the script taking the arguments with which
- you have called the Helper, through that you decide what
- to do and what not to do.
* Commands
=> --runtime.dependencies
- It installs the necessary dependencies to run a Zenda script
- without problems, it may or may not be necessary but it is
- recommended that it be installed to avoid future problems at runtime.
=> --build.dependencies
- Install all the necessary dependencies to be able to build the source
- code of Zenda and thus build a new executable, this can be useful
- if you want to implement new functionalities to the technology, either
- to contribute or for personal profit, you can also make use of this
- command if You have problems when executing the executable and you
- have to create a new one according to your operating system, this
- could happen in MacOS and derivatives.''')
except Exception as Error:
print(f'''\
* An error occurred when the Helper was running :(.
> {Error}''')
except KeyboardInterrupt:
    ClearScreen()
    FinishScript()
| 31.716667
| 102
| 0.638334
|
24b95b815f5908825ef2e07f686e17a674822dfb
| 912
|
py
|
Python
|
flask-new/main.py
|
JanakiRaman-2002/owaspLegacyPage
|
a940a353745317ac2887b65e7b7c397672161f9f
|
[
"Apache-2.0"
] | null | null | null |
flask-new/main.py
|
JanakiRaman-2002/owaspLegacyPage
|
a940a353745317ac2887b65e7b7c397672161f9f
|
[
"Apache-2.0"
] | null | null | null |
flask-new/main.py
|
JanakiRaman-2002/owaspLegacyPage
|
a940a353745317ac2887b65e7b7c397672161f9f
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask,redirect,url_for,render_template
import json
app = Flask(__name__)
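# Note: home() below references data_dict, which is only defined further down
# in this module; that works because the route runs at request time, after the
# whole module has executed.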
@app.route("/")
def home():
return render_template("parallax.html", context=len(data_dict))
data_JSON = """
[
{
"name": "Bonda",
"post":"secure",
"link":"www.google.com"
},
{
"name": "Bunda",
"post":"secure",
"link":"www.google.com"
},
{
"name": "ponda",
"post":"secure",
"link":"www.google.com"
},
{
"name": "konda",
"post":"secure",
"link":"www.google.com"
},
{
"name": "Banda",
"post":"secure",
"link":"www.google.com"
},
{
"name": "londa",
"post":"secure",
"link":"www.google.com"
}
]
"""
# Convert JSON string to dictionary
data_dict = json.loads(data_JSON)
print(data_dict[1]['name'])
if __name__ == "__main__":
app.run()
| 16
| 68
| 0.505482
|
c18f0810be88cb55b0b7a3218bbd98acda2fb189
| 12,655
|
py
|
Python
|
tests/testcases/test_data_collection.py
|
jaelgu/towhee
|
34c79cf50831dc271ae0ab02f319f9e355c2d0bf
|
[
"Apache-2.0"
] | null | null | null |
tests/testcases/test_data_collection.py
|
jaelgu/towhee
|
34c79cf50831dc271ae0ab02f319f9e355c2d0bf
|
[
"Apache-2.0"
] | null | null | null |
tests/testcases/test_data_collection.py
|
jaelgu/towhee
|
34c79cf50831dc271ae0ab02f319f9e355c2d0bf
|
[
"Apache-2.0"
] | null | null | null |
# coding : UTF-8
import sys
sys.path.append("../../tests")
import towhee
import time
import threading
import numpy as np
import random
import operator
from towhee import pipeline
from common import common_func as cf
from towhee.functional import DataCollection
class TestDataCollectionAPIsInvalid:
""" Test case of invalid data collection interface """
def test_data_collection_API_no_parameter(self, API_name):
"""
target: test data collection APIs for invalid scenario
method: call APIs with no parameters
expected: raise exception
"""
API = eval("DataCollection.%s" % API_name)
try:
dc = API()
except Exception as e:
assert "missing" in str(e)
return True
def test_data_collection_API_not_supported_type(self, API_name):
"""
target: test data collection APIs for invalid scenario
method: call APIs with not supported data types
expected: raise exception
"""
if API_name in ["filter", "batch", "rolling", "pmap", "map", "mmap"]:
return True
not_support_datas = ["string", {1, "s", 2, 3}]
API = eval("DataCollection.%s" % API_name)
for not_support_data in not_support_datas:
try:
dc = API(not_support_data)
except Exception as e:
assert "no attribute" in str(e)
return True
class TestDataCollectionAPIsValid:
""" Test case of invalid data collection interface """
def test_data_collection_map_lambda(self):
"""
target: test filter() API for DataCollection
method: create a data collection and map data with lambda
expected: return map successfully
"""
size = 5
dc = DataCollection.range(size)
dc = dc.map(lambda x: x+1)
result = dc.to_list()
assert len(result) == size
return True
def test_data_collection_map_inter(self):
"""
target: test map() API for DataCollection
method: create an iter data collection and map it
expected: return map successfully
"""
size = 5
data = iter(range(size))
dc = towhee.dc(data)
dc = dc.map(str)
result = dc.to_list()
assert len(result) == size
return True
def test_data_collection_map_empty(self):
"""
target: test map() API for DataCollection
method: create an empty data collection and map it
expected: return map successfully
"""
data = []
        dc = towhee.dc(data)
dc = dc.map(str)
result = dc.to_list()
assert result == data
return True
def test_data_collection_map_large_size(self):
"""
target: test map() API for DataCollection
method: create a large size data collection and map it
expected: return map successfully
"""
data = 10000000
dc = DataCollection.range(data)
dc = dc.map(str)
result = dc.to_list()
assert len(result) == data
return True
def test_data_collection_filter_lambda(self):
"""
target: test filter() API for DataCollection
method: create a data collection and filter data with lambda
expected: return filter successfully
"""
size = 5
dc = DataCollection.range(size)
dc = dc.filter(lambda x: x+1)
result = dc.to_list()
assert len(result) == size
return True
def test_data_collection_filter_inter(self):
"""
target: test filter() API for DataCollection
method: create an iter data collection and filter it
expected: return filter successfully
"""
size = 5
data = iter(range(size))
dc = towhee.dc(data)
dc = dc.filter(str)
result = dc.to_list()
assert len(result) == size
return True
def test_data_collection_filter_empty(self):
"""
target: test filter() API for DataCollection
method: create an empty data collection and filter it
expected: return filter successfully
"""
data = []
dc = towhee.dc(data)
dc = dc.filter(str)
result = dc.to_list()
assert result == data
return True
def test_data_collection_filter_large_size(self):
"""
target: test filter() API for DataCollection
method: create a large size data collection and filter it
expected: return filter successfully
"""
data = 10000000
dc = DataCollection.range(data)
dc = dc.filter(str)
result = dc.to_list()
assert len(result) == data
return True
def test_data_collection_zip_lambda(self):
"""
target: test zip() API for DataCollection
method: create a data collection and zip with lambda
expected: return zip successfully
"""
size = 5
dc1 = DataCollection.range(size)
dc2 = dc1.map(lambda x: x+1)
dc3 = dc1.zip(dc2)
result = dc3.to_list()
assert len(result) == size
return True
def test_data_collection_zip_inter(self):
"""
target: test zip() API for DataCollection
method: create an iter data collection and zip it
expected: return zip successfully
"""
size = 7
data = iter(range(size))
dc1 = towhee.dc(data)
dc2 = dc1.map(str)
dc3 = dc1.zip(dc2)
result = dc3.to_list()
assert len(result) == int(size/2)
return True
def test_data_collection_zip_empty(self):
"""
target: test zip() API for DataCollection
method: create an empty data collection and zip it
expected: return zip successfully
"""
data = []
dc1 = towhee.dc(data)
size = 1
dc2 = DataCollection.range(size)
dc3 = dc1.zip(dc2)
result = dc3.to_list()
assert result == data
return True
def test_data_collection_zip_large_size(self):
"""
target: test zip() API for DataCollection
method: create a large size data collection and zip it
expected: return zip successfully
"""
data = 10000000
dc1 = DataCollection.range(data)
dc2 = dc1.filter(str)
dc3 = dc1.zip(dc2)
result = dc3.to_list()
assert len(result) == data
return True
def test_data_collection_batch_data(self):
"""
target: test batch() API for DataCollection
method: create a data collection and batch it
expected: return batch successfully
"""
data = 10
dc = DataCollection.range(data)
size = 3
result = [list(batch) for batch in dc.batch(size, drop_tail=True)]
assert len(result) == int(data/size)
return True
def test_data_collection_batch_inter(self):
"""
target: test batch() API for DataCollection
method: create an iter data collection and batch it
expected: return batch successfully
"""
data_size = 6
data = iter(range(data_size))
dc = towhee.dc(data)
size = 3
result = [list(batch) for batch in dc.batch(size, drop_tail=True)]
assert len(result) == int(data_size/size)
return True
def test_data_collection_batch_large_size(self):
"""
target: test batch() API for DataCollection
method: create a large size data collection and batch with large size
expected: return batch successfully
"""
data = 10000000
dc = DataCollection.range(data)
size = 1000000
result = [list(batch) for batch in dc.batch(size, drop_tail=True)]
assert len(result) == int(data/size)
return True
def test_data_collection_batch_size_empty(self):
"""
target: test batch() API for DataCollection
method: create a data collection and batch size is empty
expected: return batch successfully
"""
data = 5
dc = DataCollection.range(data)
size = []
result = [list(batch) for batch in dc.batch(size)]
assert len(result) == 1
return True
def test_data_collection_rolling_drop_head(self):
"""
target: test rolling() API for DataCollection
method: create a data collection and rolling it with drop_head is False
expected: return rolling successfully
"""
data = 5
dc = DataCollection.range(data)
size = 3
result = [list(batch) for batch in dc.rolling(size, drop_head=False)]
assert len(result) == data
return True
def test_data_collection_rolling_drop_tail(self):
"""
target: test rolling() API for DataCollection
method: create a data collection and rolling it with drop_tail is False
expected: return rolling successfully
"""
data = 10
dc = DataCollection.range(data)
size = 3
result = [list(batch) for batch in dc.rolling(size, drop_tail=False)]
assert len(result) == data
return True
def test_data_collection_rolling_large_size(self):
"""
target: test rolling() API for DataCollection
method: create a large size data collection and rolling it with large size
expected: return rolling successfully
"""
data = 100000
dc = DataCollection.range(data)
size = 1000
result = [list(batch) for batch in dc.rolling(size, drop_tail=False)]
assert len(result) == data
return True
def test_data_collection_rolling_size_empty(self):
"""
target: test rolling() API for DataCollection
method: create a data collection and rolling size is empty
expected: return rolling successfully
"""
data = 10
dc = DataCollection.range(data)
size = []
result = [list(batch) for batch in dc.rolling(size)]
assert len(result) == 0
return True
def test_data_collection_rolling_inter(self):
"""
target: test rolling() API for DataCollection
method: create an iter data collection and rolling it
expected: return rolling successfully
"""
data_size = 6
data = iter(range(data_size))
dc = towhee.dc(data)
size = 3
result = [list(batch) for batch in dc.rolling(size, drop_tail=False)]
assert len(result) == data_size
return True
def test_data_collection_flatten(self):
"""
target: test flatten() API for DataCollection
method: create a data collection and flatten it
expected: return flatten successfully
"""
data = 10
dc = DataCollection.range(data)
size = 3
res = dc.batch(size)
result = res.flatten().to_list()
assert len(result) == data
return True
def test_data_collection_flatten_large_size(self):
"""
target: test flatten() API for DataCollection
method: create a data collection and flatten it with large size
expected: return flatten successfully
"""
data = 10000000
dc = DataCollection.range(data)
size = 1000000
res = dc.batch(size)
result = res.flatten().to_list()
assert len(result) == data
return True
def test_data_collection_flatten_size_empty(self):
"""
target: test flatten() API for DataCollection
method: create a data collection and flatten it with empty size
expected: return flatten successfully
"""
data = 10
dc = DataCollection.range(data)
size = []
res = dc.batch(size)
result = res.flatten().to_list()
assert len(result) == data
return True
def test_data_collection_flatten_inter(self):
"""
target: test flatten() API for DataCollection
method: create an iter data collection and flatten it
expected: return flatten successfully
"""
data_size = 6
data = iter(range(data_size))
dc = towhee.dc(data)
size = 3
res = dc.batch(size)
result = res.flatten().to_list()
assert len(result) == data_size
return True
| 30.347722
| 82
| 0.59115
|
d1b1f6b684725808d36921aa5d4204fd683ee894
| 276
|
py
|
Python
|
tests/test_repr.py
|
eikevons/pandas-paddles
|
98e519ce847d015b76bc3401d534b8b752dd583d
|
[
"MIT"
] | 4
|
2022-02-24T09:35:37.000Z
|
2022-03-19T19:50:47.000Z
|
tests/test_repr.py
|
eikevons/pandas-paddles
|
98e519ce847d015b76bc3401d534b8b752dd583d
|
[
"MIT"
] | null | null | null |
tests/test_repr.py
|
eikevons/pandas-paddles
|
98e519ce847d015b76bc3401d534b8b752dd583d
|
[
"MIT"
] | null | null | null |
from pandas_paddles.df_accessor import AccessorBase
from pandas_paddles import DF, S
def test_DF_repr():
r = repr(DF["key"].attrib.method(arg=1))
assert isinstance(r, str)
def test_S_repr():
r = repr(S["key"].attrib.method(arg=1))
assert isinstance(r, str)
| 23
| 51
| 0.702899
|
8617fa92671d4e047cd7cd2f3cdce81bef6571bb
| 153
|
py
|
Python
|
welltrajconvert/__init__.py
|
bpamos/welltrajconvert
|
86ee684fdb1968218ac17e4b260df80c6c88f475
|
[
"MIT"
] | 7
|
2020-09-24T09:33:53.000Z
|
2022-01-04T14:47:22.000Z
|
welltrajconvert/__init__.py
|
bpamos/directional-survey-converter
|
86ee684fdb1968218ac17e4b260df80c6c88f475
|
[
"MIT"
] | 4
|
2020-06-19T13:54:11.000Z
|
2020-08-13T18:11:38.000Z
|
welltrajconvert/__init__.py
|
bpamos/directional-survey-converter
|
86ee684fdb1968218ac17e4b260df80c6c88f475
|
[
"MIT"
] | 3
|
2020-12-07T19:53:08.000Z
|
2022-03-15T08:49:29.000Z
|
from .wellbore_trajectory import *
from .deviation_survey import *
from .data_source import *
from .data_object import *
from .calculable_object import *
| 30.6
| 34
| 0.810458
|
f97bb1bb64ff1b5082ffe4a81a1d7e07ceeae7f7
| 6,824
|
py
|
Python
|
bayenv2_SNPSFILE_breaker/bayenv2_SNPSFILE_breaker.py
|
tyrmi/PGU
|
bd04b62599e13124426f0830fe53e4324aca8439
|
[
"BSD-3-Clause"
] | 3
|
2017-07-17T07:45:37.000Z
|
2019-04-12T21:03:20.000Z
|
bayenv2_SNPSFILE_breaker/bayenv2_SNPSFILE_breaker.py
|
tyrmi/PGU
|
bd04b62599e13124426f0830fe53e4324aca8439
|
[
"BSD-3-Clause"
] | null | null | null |
bayenv2_SNPSFILE_breaker/bayenv2_SNPSFILE_breaker.py
|
tyrmi/PGU
|
bd04b62599e13124426f0830fe53e4324aca8439
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Copyright © 2017 Jaakko Tyrmi. All Rights Reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import optparse
import os
import shutil
import sys
VERSION = '17.01.30'
NAME = 'bayenv2_SNPSFILE_breaker'
descr = """
This program breaks a bayenv2 SNPSFILE into separate SNPFILEs containing
single SNPs for parallelization purposes. This operation could be achieved
with the unix split command, but this script can (optionally) rename the output
SNPFILEs based on the vcf file from which the variants originated. Vcf to
SNPSFILE conversion can be done with PGDSpider. There is one caveat in using
the vcf input: no sites should be omitted in the vcf to SNPSFILE conversion,
but PGDSpider omits monomorphic sites. The VCF file should therefore be filtered
before conversion so that no sites are omitted during the conversion. Output files
will have file extension ".SNPFILE". Bayenv2 requires that all input files
be in the same directory as the bayenv2 executable, so the bayenv2 executable,
ENVIRONFILE file and MATRIXFILE will be copied to each SNPFILE output
directory to allow a parallelized run. The files will be renamed to
MATRIXFILE.txt and ENVIRONFILE.txt for easy parallelization with
PipelineMaster1000. Refer to bayenv2 manual on how to generate ENVIRONFILE and
MATRIXFILE.
"""
print '\nRunning {0} v.{1}'.format(NAME, VERSION)
parser = optparse.OptionParser(description=descr)
parser.add_option('-i', '--input_file', help='path to a SNPSFILE')
parser.add_option('-o', '--output_dir', help='output directory')
parser.add_option('-v', '--vcf_file', help='original vcf file of which '
'SNPSFILE was made of (optional)')
parser.add_option('-b', '--bayenv2_executable', help='path to bayenv2 executable file')
parser.add_option('-e', '--environfile', help='path to environfile')
parser.add_option('-m', '--matrixfile', help='path to matrixfile')
args = parser.parse_args()[0]
def bayenv2_SNPSFILE_breaker(in_file_path, out_dir_path, vcf_file_path, bayenv2_executable, environfile, matrixfile):
print 'Using parameters:'
print 'SNPSFILE', in_file_path
print 'Output directory', out_dir_path
print 'Bayenv2 executable', bayenv2_executable
print 'Vcf file', vcf_file_path
print 'ENVIRONFILE', environfile
print 'MATRIXFILE', matrixfile
if vcf_file_path is not None:
print 'Reading vcf file...'
in_handle = open(vcf_file_path)
i = 0
vcf_positions = []
for line in in_handle:
i += 1
line = line.strip()
if not line: continue
if line.startswith('#'): continue
line = line.split('\t')
if len(line) < 9:
print 'ERROR! Vcf file format should contain at least 9 columns!'
print 'Line {0} had {1} columns!'.format(i, len(line))
sys.exit(0)
vcf_positions.append('{0}_{1}'.format(line[0], line[1]))
in_handle.close()
print 'Done. Found {0} sites.'.format(len(vcf_positions))
print '\nBreaking SNPSFILE into parts...'
in_handle = open(in_file_path)
i = 0
variants = []
for line in in_handle:
if not line.strip(): continue
i += 1
variants.append(line)
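        # a bayenv2 SNPSFILE carries two allele-count lines per SNP, so a
        # complete SNP record is ready whenever the running line count is even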
if i%2 == 0:
            if vcf_file_path is not None:
                try:
                    position = vcf_positions[(i/2)-1]
                except IndexError:
                    print 'ERROR! The vcf file has fewer variants ({0}) than the input SNPSFILE ({1})!'.format(len(vcf_positions), i/2)
                    sys.exit(0)
            else:
                position = str(i/2).rjust(10, '0')
            current_out_dir_path = os.path.join(out_dir_path, 'bayenv2_input_{0}.SNPFILEDIR'.format(position))
            try:
                os.mkdir(current_out_dir_path)
            except OSError: # Directory exists already
                pass
            output_path = os.path.join(current_out_dir_path, 'bayenv2_input_{0}.SNPFILE'.format(position))
            out_handle = open(output_path, 'w')
out_handle.write(''.join(variants))
out_handle.close()
shutil.copy(bayenv2_executable, current_out_dir_path)
shutil.copy(environfile, os.path.join(current_out_dir_path, 'ENVIRONFILE.txt'))
shutil.copy(matrixfile, os.path.join(current_out_dir_path, 'MATRIXFILE.txt'))
variants = []
if vcf_file_path is not None:
if i/2 > len(vcf_positions):
                print 'ERROR! The vcf file has fewer variants ({0}) than the input SNPSFILE ({1})!'.format(len(vcf_positions), i/2)
print 'The names of output files are wrong!'
sys.exit(0)
print 'Done. Created {0} output SNPFILEs!'.format(i/2)
print '\nProgram run successful!'
bayenv2_SNPSFILE_breaker(args.input_file, args.output_dir, args.vcf_file,
args.bayenv2_executable, args.environfile, args.matrixfile)
| 47.388889
| 135
| 0.66823
|
5fd3e4174012eb2974141f4e43139ea55efa8649
| 1,408
|
py
|
Python
|
common/common.py
|
trinity-project/trinity-eth
|
a4e4fff1d1dbc0b422d7acc21ed95a308cf51967
|
[
"MIT"
] | 15
|
2018-05-11T06:09:47.000Z
|
2020-07-30T05:59:41.000Z
|
common/common.py
|
trinity-project/trinity-eth
|
a4e4fff1d1dbc0b422d7acc21ed95a308cf51967
|
[
"MIT"
] | null | null | null |
common/common.py
|
trinity-project/trinity-eth
|
a4e4fff1d1dbc0b422d7acc21ed95a308cf51967
|
[
"MIT"
] | 6
|
2018-08-06T19:00:35.000Z
|
2020-12-03T02:13:45.000Z
|
# --*-- coding : utf-8 --*--
"""Author: Trinity Core Team
MIT License
Copyright (c) 2018 Trinity
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
import re
from common.log import LOG
def uri_parser(uri):
uri_list = re.split(r'[@:]', uri)
# to check the length
if 3 == len(uri_list):
return uri_list[0].strip(), uri_list[1].strip(), uri_list[2].strip()
raise ValueError('Invalid uri<{}>'.format(uri))
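# Illustrative example (values are hypothetical): a URI of the form
# 'address@host:port' splits into its three components, e.g.
#     uri_parser('wallet@127.0.0.1:8089')  # -> ('wallet', '127.0.0.1', '8089')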
| 38.054054
| 78
| 0.756392
|
bc91fc537d93333b2ea98f567cd7b9a48d1a2a2a
| 2,243
|
py
|
Python
|
aoi-demo-labeling/coco_train_test_split.py
|
goheesheng/AOI_FHG
|
e1ae4a11030aa8f9820d55c1c1fe16ca8a4fb3f3
|
[
"MIT"
] | null | null | null |
aoi-demo-labeling/coco_train_test_split.py
|
goheesheng/AOI_FHG
|
e1ae4a11030aa8f9820d55c1c1fe16ca8a4fb3f3
|
[
"MIT"
] | null | null | null |
aoi-demo-labeling/coco_train_test_split.py
|
goheesheng/AOI_FHG
|
e1ae4a11030aa8f9820d55c1c1fe16ca8a4fb3f3
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import argparse
import json
import copy
import random
import math
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input_path", default="./data/combined_instances_coco.json",
help="Path to a COCO annotations which shall be split.")
parser.add_argument("--test_frac", default=0.33, type=float,
help="Fraction of data which will be used for the test set")
args = parser.parse_args()
input_path = Path(args.input_path)
test_frac = args.test_frac
with open(input_path, 'r') as f:
input_data = json.load(f)
# chip ids are determined by the relative image paths compared to root
img_chip_ids = [
str(Path(img["file_name"]).parent) for img in input_data["images"]]
img_chip_ids = list(set(img_chip_ids))
# split chip ids in train/test
random.shuffle(img_chip_ids)
n_test = math.ceil(test_frac * len(img_chip_ids))
test_ids = img_chip_ids[:n_test]
train_ids = img_chip_ids[n_test:]
train_data = select_data_by_subfolder_name(input_data, train_ids)
test_data = select_data_by_subfolder_name(input_data, test_ids)
update_annotation_ids(train_data["annotations"])
update_annotation_ids(test_data["annotations"])
output_train_path = input_path.parent / "train_instances_coco.json"
output_test_path = input_path.parent / "test_instances_coco.json"
with open(output_train_path, 'w') as out:
json.dump(train_data, out, indent=4, sort_keys=True)
with open(output_test_path, 'w') as out:
json.dump(test_data, out, indent=4, sort_keys=True)
print("Done")
def select_data_by_subfolder_name(data, ids):
selected_data = data.copy()
selected_data["images"] = [img for img in data["images"]
if str(Path(img["file_name"]).parent) in ids]
selected_img_ids = [img["id"] for img in selected_data["images"]]
selected_data["annotations"] = [
ann for ann in data["annotations"] if ann["image_id"] in selected_img_ids]
return selected_data
def update_annotation_ids(anns):
for i, ann in enumerate(anns):
ann["id"] = i+1
if __name__ == "__main__":
main()
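# Example invocation (paths are illustrative):
#     python coco_train_test_split.py \
#         --input_path ./data/combined_instances_coco.json --test_frac 0.25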
| 30.310811
| 86
| 0.682568
|
fa6e5e23d9fce6ea05bd8e28032644b9746eae03
| 1,528
|
py
|
Python
|
examples/desaminases/get_variants_in_UTR.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | 10
|
2015-04-28T14:15:04.000Z
|
2021-03-15T00:07:38.000Z
|
examples/desaminases/get_variants_in_UTR.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | null | null | null |
examples/desaminases/get_variants_in_UTR.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | 6
|
2017-03-16T22:38:41.000Z
|
2021-08-11T00:22:52.000Z
|
#!/usr/bin/env python
__author__ = 'mahajrod'
import os
from Parsers.VCF import CollectionVCF
def variants_in_UTR(collection, feature_type="5'_UTR"):
UTR_record_list = []
for variant in collection:
if feature_type in variant.info_dict["Ftype"]:
UTR_record_list.append(variant)
return CollectionVCF(from_file=False, record_list=UTR_record_list, metadata=collection.metadata, header=collection.header)
if __name__ == "__main__":
workdir = "/media/mahajrod/d9e6e5ee-1bf7-4dba-934e-3f898d9611c8/Data/LAN2xx/combined_vcf/clusters/all/pre_UTR_strandness/"
sample_set_names_list = ["PmCDA1_3d",
#"HAP",
"PmCDA1_sub1_3d",
"PmCDA1_6d",
#"HAP_sub1",
"PmCDA1_sub1_6d",
#"A1_3d",
#"A1_6d",
#"A3G_3d",
#"AID_3d",
#"AID_6d"
]
os.chdir(workdir)
feature_type_list = ["5'_UTR", "CDS", "3'_UTR"]
for sample_set in sample_set_names_list:
vcf_file = "%s_good.vcf" % sample_set
variants = CollectionVCF(from_file=True, vcf_file=vcf_file)
for feature_type in feature_type_list:
pre_UTR_variants = variants_in_UTR(variants, feature_type=feature_type)
pre_UTR_variants.write("%s_%s_variants.vcf" % (sample_set, feature_type))
| 38.2
| 126
| 0.567408
|
707d64360b9293f9df614c459b8833113b474d6f
| 699
|
py
|
Python
|
pyasdf/tags/unit/tests/test_unit.py
|
embray/pyasdf
|
3e0a314ec01a88bedfb2ce8def83319362adc5a2
|
[
"BSD-3-Clause"
] | null | null | null |
pyasdf/tags/unit/tests/test_unit.py
|
embray/pyasdf
|
3e0a314ec01a88bedfb2ce8def83319362adc5a2
|
[
"BSD-3-Clause"
] | null | null | null |
pyasdf/tags/unit/tests/test_unit.py
|
embray/pyasdf
|
3e0a314ec01a88bedfb2ce8def83319362adc5a2
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
from astropy import units as u
from .... import asdf
from ....tests import helpers
# TODO: Implement defunit
def test_invalid_unit():
yaml = """
unit: !unit/unit
foo
"""
buff = helpers.yaml_to_asdf(yaml)
ff = asdf.AsdfFile.read(buff)
assert isinstance(ff.tree['unit'], u.UnrecognizedUnit)
def test_unit():
yaml = """
unit: !unit/unit "2.1798721 10-18kg m2 s-2"
"""
buff = helpers.yaml_to_asdf(yaml)
ff = asdf.AsdfFile.read(buff)
assert ff.tree['unit'].is_equivalent(u.Ry)
| 19.416667
| 82
| 0.67382
|
7da4cbc6cae4c8430e2008c7459825a1ff245a6a
| 16,222
|
py
|
Python
|
stage2/server/UCSServer.py
|
vallard/KUBaM
|
abdc60830a810243d198a09e8897366d518dcf33
|
[
"Apache-2.0"
] | null | null | null |
stage2/server/UCSServer.py
|
vallard/KUBaM
|
abdc60830a810243d198a09e8897366d518dcf33
|
[
"Apache-2.0"
] | null | null | null |
stage2/server/UCSServer.py
|
vallard/KUBaM
|
abdc60830a810243d198a09e8897366d518dcf33
|
[
"Apache-2.0"
] | null | null | null |
from ucsmsdk.ucsexception import UcsException
import re, sys
# given an array and a string of numbers, make sure they are all in the array:
#
def check_values(array, csv):
indexes = csv.split(',')
for i in indexes:
try:
i = int(i) - 1
except:
print "bad value: " + i
return False
if i < 0 or i > len(array) - 1:
return False
return True
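# Illustrative example (hypothetical input): with a menu of 8 servers,
# check_values(servers, "2,4,8") returns True only if every 1-based
# index in the CSV string falls inside the list.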
# get the available servers to put in the pool.
def select_kube_servers(handle):
from ucsmsdk.mometa.compute.ComputeRackUnit import ComputeRackUnit
from ucsmsdk.mometa.fabric.FabricComputeSlotEp import FabricComputeSlotEp
print "Listing Available UCS Servers"
filter_string = '(presence, "equipped")'
# get blades
blades = handle.query_classid("fabricComputeSlotEp", filter_string)
# get all connected rack mount servers.
servers = handle.query_classid("computeRackUnit")
m = blades + servers
while True:
for i, s in enumerate(m):
if type(s) is FabricComputeSlotEp:
print "[%d]: Blade %s/%s type %s" % (i+1, s.chassis_id, s.rn, s.model)
if type(s) is ComputeRackUnit:
print "[%d]: Rack %s type %s" % (i+1, s.rn, s.model)
vals = raw_input("(E.g.: 2,4,8): ")
if check_values(m, vals) == True:
k8servers = [m[int(x)-1] for x in vals.split(',')]
print "Install Kubernetes on the following servers:"
for s in k8servers:
if type(s) is FabricComputeSlotEp:
print "\tBlade %s/%s type %s" % (s.chassis_id, s.rn, s.model)
if type(s) is ComputeRackUnit:
print "\tServer %s type %s" % (s.rn, s.model)
yn = raw_input("Is this correct? [N/y]: ")
if yn == "y" or yn == "Y":
return k8servers
def createKubeBootPolicy(handle):
print "Creating Kube Boot Policy"
from ucsmsdk.mometa.lsboot.LsbootPolicy import LsbootPolicy
from ucsmsdk.mometa.lsboot.LsbootVirtualMedia import LsbootVirtualMedia
from ucsmsdk.mometa.lsboot.LsbootStorage import LsbootStorage
from ucsmsdk.mometa.lsboot.LsbootLocalStorage import LsbootLocalStorage
from ucsmsdk.mometa.lsboot.LsbootDefaultLocalImage import LsbootDefaultLocalImage
mo = LsbootPolicy(parent_mo_or_dn="org-root", name="kube", descr="Kuberenetes", reboot_on_update="yes", policy_owner="local", enforce_vnic_name="yes", boot_mode="legacy")
mo_1 = LsbootVirtualMedia(parent_mo_or_dn=mo, access="read-only-remote-cimc", lun_id="0", order="2")
mo_2 = LsbootStorage(parent_mo_or_dn=mo, order="1")
mo_2_1 = LsbootLocalStorage(parent_mo_or_dn=mo_2, )
mo_2_1_1 = LsbootDefaultLocalImage(parent_mo_or_dn=mo_2_1, order="1")
handle.add_mo(mo, modify_present=True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteKubeBootPolicy(handle):
mo = handle.query_dn("org-root/boot-policy-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createKubeLocalDiskPolicy(handle):
print "Creating Kube Local Disk Policy"
from ucsmsdk.mometa.storage.StorageLocalDiskConfigPolicy import StorageLocalDiskConfigPolicy
mo = StorageLocalDiskConfigPolicy(parent_mo_or_dn="org-root", protect_config="no", name="kube", descr="Kubernetes", flex_flash_raid_reporting_state="disable", flex_flash_state="disable", policy_owner="local", mode="raid-mirrored")
handle.add_mo(mo)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteKubeLocalDiskPolicy(handle):
print "Deleting Kube Local Disk Policy"
mo = handle.query_dn("org-root/local-disk-config-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createKubeUUIDPools(handle):
print "Creating Kube UUID Pools"
from ucsmsdk.mometa.uuidpool.UuidpoolPool import UuidpoolPool
from ucsmsdk.mometa.uuidpool.UuidpoolBlock import UuidpoolBlock
mo = UuidpoolPool(parent_mo_or_dn="org-root", policy_owner="local", prefix="derived", descr="Kubernetes Pool", assignment_order="default", name="kube")
mo_1 = UuidpoolBlock(parent_mo_or_dn=mo, to="C888-888888888100", r_from="C888-888888888001")
handle.add_mo(mo)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteKubeUUIDPools(handle):
print "Deleting Kube UUID Pool"
mo = handle.query_dn("org-root/uuid-pool-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createKubeServerPool(handle):
print "Creating Kubernetes Compute Pool"
from ucsmsdk.mometa.compute.ComputePool import ComputePool
mo = ComputePool(parent_mo_or_dn="org-root", policy_owner="local", name="Kubernetes", descr="")
handle.add_mo(mo)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def addServersToKubePool(handle, servers):
print "Adding servers to Kubernetes Pool"
from ucsmsdk.mometa.compute.ComputePool import ComputePool
from ucsmsdk.mometa.compute.ComputePooledSlot import ComputePooledSlot
from ucsmsdk.mometa.compute.ComputePooledRackUnit import ComputePooledRackUnit
from ucsmsdk.mometa.compute.ComputeRackUnit import ComputeRackUnit
from ucsmsdk.mometa.fabric.FabricComputeSlotEp import FabricComputeSlotEp
mo = ComputePool(parent_mo_or_dn="org-root", policy_owner="local", name="Kubernetes", descr="")
for s in servers:
if type(s) is FabricComputeSlotEp:
ComputePooledSlot(parent_mo_or_dn=mo, slot_id=re.sub("slot-","", s.slot_id), chassis_id=str(s.chassis_id))
if type(s) is ComputeRackUnit:
ComputePooledRackUnit(parent_mo_or_dn=mo, id=re.sub("rack-unit-","", s.rn))
handle.add_mo(mo, True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteKubeServerPool(handle):
print "Deleting Kubernetes Compute Pool"
mo = handle.query_dn("org-root/compute-pool-Kubernetes")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createServiceProfileTemplate(handle):
print "Creating Kubernetes Service Profile Template"
from ucsmsdk.mometa.ls.LsServer import LsServer
from ucsmsdk.mometa.vnic.VnicConnDef import VnicConnDef
from ucsmsdk.mometa.ls.LsRequirement import LsRequirement
from ucsmsdk.mometa.lstorage.LstorageProfileBinding import LstorageProfileBinding
mo = LsServer(parent_mo_or_dn="org-root",
policy_owner="local",
name="Kubernetes",
descr="Kubernetes Service Profile",
type="updating-template",
# Boot using Kubernetes Boot policy: local Disk, then Remote DVD
boot_policy_name="kube",
# Default Maintenance Policy
maint_policy_name="default",
# scrub policy
scrub_policy_name="kube",
# UUID Pool
ident_pool_name="kube",
# disks we use.
#local_disk_policy_name="kube",
#storage_profile_name="kube",
# virtual media policy
vmedia_policy_name="kube"
)
# create vNIC Connection Policy
VnicConnDef(parent_mo_or_dn=mo,
lan_conn_policy_name="kube")
# create server pool and add to template.
LsRequirement(parent_mo_or_dn=mo, name="Kubernetes")
# add storage profile.
mo_1 = LstorageProfileBinding(parent_mo_or_dn=mo, storage_profile_name="kube")
handle.add_mo(mo, True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
    except Exception as err:
        print err
def deleteServiceProfileTemplate(handle):
print "Deleting Kubernetes Service Profile Template"
print "Deleting Kubernetes Compute Pool"
mo = handle.query_dn("org-root/ls-Kubernetes")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createServers(handle, servers):
print "Creating Kubernetes Service Profiles"
from ucsmsdk.ucsmethodfactory import ls_instantiate_n_named_template
from ucsmsdk.ucsbasetype import DnSet, Dn
for i, s in enumerate(servers):
dn_set = DnSet()
dn = Dn()
sp_name = "kube0%d" % (i+1)
dn.attr_set("value",sp_name)
dn_set.child_add(dn)
elem = ls_instantiate_n_named_template(cookie=handle.cookie,
dn="org-root/ls-Kubernetes",
in_error_on_existing="true",
in_name_set=dn_set,
in_target_org="org-root",
in_hierarchical="false")
try:
mo_list = handle.process_xml_elem(elem)
except UcsException as err:
if err.error_code == "105":
print "\t" + sp_name + " already exists."
else:
print err
def deleteServers(handle):
print "Deleting Kubernetes Nodes"
filter_string = '(dn, "ls-kube[0-9]+", type="re")'
kube = handle.query_classid("lsServer", filter_string)
for k in kube:
print "Deleting " + k.name
handle.remove_mo(k)
try:
handle.commit()
except AttributeError:
print "\talready deleted"
except UcsException as err:
print "\t"+ k.name + ": " + err.error_descr
def createKubeVirtualMedia(handle):
print "Adding Virtual Media Policy"
from urlparse import urlparse
import os.path
yn = False
url = ""
while yn == False:
print "What is the URL for the Boot ISO image?"
url = raw_input("(E.g.: http://192.168.2.2/kubam/centos7.2-boot.iso) : ")
print "You entered: " + url
yn = raw_input("Is this correct? [y/N]: ")
if yn != "y":
yn = False
o = urlparse(url)
paths = os.path.split(o.path)
scheme = o.scheme # http, https
if scheme == "":
scheme = "http"
filename = paths[-1]
address = o.hostname
path = "/".join(paths[:-1])
name = ".".join(paths[-1].split(".")[:-1])
from ucsmsdk.mometa.cimcvmedia.CimcvmediaMountConfigPolicy import CimcvmediaMountConfigPolicy
from ucsmsdk.mometa.cimcvmedia.CimcvmediaConfigMountEntry import CimcvmediaConfigMountEntry
mo = CimcvmediaMountConfigPolicy(name="kube",
retry_on_mount_fail="yes",
parent_mo_or_dn="org-root",
policy_owner="local",
descr="Kubernetes Boot Media")
mo_1 = CimcvmediaConfigMountEntry(parent_mo_or_dn=mo,
mapping_name=name,
device_type="cdd",
mount_protocol=scheme,
remote_ip_address=address,
image_name_variable="none",
image_file_name=filename,
image_path=path)
mo_2 = CimcvmediaConfigMountEntry(parent_mo_or_dn=mo,
mapping_name="kickstartImage",
device_type="hdd",
mount_protocol=scheme,
remote_ip_address=address,
image_name_variable="service-profile-name",
image_path=path)
handle.add_mo(mo, modify_present=True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteVirtualMedia(handle):
print "Deleting Kubernetes Virtual Media Policy"
mo = handle.query_dn("org-root/mnt-cfg-policy-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createScrubPolicy(handle):
from ucsmsdk.mometa.compute.ComputeScrubPolicy import ComputeScrubPolicy
mo = ComputeScrubPolicy(flex_flash_scrub="no",
parent_mo_or_dn="org-root",
name="kube",
disk_scrub="yes",
bios_settings_scrub="no",
descr="Destroy data when SP is unassociated")
handle.add_mo(mo, modify_present=True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def deleteScrubPolicy(handle):
print "Deleting Kubernetes Scrub Policy"
mo = handle.query_dn("org-root/scrub-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def deleteDiskGroupConfig(handle):
print "Deleting Disk Group config"
mo = handle.query_dn("org-root/disk-group-config-Kube_Boot")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def deleteStorageProfile(handle):
print "Deleting Storage Profile"
mo = handle.query_dn("org-root/profile-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createDiskGroupConfig(handle):
print "Adding Disk Group Config"
from ucsmsdk.mometa.lstorage.LstorageDiskGroupConfigPolicy import LstorageDiskGroupConfigPolicy
from ucsmsdk.mometa.lstorage.LstorageDiskGroupQualifier import LstorageDiskGroupQualifier
from ucsmsdk.mometa.lstorage.LstorageVirtualDriveDef import LstorageVirtualDriveDef
mo = LstorageDiskGroupConfigPolicy(parent_mo_or_dn="org-root",
policy_owner="local",
name="kube_boot",
descr="Kubernetes Boot Disk",
raid_level="mirror")
mo_1 = LstorageDiskGroupQualifier(parent_mo_or_dn=mo,
use_remaining_disks="no",
num_ded_hot_spares="unspecified",
drive_type="unspecified",
num_drives="2",
min_drive_size="unspecified",
num_glob_hot_spares="unspecified")
mo_2 = LstorageVirtualDriveDef(parent_mo_or_dn=mo, read_policy="platform-default",
drive_cache="platform-default",
strip_size="platform-default",
io_policy="platform-default",
write_cache_policy="platform-default",
access_policy="platform-default")
handle.add_mo(mo, modify_present=True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def createStorageProfile(handle):
from ucsmsdk.mometa.lstorage.LstorageProfile import LstorageProfile
from ucsmsdk.mometa.lstorage.LstorageDasScsiLun import LstorageDasScsiLun
mo = LstorageProfile(parent_mo_or_dn="org-root",
policy_owner="local",
name="kube",
descr="Kubernetes Storage Profile")
mo_1 = LstorageDasScsiLun(parent_mo_or_dn=mo,
local_disk_policy_name="kube_boot",
auto_deploy="auto-deploy",
expand_to_avail="yes",
lun_map_type="non-shared",
size="1",
fractional_size="0",
admin_state="online",
deferred_naming="no",
order="not-applicable",
name="KubeLUN")
handle.add_mo(mo, modify_present=True)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\talready exists"
def createKubeServers(handle, org):
createKubeBootPolicy(handle)
#createKubeLocalDiskPolicy(handle)
createDiskGroupConfig(handle)
createStorageProfile(handle)
createScrubPolicy(handle)
createKubeUUIDPools(handle)
createKubeServerPool(handle)
createKubeVirtualMedia(handle)
servers = select_kube_servers(handle)
addServersToKubePool(handle, servers)
createServiceProfileTemplate(handle)
createServers(handle, servers)
def deleteKubeServers(handle, org):
deleteServers(handle)
deleteServiceProfileTemplate(handle)
deleteKubeServerPool(handle)
deleteVirtualMedia(handle)
deleteScrubPolicy(handle)
deleteKubeBootPolicy(handle)
deleteStorageProfile(handle)
deleteDiskGroupConfig(handle)
#deleteKubeLocalDiskPolicy(handle)
deleteKubeUUIDPools(handle)
| 36.129176
| 234
| 0.669461
|
b26ed21e0d84dc6ef3708b85d67b17ab3fd8b40b
| 42,684
|
py
|
Python
|
tests/unittests/test_cgr.py
|
felix-walter/pydtnsim
|
89376021fecf1f114c76cde773d7a92ab9fcf464
|
[
"MIT"
] | 8
|
2018-12-11T17:39:44.000Z
|
2021-05-07T10:24:03.000Z
|
tests/unittests/test_cgr.py
|
Elianelin/pydtnsim
|
916b0ebfa2b65b7a80af293dd4c39f862eadeae3
|
[
"MIT"
] | 13
|
2019-01-14T14:08:15.000Z
|
2021-06-12T17:03:43.000Z
|
tests/unittests/test_cgr.py
|
Elianelin/pydtnsim
|
916b0ebfa2b65b7a80af293dd4c39f862eadeae3
|
[
"MIT"
] | 4
|
2019-03-20T15:12:40.000Z
|
2022-02-22T06:16:24.000Z
|
import math
import pytest
from pydtnsim.routing import cgr_anchor
from pydtnsim.routing import cgr_basic
from pydtnsim.routing import scgr
from pydtnsim.backend import QSim
from pydtnsim.routing.cgr_basic import Route, Neighbor
from pydtnsim import Contact, ContactPlan, ContactGraph, Packet
from pydtnsim import ContactIdentifier
from pydtnsim.monitors import MonitorNotifier
testdata = [(cgr_basic), (cgr_anchor)]
testdata_routing = [(cgr_basic), (cgr_anchor), (scgr)]
class DummySimulator():
def __init__(self):
self.env = QSim()
self.notifier = MonitorNotifier(self.env)
@pytest.mark.parametrize("mod", testdata)
def test_load_route_list(mod):
"""Test function that tests the route-finding capabilities of the
load_route_list function and tests the correctness.
"""
# First, create an contact plan that is then converted to the contact graph
# representation and later processed by load_route_list
# The following topology is tested in this test case:
# +---+
# | 4 |
# [20;30]+--+---+--+[70:80]
# | |
# +-+-+ +-+-+
# | 2 | | 3 |
# +-+-+ +-+-+
# | |
# [10;20]+--+---+--+[40:50]
# | 1 |
# +---+
contact_plan = ContactPlan(1000, 0)
contact_plan.add_contact('node1', 'node2', 10, 20)
contact_plan.add_contact('node1', 'node3', 40, 50)
contact_plan.add_contact('node2', 'node4', 20, 30)
contact_plan.add_contact('node3', 'node4', 70, 80)
# Generate contact graph representation
contact_graph = ContactGraph(contact_plan)
# Now generate a route list for possible routes from node1 to node4
route_list_node14 = mod.load_route_list(contact_graph, 'node1', 'node4', 0)
# Make sure that two routes were found
assert len(route_list_node14) == 2
# Assert characteristics of the found routes
route1 = route_list_node14[0]
assert route1.transmission_plan == ([('node1', 'node2', 10, 20, 1000, 0),
('node2', 'node4', 20, 30, 1000, 0)])
assert route1.edt == 20
assert route1.capacity == 10000
assert route1.to_time == 20
route2 = route_list_node14[1]
assert route2.transmission_plan == ([('node1', 'node3', 40, 50, 1000, 0),
('node3', 'node4', 70, 80, 1000, 0)])
assert route2.edt == 70
assert route2.capacity == 10000
assert route2.to_time == 50
@pytest.mark.parametrize("mod", testdata)
def test_load_route_list_unavailable_route(mod):
"""Test function that tests that no route is found.
"""
# First, create an contact plan that is then converted to the contact graph
# representation and later processed by load_route_list
# The following topology is tested in this test case:
# +---+ +---+ +---+ +---+
# | 1 +---------+ 2 +---------+ 3 +---------+ 4 |
# +---+ 35:40 +---+ 20:40 +---+ 20:25 +---+
contact_plan = ContactPlan(1000, 0)
contact_plan.add_contact('node1', 'node2', 35, 40)
contact_plan.add_contact('node2', 'node3', 20, 40)
contact_plan.add_contact('node3', 'node4', 20, 25)
# Generate contact graph representation
contact_graph = ContactGraph(contact_plan)
# Now generate a route list for possible routes from node1 to node4
route_list_node14 = mod.load_route_list(contact_graph, 'node1', 'node4', 0)
# Make sure that two routes were found
assert len(route_list_node14) == 0
@pytest.mark.parametrize("mod", testdata)
def test_load_route_list_no_route(mod):
"""Test function that tests the route-finding capabilities of the
load_route_list function and tests that no route is found if contacts on
route do not add up.
"""
# First, create an contact plan that is then converted to the contact graph
# representation and later processed by load_route_list
# The following topology is tested in this test case:
# +---+
# | 5 |
# [20;30]+--+---+--+[70:80]
# | |
# +-+-+ +-+-+
# | 3 | | 4 |
# +-+-+ +-+-+
# | |
# [10;20]+--+---+--+[40:50]
# | 2 |
# +-+-+
# |
# |[50:60]
# |
# +-+-+
# | 1 |
# +---+
contact_plan = ContactPlan(1000, 0)
# Add contacts
contact_plan.add_contact('node1', 'node2', 50, 60)
contact_plan.add_contact('node2', 'node3', 10, 20)
contact_plan.add_contact('node2', 'node4', 40, 50)
contact_plan.add_contact('node3', 'node5', 20, 30)
contact_plan.add_contact('node4', 'node5', 70, 80)
# Generate contact graph representation
contact_graph = ContactGraph(contact_plan)
    # Now generate a route list for possible routes from node1 to node5
    route_list_node15 = mod.load_route_list(contact_graph, 'node1', 'node5', 0)
    # Make sure that no route was found
    assert len(route_list_node15) == 0
@pytest.mark.parametrize("mod", testdata)
def test_load_route_list_anchoring_first_contact(mod):
"""Test function that tests the route-finding capabilities, in particular
the correct behaviour when the anchoring mechanism is involved and the
limiting contact is the first contact of the route.
"""
# First, create an contact plan that is then converted to the contact graph
# representation and later processed by load_route_list
# The following topology is tested in this test case:
# +---+
# | 5 |
# [0:100]+--+---+--+[0:100]
# | |
# +-+-+ +-+-+
# | 3 | | 4 |
# +-+-+ +-+-+
# | |
# [0:100]+--+---+--+[0:100]
# | 2 |
# +-+-+
# |
# |[30:70]
# |
# +-+-+
# | 1 |
# +---+
contact_plan = ContactPlan(1000, 0)
# Add contacts
contact_plan.add_contact('node1', 'node2', 30, 70)
contact_plan.add_contact('node2', 'node3', 0, 100)
contact_plan.add_contact('node2', 'node4', 0, 100)
contact_plan.add_contact('node3', 'node5', 0, 100)
contact_plan.add_contact('node4', 'node5', 0, 100)
# Generate contact graph representation
contact_graph = ContactGraph(contact_plan)
    # Now generate a route list for possible routes from node1 to node5
    route_list_node15 = mod.load_route_list(contact_graph, 'node1', 'node5', 0)
    # Make sure that only one route is found (as both possible routes run
    # through the identical first limiting contact, one route suffices)
assert len(route_list_node15) == 1
# Check that the route is correct
route = route_list_node15[0]
assert route[0] == [('node1', 'node2', 30, 70, 1000, 0),
('node2', 'node3', 0, 100, 1000, 0),
('node3', 'node5', 0, 100, 1000, 0)] \
or route[0] == [('node1', 'node2', 30, 70, 1000, 0),
('node2', 'node4', 0, 100, 1000, 0),
('node4', 'node5', 0, 100, 1000, 0)]
assert route.edt == 30
assert route.capacity == 40000
assert route.to_time == 70
@pytest.mark.parametrize("mod", testdata)
def test_load_route_list_anchoring_intermediate_contact(mod):
"""Test function that tests the route-finding capabilities, in particular
the correct behaviour when the anchoring mechanism is involved and the
limiting contact is the first contact of the route.
"""
# First, create an contact plan that is then converted to the contact graph
# representation and later processed by load_route_list
# The following topology is tested in this test case:
# +---+
# | 8 |
# [0:100]+--+---+--+[30:90]
# | |
# +-+-+ +-+-+
# | 6 | | 7 |
# +-+-+ +-+-+
# | |
# [0:100]+--+---+--+[30:90]
# | 5 |
# +-+-+
# |
# |[30:70]
# |
# +-+-+
# | 4 |
# [30:90]+--+---+--+[0:100]
# | |
# +-+-+ +-+-+
# | 2 | | 3 |
# +-+-+ +-+-+
# | |
# [30:90]+--+---+--+[0:100]
# | 1 |
# +---+
contact_plan = ContactPlan(1000, 0)
# Add contacts
contact_plan.add_contact('node1', 'node2', 30, 90)
contact_plan.add_contact('node1', 'node3', 0, 100)
contact_plan.add_contact('node2', 'node4', 30, 90)
contact_plan.add_contact('node3', 'node4', 0, 100)
contact_plan.add_contact('node4', 'node5', 30, 70)
contact_plan.add_contact('node5', 'node6', 0, 100)
contact_plan.add_contact('node5', 'node7', 30, 90)
contact_plan.add_contact('node6', 'node8', 0, 100)
contact_plan.add_contact('node7', 'node8', 30, 90)
# Generate contact graph representation
contact_graph = ContactGraph(contact_plan)
    # Now generate a route list for possible routes from node1 to node8
    route_list_node18 = mod.load_route_list(contact_graph, 'node1', 'node8', 0)
    # Make sure that only one route is found (as both possible routes run
    # through the identical intermediate limiting contact, only one route is
    # returned)
assert len(route_list_node18) == 1
# Check that the route is correct
route = route_list_node18[0]
assert route.edt == 30
assert route.capacity == 40000
assert route.to_time == 70
def generate_test_graph(remove_edge26=False):
"""Helper function to generate a contact graph for many testcases."""
# The following topology is tested in this test case:
# +---+
# | 8 |
# [0:100]+--+---+--+[30:90]
# | |
# +-+-+ +-+-+
# | 6 | | 7 |
# +-+-+ +-+-+
# | |
# | |
# [10:40]| |[40:80]
# | |
# | |
# +-+-+ +-+-+
# | 2 | | 3 |
# +-+-+ +-+-+
# | |
# [30:90]+--+---+--+[0:100]
# | 1 |
# +---+
contact_plan = ContactPlan(1000, 0)
# Create list of all nodes
contact_plan.add_contact('node1', 'node2', 30, 90)
contact_plan.add_contact('node1', 'node3', 0, 100)
contact_plan.add_contact('node3', 'node7', 40, 80)
contact_plan.add_contact('node6', 'node8', 0, 100)
contact_plan.add_contact('node7', 'node8', 30, 90)
# Only add edge between node2 and node6 if required
if not remove_edge26:
contact_plan.add_contact('node2', 'node6', 10, 40)
# Generate contact graph representation
contact_graph = ContactGraph(contact_plan)
return contact_graph
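# Typical usage in the tests below (a sketch):
#   contact_graph = generate_test_graph()
#   routes = mod.load_route_list(contact_graph, 'node1', 'node8', 0)
# With remove_edge26=True, the branch via node2 becomes a dead end because
# node2 then has no onward contact towards node8.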
@pytest.mark.parametrize("mod", testdata)
def test_proximate_nodes_base(mod):
"""Test function that tests the identify_proximate_node_list."""
# First, create an contact plan that is then converted to the contact graph
# representation and later processed by identify_proximate_node_list
# Create contact graph of test topology
contact_graph = generate_test_graph()
# Route bundle from node1 to node 8 with size 1 and no deadline
bundle = Packet('node1', 'node8', 1, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 30, 90, 1000, 0)] = Contact(
30, 90, 1000, 'node1', 'node2')
contact_list[('node1', 'node3', 0, 100, 1000, 0)] = Contact(
0, 100, 1000, 'node1', 'node3')
contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] = Contact(
0, math.inf, 1000, 'node1', 'node1')
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 30, 90, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node3', 0, 100, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] \
.register_simulator(dummy)
# Now generate a proximate node list
proximate_nodes = mod.identify_proximate_node_list(
'node1', bundle, contact_graph, route_list, [], 0, contact_list)
    # Make sure that two proximate nodes (one route each) are found
assert len(proximate_nodes) == 2
# Check individual EDT and hops
node1 = proximate_nodes[0]
assert node1.route.edt == 30
assert node1.route.hops == 3
node2 = proximate_nodes[1]
assert node2.route.edt == 40
assert node2.route.hops == 3
@pytest.mark.parametrize("mod", testdata)
def test_proximate_nodes_past_route(mod):
"""Test function that verifys that identify_proximate_node_list() ignores
routes thats' feasibility ended in the past.
"""
# Create contact graph of test topology
contact_graph = generate_test_graph()
# Route bundle from node1 to node 8 with size 1 and no deadline
bundle = Packet('node1', 'node8', 1, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 30, 90, 1000, 0)] = Contact(
30, 90, 1000, 'node1', 'node2')
contact_list[('node1', 'node3', 0, 100, 1000, 0)] = Contact(
0, 100, 1000, 'node1', 'node3')
contact_list[('node1', 'node1', 0, math.inf, math.inf, 0)] = Contact(
0, math.inf, 1000, 'node1', 'node1')
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 30, 90, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node3', 0, 100, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, math.inf, 0)] \
.register_simulator(dummy)
# Now generate a proximate node list with current time set to 50
proximate_nodes = mod.identify_proximate_node_list(
'node1', bundle, contact_graph, route_list, [], 50, contact_list)
    # Make sure that only one route is found; (1->2->6->8) has already
    # expired at t=50
assert len(proximate_nodes) == 1
# Assert that the correct proximate node (and route) is returned
assert proximate_nodes[0].route.transmission_plan == ([('node1', 'node3',
0, 100, 1000, 0),
('node3', 'node7',
40, 80, 1000, 0),
('node7', 'node8',
30, 90, 1000, 0)])
assert proximate_nodes[0].contact == (('node1', 'node3', 0, 100, 1000, 0))
@pytest.mark.parametrize("mod", testdata)
def test_proximate_nodes_edt_after_deadline(mod):
    """Test that routes arriving after the bundle's deadline are ignored."""
    # Create contact graph of test topology
contact_graph = generate_test_graph()
# Route bundle from node1 to node 8 with size 1 and deadline set to 35
bundle = Packet('node1', 'node8', 1, 35)
# Route bundle from node1 to node 8 with size 1 and deadline set to 30
bundle2 = Packet('node1', 'node8', 1, 30)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 30, 90, 1000, 0)] = Contact(
30, 90, 1000, 'node1', 'node2')
contact_list[('node1', 'node3', 0, 100, 1000, 0)] = Contact(
0, 100, 1000, 'node1', 'node3')
contact_list[('node1', 'node1', 0, math.inf, math.inf, 0)] = Contact(
0, math.inf, 1000, 'node1', 'node1')
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 30, 90, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node3', 0, 100, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, math.inf, 0)] \
.register_simulator(dummy)
# Now generate a proximate node list with current time set to 0
proximate_nodes = mod.identify_proximate_node_list(
'node1', bundle, contact_graph, route_list, [], 0, contact_list)
    # Make sure that only one route is found; (1->3->7->8) will not reach the
    # target within the deadline of 35
assert len(proximate_nodes) == 1
# Assert that the correct proximate node (and route) is returned
assert proximate_nodes[0].route.transmission_plan == ([('node1', 'node2',
30, 90, 1000, 0),
('node2', 'node6',
10, 40, 1000, 0),
('node6', 'node8',
0, 100, 1000, 0)])
assert proximate_nodes[0].contact == (('node1', 'node2', 30, 90, 1000, 0))
# Now generate a proximate node list with current time set to 0
proximate_nodes2 = mod.identify_proximate_node_list(
'node1', bundle2, contact_graph, route_list, [], 0, contact_list)
    # Make sure that no route is found; with the deadline set to 30, even
    # (1->2->6->8) no longer reaches the target in time
assert not proximate_nodes2
@pytest.mark.parametrize("mod", testdata)
def test_proximate_nodes_route_capacity(mod):
    """Test that routes with insufficient capacity for a bundle are
    ignored."""
    # Create contact graph of test topology
contact_graph = generate_test_graph()
# Create bundle from node1 to node 8 with size 40 and deadline set to
# infinity
bundle = Packet('node1', 'node8', 40000, math.inf)
# Create bundle from node1 to node 8 with size 41 and deadline set to
# infinity
bundle2 = Packet('node1', 'node8', 41000, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 30, 90, 1000, 0)] = Contact(
30, 90, 1000, 'node1', 'node2')
contact_list[('node1', 'node3', 0, 100, 1000, 0)] = Contact(
0, 100, 1000, 'node1', 'node3')
contact_list[('node1', 'node1', 0, math.inf, math.inf, 0)] = Contact(
0, math.inf, 1000, 'node1', 'node1')
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 30, 90, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node3', 0, 100, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, math.inf, 0)] \
.register_simulator(dummy)
# Now generate a proximate node list for 'bundle'
proximate_nodes = mod.identify_proximate_node_list(
'node1', bundle, contact_graph, route_list, [], 0, contact_list)
# Make sure that routes are found as the bundle is not exceeding the
# capacities of the routes' contacts
assert len(proximate_nodes) == 1
# Now generate a proximate node list for 'bundle2'
proximate_nodes = mod.identify_proximate_node_list(
'node1', bundle2, contact_graph, route_list, [], 0, contact_list)
# Make sure that routes are not found as the bundle's size is larger than
# the capacities of all available routes
assert len(proximate_nodes) == 0
    # Manually reduce the remaining capacity of the 1->2 contact
    contact_list[('node1', 'node2', 30, 90, 1000, 0)].cap_rem = 10000
    # Manually reduce the remaining capacity of the 1->3 contact
    contact_list[('node1', 'node3', 0, 100, 1000, 0)].cap_rem = 40000
# Now generate a proximate node list for 'bundle'
    proximate_nodes = mod.identify_proximate_node_list(
'node1', bundle, contact_graph, route_list, [], 0, contact_list)
# Make sure that one route is found as the bundle is still fitting into
# the queue of the 1->3 contact
assert len(proximate_nodes) == 1
assert proximate_nodes[0].route.transmission_plan == ([('node1', 'node3',
0, 100, 1000, 0),
('node3', 'node7',
40, 80, 1000, 0),
('node7', 'node8',
30, 90, 1000, 0)])
assert proximate_nodes[0].contact == ('node1', 'node3', 0, 100, 1000, 0)
# Now generate a proximate node list for 'bundle2'
    proximate_nodes = mod.identify_proximate_node_list(
'node1', bundle2, contact_graph, route_list, [], 0, contact_list)
# Make sure that routes are not found as the bundle's size is larger than
# the remaining capacities of all feasible contacts to neighbors
assert not proximate_nodes
@pytest.mark.parametrize("mod", testdata)
def test_proximate_nodes_excluded_nodes(mod):
    """Test that neighbors on the excluded nodes list are not considered."""
    # Create contact graph of test topology
contact_graph = generate_test_graph()
# Route bundle from node1 to node 8 with size 1 and deadline set to
# infinity
bundle = Packet('node1', 'node8', 1, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 30, 90, 1000, 0)] = Contact(
30, 90, 1000, 'node1', 'node2')
contact_list[('node1', 'node3', 0, 100, 1000, 0)] = Contact(
0, 100, 1000, 'node1', 'node3')
contact_list[('node1', 'node1', 0, math.inf, math.inf, 0)] = Contact(
0, math.inf, 1000, 'node1', 'node1')
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 30, 90, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node3', 0, 100, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, math.inf, 0)] \
.register_simulator(dummy)
    # Now generate a proximate node list with 'node2' being in the excluded
    # nodes list
proximate_nodes = mod.identify_proximate_node_list(
'node1', bundle, contact_graph, route_list, ['node2'], 0, contact_list)
    # Make sure that only one route is found, as routes via the excluded
    # node2 are ignored
assert len(proximate_nodes) == 1
assert proximate_nodes[0].route.transmission_plan == ([('node1', 'node3',
0, 100, 1000, 0),
('node3', 'node7',
40, 80, 1000, 0),
('node7', 'node8',
30, 90, 1000, 0)])
assert proximate_nodes[0].contact == ('node1', 'node3', 0, 100, 1000, 0)
def create_route(old_route):
    """Convert a tuple-based route description into a Route object."""
    plan = list()
for contact in old_route[0]:
plan.append(
ContactIdentifier(
from_node=contact[0],
to_node=contact[1],
from_time=contact[2],
to_time=contact[3],
datarate=contact[4],
delay=contact[5]))
new_route = Route(
transmission_plan=plan,
edt=old_route[1][0],
capacity=old_route[1][1],
to_time=old_route[1][2],
hops=len(old_route[0]),
next_hop=plan[0])
return new_route
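# Expected input shape for create_route (derived from its body):
#   old_route = ([(from_node, to_node, from_time, to_time, datarate, delay),
#                 ...],
#                (edt, capacity, to_time))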
@pytest.mark.parametrize("mod", testdata)
def test_proximate_nodes_optimize_proximate_node(mod):
    """Test that each proximate node is returned with its optimal route
    (lowest EDT first, then fewest hops)."""
    # Create contact graph of test topology
contact_graph = generate_test_graph()
# Route bundle from node1 to node 8 with size 1 and deadline set to
# infinity
bundle = Packet('node1', 'node8', 1, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create some fake routes that can later be used for optimizing the
# proximate node values
route_list['node8'] = []
# First, add route with worse EDT (50)
route_list['node8'].append(
create_route(([('node1', 'node3', 0, 100, 10, 0),
('node3', 'node4', 0, 100, 10, 0),
('node4', 'node5', 30, 70, 10, 0)], (50, 40, 70))))
# Then add route with better EDT (30)
route_list['node8'].append(
create_route(([('node1', 'node3', 0, 100, 10, 0),
('node3', 'node4', 0, 100, 10, 0),
('node4', 'node5', 30, 70, 10, 0)], (30, 40, 70))))
    # Next, add a route with 5 hops
route_list['node8'].append(
create_route(([('node1', 'node2', 30, 90, 10, 0),
('node3', 'node4', 0, 100, 10, 0),
('node3', 'node4', 0, 100, 10, 0),
('node3', 'node4', 0, 100, 10, 0),
('node4', 'node5', 30, 70, 10, 0)], (30, 40, 70))))
# Then add route with only 4 hops
route_list['node8'].append(
create_route(([('node1', 'node2', 30, 90, 10, 0),
('node3', 'node4', 0, 100, 10, 0),
('node3', 'node4', 0, 100, 10, 0),
('node4', 'node5', 30, 70, 10, 0)], (30, 40, 70))))
# Create contact list object
contact_list = dict()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 30, 90, 10, 0)] = Contact(
30, 90, 10, 'node1', 'node2')
contact_list[('node1', 'node3', 0, 100, 10, 0)] = Contact(
0, 100, 10, 'node1', 'node3')
contact_list[('node1', 'node1', 0, math.inf, 10, 0)] = Contact(
0, math.inf, 10, 'node1', 'node1')
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 30, 90, 10, 0)].register_simulator(dummy)
contact_list[('node1', 'node3', 0, 100, 10, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, 10,
0)].register_simulator(dummy)
    # Now generate a proximate node list (no nodes are excluded this time)
proximate_nodes = mod.identify_proximate_node_list(
'node1', bundle, contact_graph, route_list, [], 0, contact_list)
# Make sure that two feasible proximate nodes are found
assert len(proximate_nodes) == 2
# Assert that the proximate nodes are returned with the correct, optimized
# characteristics
assert proximate_nodes[0].contact == ('node1', 'node3', 0, 100, 10, 0)
assert proximate_nodes[0].route.edt == 30
assert proximate_nodes[0].route.hops == 3
assert proximate_nodes[0].route.transmission_plan == ([('node1', 'node3',
0, 100, 10, 0),
('node3', 'node4',
0, 100, 10, 0),
('node4', 'node5',
30, 70, 10, 0)])
assert proximate_nodes[1].contact == ('node1', 'node2', 30, 90, 10, 0)
assert proximate_nodes[1].route.edt == 30
assert proximate_nodes[1].route.hops == 4
assert proximate_nodes[1].route.transmission_plan == ([
('node1', 'node2', 30, 90, 10, 0), ('node3', 'node4', 0, 100, 10, 0),
('node3', 'node4', 0, 100, 10, 0), ('node4', 'node5', 30, 70, 10, 0)
])
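# The assertions above exercise the per-neighbor optimization order: a lower
# EDT wins first, and on equal EDT the route with fewer hops wins; the final
# hash-based tie-break is covered by test_cgr_optimization below.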
# Test the basic behaviour of the cgr() function
@pytest.mark.parametrize("mod", testdata_routing)
def test_cgr_base(mod):
    # First, create a contact plan that is then converted to the contact
# graph representation and later processed by cgr()
# Create contact graph of test topology
contact_graph = generate_test_graph()
# Route bundle from node1 to node 8 with size 1 and no deadline
bundle = Packet('node1', 'node8', 1, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create limbo list
limbo = list()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 30, 90, 1000, 0)] = Contact(
30, 90, 1000, 'node1', 'node2', debug=True)
contact_list[('node1', 'node3', 0, 100, 1000, 0)] = Contact(
0, 100, 1000, 'node1', 'node3', debug=True)
contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] = Contact(
0, math.inf, 1000, 'node1', 'node1', debug=True)
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 30, 90, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node3', 0, 100, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] \
.register_simulator(dummy)
# Now run cgr for the bundle (with current time set to 0)
mod.cgr(bundle, 'node1', contact_graph, route_list, contact_list, 0, limbo)
    # Make sure that the bundle is enqueued in the queue of the correct contact
assert len(contact_list[('node1', 'node2', 30, 90, 1000, 0)] \
.packet_queue) == 1
assert len(contact_list[('node1', 'node3', 0, 100, 1000, 0)] \
.packet_queue) == 0
assert len(contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] \
.packet_queue) == 0
assert contact_list[('node1', 'node2', 30, 90, 1000, 0)] \
.packet_queue[0] == bundle
# Test the cgr() function when no route to the destination exists
@pytest.mark.parametrize("mod", testdata_routing)
def test_cgr_base_no_route(mod):
    # First, create a contact plan that is then converted to the contact
# graph representation and later processed by cgr()
# The following topology is tested in this test case:
# +---+ +---+ +---+ +---+
# | 1 +---------+ 2 +---------+ 3 +---------+ 4 |
# +---+ 35:40 +---+ 20:40 +---+ 20:25 +---+
contact_plan = ContactPlan(1000, 0)
contact_plan.add_contact('node1', 'node2', 35, 40)
contact_plan.add_contact('node2', 'node3', 20, 40)
contact_plan.add_contact('node3', 'node4', 20, 25)
# Generate contact graph representation
contact_graph = ContactGraph(contact_plan)
    # Route bundle from node1 to node4 with size 1 and no deadline
bundle = Packet('node1', 'node4', 1, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create limbo list
limbo = list()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 35, 40, 1000, 0)] = Contact(
35, 40, 1000, 'node1', 'node2', debug=True)
contact_list[('node2', 'node3', 20, 40, 1000, 0)] = Contact(
20, 40, 1000, 'node2', 'node3', debug=True)
contact_list[('node3', 'node4', 20, 25, 1000, 0)] = Contact(
20, 25, 1000, 'node3', 'node4', debug=True)
contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] = Contact(
0, math.inf, 1000, 'node1', 'node1', debug=True)
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 35, 40, 1000, 0)].register_simulator(dummy)
contact_list[('node2', 'node3', 20, 40, 1000, 0)].register_simulator(dummy)
contact_list[('node3', 'node4', 20, 25, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] \
.register_simulator(dummy)
# Now run cgr for the bundle (with current time set to 0)
mod.cgr(bundle, 'node1', contact_graph, route_list, contact_list, 0, limbo)
    # Make sure that the bundle ends up in limbo and in no contact queue
assert len(limbo) == 1
assert len(contact_list[('node1', 'node2', 35, 40, 1000, 0)] \
.packet_queue) == 0
assert len(contact_list[('node2', 'node3', 20, 40, 1000, 0)] \
.packet_queue) == 0
assert len(contact_list[('node3', 'node4', 20, 25, 1000, 0)] \
.packet_queue) == 0
assert len(contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] \
.packet_queue) == 0
def generate_neighbors(old_neighbors):
    """Convert tuple-based neighbor descriptions into Neighbor objects."""
    new_neighbors = list()
    for neighbor in old_neighbors:
tp = list()
for hops in neighbor[3]:
tp.append(
ContactIdentifier(
from_node=hops[0],
to_node=hops[1],
from_time=hops[2],
to_time=hops[3],
datarate=hops[4],
delay=hops[5]))
route = Route(
transmission_plan=tp,
edt=neighbor[1],
capacity=1000,
to_time=10000,
hops=neighbor[2],
next_hop=ContactIdentifier(*neighbor[0]))
new_neighbor = Neighbor(
contact=route.next_hop,
node_id=route.next_hop.to_node,
route=route)
new_neighbors.append(new_neighbor)
return new_neighbors
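# Expected input shape for generate_neighbors (derived from its body): each
# entry is (contact 6-tuple, edt, hops, transmission_plan), where the plan
# is a list of (from_node, to_node, from_time, to_time, datarate, delay)
# tuples.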
@pytest.mark.parametrize("mod", testdata)
def test_cgr_optimization(mod):
    """Test that cgr() enqueues the bundle following the optimization order:
    lowest EDT first, then fewest hops, then a hash-based tie-break."""
    # Route bundle from node1 to node8 with size 1 and no deadline
bundle = Packet('node1', 'node8', 1, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create limbo list
limbo = list()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 10, 100, 1000, 0)] = Contact(
10, 100, 1000, 'node1', 'node2', debug=True)
    contact_list[('node1', 'node3', 10, 100, 1000, 0)] = Contact(
        10, 100, 1000, 'node1', 'node3', debug=True)
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 10, 100, 1000, 0)] \
.register_simulator(dummy)
contact_list[('node1', 'node3', 10, 100, 1000, 0)] \
.register_simulator(dummy)
# Create a fake proximate node list to isolate the cgr() function's
# behaviour and test it
proximate_nodes = [(('node1', 'node2', 10, 100, 1000, 0), 10, 2,
[('node1', 'node2', 10, 100, 1000, 0)]),
(('node1', 'node3', 10, 100, 1000, 0), 20, 2,
[('node1', 'node3', 10, 100, 1000, 0)])]
proximate_nodes = generate_neighbors(proximate_nodes)
# Now run cgr for the bundle (with current time set to 0)
mod.cgr(
bundle,
'node1',
None,
route_list,
contact_list,
0,
limbo,
proximate_nodes=proximate_nodes)
# Reset bundle so it can be routed from the same node again without
# throwing an exception
bundle.current_node = 'inserted'
# Make sure that the bundle is enqueued in the queue with the best edt
assert len(contact_list[('node1', 'node2', 10, 100, 1000, 0)] \
.packet_queue) == 1
assert not contact_list[('node1', 'node3', 10, 100, 1000, 0)] \
.packet_queue
assert contact_list[('node1', 'node2', 10, 100, 1000, 0)] \
.packet_queue[0] == bundle
# Alter proximate node list so that edt is equal and hops is the relevant
# value
proximate_nodes = [(('node1', 'node2', 10, 100, 1000, 0), 20, 3,
[('node1', 'node2', 10, 100, 1000, 0)]),
(('node1', 'node3', 10, 100, 1000, 0), 20, 2,
[('node1', 'node3', 10, 100, 1000, 0)])]
proximate_nodes = generate_neighbors(proximate_nodes)
# Now run cgr for the bundle (with current time set to 0)
mod.cgr(
bundle,
'node1',
None,
route_list,
contact_list,
0,
limbo,
proximate_nodes=proximate_nodes)
# Reset bundle so it can be routed from the same node again without
# throwing an exception
bundle.current_node = 'inserted'
    # Make sure that the bundle is enqueued in the queue of the route with
    # fewer hops (the EDTs are equal)
assert len(contact_list[('node1', 'node2', 10, 100, 1000,
0)].packet_queue) == 1
assert len(contact_list[('node1', 'node3', 10, 100, 1000,
0)].packet_queue) == 1
assert contact_list[('node1', 'node3', 10, 100, 1000, 0)] \
.packet_queue[0] == bundle
# Alter proximate node list so that edt and hops are equal and hash is the
# deciding value
proximate_nodes = [(('node1', 'node2', 10, 100, 1000, 0), 20, 4,
[('node1', 'node2', 10, 100, 1000, 0)]),
(('node1', 'node3', 10, 100, 1000, 0), 20, 4,
[('node1', 'node3', 10, 100, 1000, 0)])]
proximate_nodes = generate_neighbors(proximate_nodes)
# Now run cgr for the bundle (with current time set to 0)
mod.cgr(
bundle,
'node1',
None,
route_list,
contact_list,
0,
limbo,
proximate_nodes=proximate_nodes)
# Reset bundle so it can be routed from the same node again without
# throwing an exception
bundle.current_node = 'inserted'
if hash('node2') > hash('node3'):
node_a = ('node1', 'node2', 10, 100, 1000, 0)
node_b = ('node1', 'node3', 10, 100, 1000, 0)
else:
node_b = ('node1', 'node2', 10, 100, 1000, 0)
node_a = ('node1', 'node3', 10, 100, 1000, 0)
    # Make sure that the bundle is enqueued in the queue selected by the
    # hash-based tie-break (EDT and hop count are equal)
if len(contact_list[node_a].packet_queue) == 1:
assert len(contact_list[node_b].packet_queue) == 2
assert contact_list[node_b].packet_queue[1] == bundle
elif len(contact_list[node_a].packet_queue) == 2:
assert len(contact_list[node_b].packet_queue) == 1
assert contact_list[node_a].packet_queue[1] == bundle
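# Note: string hashes are randomized per process in Python 3 (PYTHONHASHSEED),
# so either contact may win the tie-break between runs; the two symmetric
# branches above accept both outcomes.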
# Testcase that verifies that bundles which cannot be routed at the moment
# of routing are inserted into the limbo list
@pytest.mark.parametrize("mod", testdata)
def test_cgr_limbo(mod):
# Route bundle from node1 to node 8 with size 1 and no deadline
bundle = Packet('node1', 'node8', 1, math.inf)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create limbo list
limbo = list()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
    # Create a fake proximate node list to isolate the cgr() function's
    # behaviour and test it; no entries this time, to force insertion into
    # limbo
proximate_nodes = []
# Now run cgr for the bundle (with current time set to 0)
mod.cgr(
bundle,
'node1',
None,
route_list,
contact_list,
0,
limbo,
proximate_nodes=proximate_nodes)
    # Make sure that the bundle ends up in the limbo list
assert len(limbo) == 1
assert limbo[0] == bundle
# Test function that verifies the "flooding" mechanism for critical
# bundles in the cgr() function
@pytest.mark.parametrize("mod", testdata_routing)
def test_cgr_critical_bundle(mod):
    # First, create a contact plan that is then converted to the contact graph
# representation and later processed by identify_proximate_node_list
# Create contact graph of test topology
contact_graph = generate_test_graph()
    # Route bundle from node1 to node8 with size 1000 and no deadline,
    # marked as critical
    bundle = Packet('node1', 'node8', 1000, math.inf, False, True)
# Create empty route_list dictionary so route list for destination will be
# regenerated
route_list = dict()
# Create contact list object
contact_list = dict()
# Create limbo list
limbo = list()
# Create Simulation Environment (dummy, will not be run)
env = QSim()
contact_list[('node1', 'node2', 30, 90, 1000, 0)] = Contact(
30, 90, 1000, 'node1', 'node2', debug=True)
contact_list[('node1', 'node3', 0, 100, 1000, 0)] = Contact(
0, 100, 1000, 'node1', 'node3', debug=True)
contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] = Contact(
0, math.inf, 1000, 'node1', 'node1', debug=True)
# Add dummy simulator to the contacts
dummy = DummySimulator()
contact_list[('node1', 'node2', 30, 90, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node3', 0, 100, 1000, 0)].register_simulator(dummy)
contact_list[('node1', 'node1', 0, math.inf, 1000, 0)] \
.register_simulator(dummy)
mod.cgr(bundle, 'node1', contact_graph, route_list, contact_list, 0, limbo)
# Make sure that the bundle is enqueued in all feasible contacts to
# neighbors
assert len(contact_list[('node1', 'node2', 30, 90, 1000,
0)].packet_queue) == 1
assert len(contact_list[('node1', 'node3', 0, 100, 1000,
0)].packet_queue) == 1
contact_graph2 = generate_test_graph(True)
    # Route bundle from node1 to node8 with size 1 and no deadline, marked
    # as critical
    bundle2 = Packet('node1', 'node8', 1, math.inf, False, True)
# Reset route list
route_list = dict()
mod.cgr(bundle2, 'node1', contact_graph2, route_list, contact_list, 0,
limbo)
# Make sure that only neighbors are considered that can reach the
# destination (based on the contact graph knowledge)
assert len(contact_list[('node1', 'node2', 30, 90, 1000,
0)].packet_queue) == 1
assert len(contact_list[('node1', 'node3', 0, 100, 1000,
0)].packet_queue) == 2
| 36.796552
| 79
| 0.582724
|
a2dcba16d7f84d103ddf78a33a64cea8d4d6f1de
| 2,494
|
py
|
Python
|
Demo/crawler_heart/heart/heart.py
|
zuosc/PythonCode
|
3592f5780fc9e335fa880a1f7e9aac0e9d33439e
|
[
"MIT"
] | 1
|
2017-09-24T19:06:55.000Z
|
2017-09-24T19:06:55.000Z
|
Demo/crawler_heart/heart/heart.py
|
zuosc/PythonCode
|
3592f5780fc9e335fa880a1f7e9aac0e9d33439e
|
[
"MIT"
] | null | null | null |
Demo/crawler_heart/heart/heart.py
|
zuosc/PythonCode
|
3592f5780fc9e335fa880a1f7e9aac0e9d33439e
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
import codecs
import csv
import re
import jieba.analyse
import matplotlib.pyplot as plt
import requests
from scipy.misc import imread
from wordcloud import WordCloud
__author__ = 'liuzhijun'
cookies = {
"ALF": "1489741225",
"SCF": "AlE23pvO55c-tjcvq_9U-BwEK7OzuLdN6XnI-Bg1vQPlNcm7Atblq34rFR1PIei5Sp-jX1oikqxt7eLYtw_ZJ4c.",
"SUBP": "0033WrSXqPxfM725Ws9jqgMF55529P9D9W5eUgwfgjhvYJraoCA.ZDn.5JpX5KMhUgL.Foe71K2NeozpSh-2dJLoIEBLxKBLBonL1h5LxKqL1-BL1h5LxKML1K.LB.-LxK.L1hML12et",
"SUB": "_2A251rjzFDeRxGeVO4lMW8izNzzmIHXVXUUSNrDV6PUNbktBeLRnMkW1ZGDRFqMInP5uvmMQRhehPb0ySLA..",
"SUHB": "0BLYeDP6-UgtFq",
"_T_WM": "954dbf366abd053f9c7793df6c4fba79",
"SSOLoginState": "1487555733",
}
def cleaning(content):
    """Strip useless characters (HTML tags, repost markers, punctuation)."""
    pattern = "<a .*?/a>|<i .*?/i>|转发微博|//:|Repost|,|?|。|、|分享图片"
    return re.sub(pattern, "", content)
def fetch_weibo():
    api = "http://m.weibo.cn/index/my?format=cards&page=%s"
    for i in range(1, 102):
        response = requests.get(url=api % i, cookies=cookies)
        data = response.json()[0]
        groups = data.get("card_group") or []
        for group in groups:
            text = group.get("mblog").get("text")
            text = text.encode("utf-8")
            text = cleaning(text).strip()
            if text:
                yield text
def write_csv(texts):
with codecs.open('./weibo.csv', 'w') as f:
writer = csv.DictWriter(f, fieldnames=["text"])
writer.writeheader()
for text in texts:
writer.writerow({"text": text})
def read_csv():
with codecs.open('./weibo.csv', 'r') as f:
reader = csv.DictReader(f)
for row in reader:
yield row['text']
def word_segment(texts):
jieba.analyse.set_stop_words("./stopwords.txt")
for text in texts:
tags = jieba.analyse.extract_tags(text, topK=20)
yield " ".join(tags)
def generate_img(texts):
data = " ".join(text for text in texts)
mask_img = imread('./heart-mask.jpg', flatten=True)
wordcloud = WordCloud(
font_path='msyh.ttc',
background_color='white',
mask=mask_img
).generate(data)
plt.imshow(wordcloud)
plt.axis('off')
plt.savefig('./heart.jpg', dpi=600)
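# Pipeline overview: fetch_weibo() pulls the posts, write_csv() persists
# them, word_segment() extracts keywords with jieba, and generate_img()
# renders the word cloud onto the heart-shaped mask image.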
if __name__ == '__main__':
texts = fetch_weibo()
write_csv(texts)
generate_img(word_segment(read_csv()))
| 28.340909
| 155
| 0.61668
|
5cb8d5a6f969726c0cc6a48d2eaf84d835dbc89e
| 94,122
|
py
|
Python
|
src/sage/manifolds/differentiable/affine_connection.py
|
tashakim/sage
|
467fbc70a08b552b3de33d9065204ee9cbfb02c7
|
[
"BSL-1.0"
] | 4
|
2020-07-17T04:49:44.000Z
|
2020-07-29T06:33:51.000Z
|
src/sage/manifolds/differentiable/affine_connection.py
|
Ivo-Maffei/sage
|
467fbc70a08b552b3de33d9065204ee9cbfb02c7
|
[
"BSL-1.0"
] | null | null | null |
src/sage/manifolds/differentiable/affine_connection.py
|
Ivo-Maffei/sage
|
467fbc70a08b552b3de33d9065204ee9cbfb02c7
|
[
"BSL-1.0"
] | null | null | null |
r"""
Affine Connections
The class :class:`AffineConnection` implements affine connections on
smooth manifolds.
AUTHORS:
- Eric Gourgoulhon, Michal Bejger (2013-2015) : initial version
- Marco Mancini (2015) : parallelization of some computations
- Florentin Jaffredo (2018) : series expansion with respect to a given
parameter
REFERENCES:
- [Lee1997]_
- [KN1963]_
- [ONe1983]_
"""
# *****************************************************************************
# Copyright (C) 2015 Eric Gourgoulhon <eric.gourgoulhon@obspm.fr>
# Copyright (C) 2015 Michal Bejger <bejger@camk.edu.pl>
# Copyright (C) 2015 Marco Mancini <marco.mancini@obspm.fr>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# https://www.gnu.org/licenses/
# *****************************************************************************
from sage.rings.integer import Integer
from sage.structure.sage_object import SageObject
from sage.manifolds.differentiable.manifold import DifferentiableManifold
from sage.parallel.decorate import parallel
from sage.parallel.parallelism import Parallelism
class AffineConnection(SageObject):
r"""
Affine connection on a smooth manifold.
Let `M` be a differentiable manifold of class `C^\infty` (smooth manifold)
over a non-discrete topological field `K` (in most applications `K=\RR`
or `K=\CC`), let `C^\infty(M)` be the algebra of smooth functions
`M\rightarrow K` (cf.
:class:`~sage.manifolds.differentiable.scalarfield_algebra.DiffScalarFieldAlgebra`)
and let `\mathfrak{X}(M)` be the `C^\infty(M)`-module of vector fields on
`M` (cf.
:class:`~sage.manifolds.differentiable.vectorfield_module.VectorFieldModule`).
An *affine connection* on `M` is an operator
.. MATH::
\begin{array}{cccc}
\nabla: & \mathfrak{X}(M)\times \mathfrak{X}(M) & \longrightarrow &
\mathfrak{X}(M) \\
& (u,v) & \longmapsto & \nabla_u v
\end{array}
that
- is `K`-bilinear, i.e. is bilinear when considering `\mathfrak{X}(M)` as a
vector space over `K`
- is `C^\infty(M)`-linear w.r.t. the first argument:
`\forall f\in C^\infty(M),\ \nabla_{fu} v = f\nabla_u v`
- obeys Leibniz rule w.r.t. the second argument:
`\forall f\in C^\infty(M),\ \nabla_u (f v) = \mathrm{d}f(u)\, v + f \nabla_u v`
The affine connection `\nabla` gives birth to the *covariant derivative
operator* acting on tensor fields, denoted by the same symbol:
.. MATH::
\begin{array}{cccc}
\nabla: & T^{(k,l)}(M) & \longrightarrow & T^{(k,l+1)}(M)\\
& t & \longmapsto & \nabla t
\end{array}
where `T^{(k,l)}(M)` stands for the `C^\infty(M)`-module of tensor fields
of type `(k,l)` on `M` (cf.
:class:`~sage.manifolds.differentiable.tensorfield_module.TensorFieldModule`),
with the convention `T^{(0,0)}(M):=C^\infty(M)`.
For a vector field `v`, the covariant derivative `\nabla v` is a
type-(1,1) tensor field such that
.. MATH::
\forall u \in\mathfrak{X}(M), \ \nabla_u v = \nabla v(., u)
More generally for any tensor field `t\in T^{(k,l)}(M)`, we have
.. MATH::
\forall u \in\mathfrak{X}(M), \ \nabla_u t = \nabla t(\ldots, u)
.. NOTE::
The above convention means that, in terms of index notation,
the "derivation index" in `\nabla t` is the *last* one:
.. MATH::
\nabla_c t^{a_1\ldots a_k}_{\quad\quad b_1\ldots b_l} =
(\nabla t)^{a_1\ldots a_k}_{\quad\quad b_1\ldots b_l c}
INPUT:
- ``domain`` -- the manifold on which the connection is defined
(must be an instance of class
:class:`~sage.manifolds.differentiable.manifold.DifferentiableManifold`)
- ``name`` -- name given to the affine connection
- ``latex_name`` -- (default: ``None``) LaTeX symbol to denote the affine
connection; if ``None``, it is set to ``name``.
EXAMPLES:
Affine connection on a 3-dimensional manifold::
sage: M = Manifold(3, 'M', start_index=1)
sage: c_xyz.<x,y,z> = M.chart()
sage: nab = M.affine_connection('nabla', r'\nabla') ; nab
Affine connection nabla on the 3-dimensional differentiable manifold M
A just-created connection has no connection coefficients::
sage: nab._coefficients
{}
The connection coefficients relative to the manifold's default frame
[here `(\partial/\partial x, \partial/\partial y, \partial/\partial z)`],
are created by providing the relevant indices inside square brackets::
sage: nab[1,1,2], nab[3,2,3] = x^2, y*z # Gamma^1_{12} = x^2, Gamma^3_{23} = yz
sage: nab._coefficients
{Coordinate frame (M, (d/dx,d/dy,d/dz)): 3-indices components w.r.t.
Coordinate frame (M, (d/dx,d/dy,d/dz))}
If not the default one, the vector frame w.r.t. which the connection
coefficients are defined can be specified as the first argument inside the
square brackets; hence the above definition is equivalent to::
sage: nab[c_xyz.frame(), 1,1,2], nab[c_xyz.frame(),3,2,3] = x^2, y*z
sage: nab._coefficients
{Coordinate frame (M, (d/dx,d/dy,d/dz)): 3-indices components w.r.t.
Coordinate frame (M, (d/dx,d/dy,d/dz))}
Unset components are initialized to zero::
sage: nab[:] # list of coefficients relative to the manifold's default vector frame
[[[0, x^2, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, y*z], [0, 0, 0]]]
The treatment of connection coefficients in a given vector frame is similar
to that of tensor components; see therefore the class
:class:`~sage.manifolds.differentiable.tensorfield.TensorField` for the
documentation. In particular, the square brackets return the connection
coefficients as instances of
:class:`~sage.manifolds.chart_func.ChartFunction`,
while the double square brackets return a scalar field::
sage: nab[1,1,2]
x^2
sage: nab[1,1,2].display()
(x, y, z) |--> x^2
sage: type(nab[1,1,2])
<class 'sage.manifolds.chart_func.ChartFunctionRing_with_category.element_class'>
sage: nab[[1,1,2]]
Scalar field on the 3-dimensional differentiable manifold M
sage: nab[[1,1,2]].display()
M --> R
(x, y, z) |--> x^2
sage: nab[[1,1,2]].coord_function() is nab[1,1,2]
True
Action on a scalar field::
sage: f = M.scalar_field(x^2 - y^2, name='f')
sage: Df = nab(f) ; Df
1-form df on the 3-dimensional differentiable manifold M
sage: Df[:]
[2*x, -2*y, 0]
The action of an affine connection on a scalar field must
coincide with the differential::
sage: Df == f.differential()
True
A generic affine connection has some torsion::
sage: DDf = nab(Df) ; DDf
Tensor field nabla(df) of type (0,2) on the 3-dimensional
differentiable manifold M
sage: DDf.antisymmetrize()[:] # nabla does not commute on scalar fields:
[ 0 -x^3 0]
[ x^3 0 0]
[ 0 0 0]
Let us check the standard formula
.. MATH::
\nabla_j \nabla_i \, f - \nabla_i \nabla_j \, f =
T^k_{\ \, ij} \nabla_k \, f ,
where the `T^k_{\ \, ij}`'s are the components of the connection's
torsion tensor::
sage: 2*DDf.antisymmetrize() == nab.torsion().contract(0,Df)
True
The connection acting on a vector field::
sage: v = M.vector_field(y*z, x*z, x*y, name='v')
sage: Dv = nab(v) ; Dv
Tensor field nabla(v) of type (1,1) on the 3-dimensional differentiable
manifold M
sage: Dv[:]
[ 0 (x^2*y + 1)*z y]
[ z 0 x]
[ y x x*y*z^2]
Another example: connection on a non-parallelizable 2-dimensional manifold::
sage: M = Manifold(2, 'M')
sage: U = M.open_subset('U') ; V = M.open_subset('V')
sage: M.declare_union(U,V) # M is the union of U and V
sage: c_xy.<x,y> = U.chart() ; c_uv.<u,v> = V.chart()
sage: transf = c_xy.transition_map(c_uv, (x+y, x-y), intersection_name='W',
....: restrictions1= x>0, restrictions2= u+v>0)
sage: inv = transf.inverse()
sage: W = U.intersection(V)
sage: eU = c_xy.frame() ; eV = c_uv.frame()
sage: c_xyW = c_xy.restrict(W) ; c_uvW = c_uv.restrict(W)
sage: eUW = c_xyW.frame() ; eVW = c_uvW.frame()
sage: nab = M.affine_connection('nabla', r'\nabla')
The connection is first defined on the open subset U by means of its
coefficients w.r.t. the frame eU (the manifold's default frame)::
sage: nab[0,0,0], nab[1,0,1] = x, x*y
    The coefficients w.r.t. the frame eV are deduced by continuation of the
    coefficients w.r.t. the frame eVW on the open subset `W=U\cap V`::
sage: for i in M.irange():
....: for j in M.irange():
....: for k in M.irange():
....: nab.add_coef(eV)[i,j,k] = nab.coef(eVW)[i,j,k,c_uvW].expr()
    At this stage, the connection is fully defined on the whole manifold::
sage: nab.coef(eU)[:]
[[[x, 0], [0, 0]], [[0, x*y], [0, 0]]]
sage: nab.coef(eV)[:]
[[[1/16*u^2 - 1/16*v^2 + 1/8*u + 1/8*v, -1/16*u^2 + 1/16*v^2 + 1/8*u + 1/8*v],
[1/16*u^2 - 1/16*v^2 + 1/8*u + 1/8*v, -1/16*u^2 + 1/16*v^2 + 1/8*u + 1/8*v]],
[[-1/16*u^2 + 1/16*v^2 + 1/8*u + 1/8*v, 1/16*u^2 - 1/16*v^2 + 1/8*u + 1/8*v],
[-1/16*u^2 + 1/16*v^2 + 1/8*u + 1/8*v, 1/16*u^2 - 1/16*v^2 + 1/8*u + 1/8*v]]]
We may let it act on a vector field defined globally on `M`::
sage: a = M.vector_field({eU: [-y,x]}, name='a')
sage: a.add_comp_by_continuation(eV, W, c_uv)
sage: a.display(eU)
a = -y d/dx + x d/dy
sage: a.display(eV)
a = v d/du - u d/dv
sage: da = nab(a) ; da
Tensor field nabla(a) of type (1,1) on the 2-dimensional differentiable
manifold M
sage: da.display(eU)
nabla(a) = -x*y d/dx*dx - d/dx*dy + d/dy*dx - x*y^2 d/dy*dy
sage: da.display(eV)
nabla(a) = (-1/16*u^3 + 1/16*u^2*v + 1/16*(u + 2)*v^2 - 1/16*v^3 - 1/8*u^2) d/du*du
+ (1/16*u^3 - 1/16*u^2*v - 1/16*(u - 2)*v^2 + 1/16*v^3 - 1/8*u^2 + 1) d/du*dv
+ (1/16*u^3 - 1/16*u^2*v - 1/16*(u - 2)*v^2 + 1/16*v^3 - 1/8*u^2 - 1) d/dv*du
+ (-1/16*u^3 + 1/16*u^2*v + 1/16*(u + 2)*v^2 - 1/16*v^3 - 1/8*u^2) d/dv*dv
A few tests::
sage: nab(a.restrict(V)) == da.restrict(V)
True
sage: nab.restrict(V)(a) == da.restrict(V)
True
sage: nab.restrict(V)(a.restrict(U)) == da.restrict(W)
True
sage: nab.restrict(U)(a.restrict(V)) == da.restrict(W)
True
Same examples with SymPy as the engine for symbolic calculus::
sage: M.set_calculus_method('sympy')
sage: nab = M.affine_connection('nabla', r'\nabla')
sage: nab[0,0,0], nab[1,0,1] = x, x*y
sage: for i in M.irange():
....: for j in M.irange():
....: for k in M.irange():
....: nab.add_coef(eV)[i,j,k] = nab.coef(eVW)[i,j,k,c_uvW].expr()
    At this stage, the connection is fully defined on the whole manifold::
sage: nab.coef(eU)[:]
[[[x, 0], [0, 0]], [[0, x*y], [0, 0]]]
sage: nab.coef(eV)[:]
[[[u**2/16 + u/8 - v**2/16 + v/8, -u**2/16 + u/8 + v**2/16 + v/8],
[u**2/16 + u/8 - v**2/16 + v/8, -u**2/16 + u/8 + v**2/16 + v/8]],
[[-u**2/16 + u/8 + v**2/16 + v/8, u**2/16 + u/8 - v**2/16 + v/8],
[-u**2/16 + u/8 + v**2/16 + v/8, u**2/16 + u/8 - v**2/16 + v/8]]]
We may let it act on a vector field defined globally on `M`::
sage: a = M.vector_field({eU: [-y,x]}, name='a')
sage: a.add_comp_by_continuation(eV, W, c_uv)
sage: a.display(eU)
a = -y d/dx + x d/dy
sage: a.display(eV)
a = v d/du - u d/dv
sage: da = nab(a) ; da
Tensor field nabla(a) of type (1,1) on the 2-dimensional differentiable
manifold M
sage: da.display(eU)
nabla(a) = -x*y d/dx*dx - d/dx*dy + d/dy*dx - x*y**2 d/dy*dy
sage: da.display(eV)
nabla(a) = (-u**3/16 + u**2*v/16 - u**2/8 + u*v**2/16 - v**3/16 + v**2/8) d/du*du
+ (u**3/16 - u**2*v/16 - u**2/8 - u*v**2/16 + v**3/16 + v**2/8 + 1) d/du*dv
+ (u**3/16 - u**2*v/16 - u**2/8 - u*v**2/16 + v**3/16 + v**2/8 - 1) d/dv*du
+ (-u**3/16 + u**2*v/16 - u**2/8 + u*v**2/16 - v**3/16 + v**2/8) d/dv*dv
"""
def __init__(self, domain, name, latex_name=None):
r"""
Construct an affine connection.
TESTS::
sage: M = Manifold(3, 'M')
sage: from sage.manifolds.differentiable.affine_connection import \
AffineConnection
sage: nab = AffineConnection(M, 'nabla', latex_name=r'\nabla')
sage: nab
Affine connection nabla on the 3-dimensional differentiable
manifold M
sage: X.<x,y,z> = M.chart()
sage: nab[0,1,0] = x*y*z
sage: TestSuite(nab).run()
"""
if not isinstance(domain, DifferentiableManifold):
raise TypeError("the first argument must be a differentiable " +
"manifold")
self._domain = domain
self._name = name
if latex_name is None:
self._latex_name = self._name
else:
self._latex_name = latex_name
self._coefficients = {} # dict. of connection coefficients, with the
# vector frames as keys
# Initialization of derived quantities:
AffineConnection._init_derived(self)
def _repr_(self):
r"""
String representation of the object.
TESTS::
sage: M = Manifold(5, 'M')
sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
sage: nab._repr_()
'Affine connection nabla on the 5-dimensional differentiable manifold M'
sage: repr(nab) # indirect doctest
'Affine connection nabla on the 5-dimensional differentiable manifold M'
"""
description = "Affine connection"
if self._name is not None:
description += " " + self._name
description += " on the {}".format(self._domain)
return description
def _latex_(self):
r"""
LaTeX representation of the object.
TESTS::
sage: M = Manifold(5, 'M')
sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
sage: nab._latex_()
'\\nabla'
sage: latex(nab) # indirect doctest
\nabla
sage: nab = M.affine_connection('D')
sage: nab._latex_()
'D'
sage: latex(nab) # indirect doctest
D
"""
return self._latex_name
def _init_derived(self):
r"""
Initialize the derived quantities.
TESTS::
sage: M = Manifold(4, 'M')
sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
sage: nab._init_derived()
"""
self._restrictions = {} # dict. of restrictions of ``self`` on some
# subdomains, with the subdomains as keys
self._torsion = None
self._riemann = None
self._ricci = None
self._connection_forms = {} # dict. of dict. of connection 1-forms
# (key: vector frame)
self._torsion_forms = {} # dict. of dict. of torsion 1-forms
# (key: vector frame)
self._curvature_forms = {} # dict. of dict. of curvature 2-forms
# (key: vector frame)
self._hash = -1
def _del_derived(self):
r"""
Delete the derived quantities.
TESTS::
sage: M = Manifold(4, 'M')
sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
sage: nab._del_derived()
"""
self._restrictions.clear()
self._torsion = None
self._riemann = None
self._ricci = None
self._connection_forms.clear()
self._torsion_forms.clear()
self._curvature_forms.clear()
def __eq__(self, other):
r"""
Comparison (equality) operator.
INPUT:
- ``other`` -- an affine connection
OUTPUT:
- ``True`` if ``self`` is equal to ``other`` and ``False`` otherwise
TESTS::
sage: M = Manifold(2, 'M')
sage: X.<x,y> = M.chart()
sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
sage: nab[0,1,0], nab[0,1,1] = 1+x, x*y
sage: nab.display()
Gam^x_yx = x + 1
Gam^x_yy = x*y
sage: nab1 = M.affine_connection('nabla', latex_name=r'\nabla')
sage: (nab1 == nab) or (nab == nab1)
False
sage: nab1[0,1,0], nab1[0,1,1] = 2, 3-y
sage: (nab1 == nab) or (nab == nab1)
False
sage: nab1[0,1,0], nab1[0,1,1] = 1+x, x*y
sage: (nab1 == nab) and (nab == nab1)
True
sage: nab2 = M.affine_connection('nabla', latex_name=r'\nabla')
sage: a = M.automorphism_field()
sage: a[:] = [[0,1], [1,0]]
sage: e = X.frame().new_frame(a, 'e')
sage: nab2.set_coef(e)[1,0,1] = 1+x
sage: nab2.set_coef(e)[1,0,0] = x*y
sage: (nab2 == nab) and (nab == nab2)
True
sage: f = M.vector_frame('f')
sage: nab2.set_coef(f)[1,0,1] = x-y
sage: (nab2 == nab) or (nab == nab2)
False
"""
if other is self:
return True
if not isinstance(other, AffineConnection):
return False
if other._domain != self._domain:
return False
        if not self._coefficients:
return False
for frame, coef in self._coefficients.items():
try:
if other.coef(frame) != coef:
return False
except ValueError:
return False
return True
def __ne__(self, other):
r"""
Inequality operator.
INPUT:
- ``other`` -- an affine connection
OUTPUT:
- ``True`` if ``self`` is different from ``other`` and ``False``
otherwise
TESTS::
sage: M = Manifold(2, 'M')
sage: X.<x,y> = M.chart()
sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
sage: nab[0,1,0], nab[0,1,1] = 1+x, x*y
sage: nab1 = M.affine_connection('nabla', latex_name=r'\nabla')
sage: (nab1 != nab) and (nab != nab1)
True
sage: nab1[0,1,0], nab1[0,1,1] = 2, 3-y
sage: (nab1 != nab) and (nab != nab1)
True
sage: nab1[0,1,0], nab1[0,1,1] = 1+x, x*y
sage: (nab1 != nab) or (nab != nab1)
False
"""
return not (self == other)
def domain(self):
r"""
Return the manifold subset on which the affine connection is defined.
OUTPUT:
- instance of class
:class:`~sage.manifolds.differentiable.manifold.DifferentiableManifold`
representing the manifold on which ``self`` is defined.
EXAMPLES::
sage: M = Manifold(3, 'M', start_index=1)
sage: c_xyz.<x,y,z> = M.chart()
sage: nab = M.affine_connection('nabla', r'\nabla')
sage: nab.domain()
3-dimensional differentiable manifold M
sage: U = M.open_subset('U', coord_def={c_xyz: x>0})
sage: nabU = U.affine_connection('D')
sage: nabU.domain()
Open subset U of the 3-dimensional differentiable manifold M
"""
return self._domain
def _new_coef(self, frame):
r"""
Create the connection coefficients w.r.t. the given frame.
This method, to be called by :meth:`coef`, must be redefined by derived
classes to adapt the output to the relevant subclass of
:class:`~sage.tensor.modules.comp.Components`.
TESTS::
sage: M = Manifold(2, 'M')
sage: X.<x,y> = M.chart()
sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
sage: nab._new_coef(X.frame())
3-indices components w.r.t. Coordinate frame (M, (d/dx,d/dy))
"""
from sage.tensor.modules.comp import Components
from sage.manifolds.differentiable.scalarfield import DiffScalarField
return Components(frame._domain.scalar_field_algebra(), frame, 3,
start_index=self._domain._sindex,
output_formatter=DiffScalarField.coord_function)
def coef(self, frame=None):
r"""
Return the connection coefficients relative to the given frame.
`n` being the manifold's dimension, the connection coefficients
relative to the vector frame `(e_i)` are the `n^3` scalar fields
`\Gamma^k_{\ \, ij}` defined by
.. MATH::
\nabla_{e_j} e_i = \Gamma^k_{\ \, ij} e_k
If the connection coefficients are not known already, they are computed
from the above formula.
INPUT:
- ``frame`` -- (default: ``None``) vector frame relative to which the
connection coefficients are required; if none is provided, the
domain's default frame is assumed
OUTPUT:
- connection coefficients relative to the frame ``frame``, as an
instance of the class :class:`~sage.tensor.modules.comp.Components`
with 3 indices ordered as `(k,i,j)`
EXAMPLES:
Connection coefficient of an affine connection on a 3-dimensional
manifold::
sage: M = Manifold(3, 'M', start_index=1)
sage: c_xyz.<x,y,z> = M.chart()
sage: nab = M.affine_connection('nabla', r'\nabla')
sage: nab[1,1,2], nab[3,2,3] = x^2, y*z # Gamma^1_{12} = x^2, Gamma^3_{23} = yz
sage: nab.coef()
3-indices components w.r.t. Coordinate frame (M, (d/dx,d/dy,d/dz))
sage: type(nab.coef())
<class 'sage.tensor.modules.comp.Components'>
sage: M.default_frame()
Coordinate frame (M, (d/dx,d/dy,d/dz))
sage: nab.coef() is nab.coef(c_xyz.frame())
True
sage: nab.coef()[:] # full list of coefficients:
[[[0, x^2, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, y*z], [0, 0, 0]]]
"""
if frame is None:
frame = self._domain.default_frame()
if frame not in self._coefficients:
# the coefficients must be computed
#
# Check whether frame is a subframe of a frame in which the
# coefficients are already known:
for oframe in self._coefficients:
if frame in oframe._subframes:
self._coefficients[frame] = self._new_coef(frame)
comp_store = self._coefficients[frame]._comp
ocomp_store = self._coefficients[oframe]._comp
for ind, value in ocomp_store.items():
comp_store[ind] = value.restrict(frame._domain)
break
else:
# If not, the coefficients must be computed from scratch:
manif = self._domain
ev = frame # the vector frame
ef = ev.coframe() # the dual frame
gam = self._new_coef(ev)
for i in manif.irange():
nab_evi = self(ev[i])
for k in manif.irange():
for j in manif.irange():
gam[[k,i,j]] = nab_evi(ef[k],ev[j])
self._coefficients[frame] = gam
return self._coefficients[frame]
def set_coef(self, frame=None):
r"""
Return the connection coefficients in a given frame for assignment.
See method :meth:`coef` for details about the definition of the
connection coefficients.
The connection coefficients with respect to other frames are deleted,
in order to avoid any inconsistency. To keep them, use the method
:meth:`add_coef` instead.
INPUT:
- ``frame`` -- (default: ``None``) vector frame in which the connection
coefficients are defined; if ``None``, the default frame of the
connection's domain is assumed.
OUTPUT:
- connection coefficients in the given frame, as an instance of the
class :class:`~sage.tensor.modules.comp.Components`; if such
connection coefficients did not exist previously, they are created.
See method :meth:`coef` for the storage convention of the connection
coefficients.
EXAMPLES:
Setting the coefficients of an affine connection w.r.t. some coordinate
frame::
sage: M = Manifold(2, 'M', start_index=1)
sage: X.<x,y> = M.chart()
sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
sage: eX = X.frame(); eX
Coordinate frame (M, (d/dx,d/dy))
sage: nab.set_coef(eX)
3-indices components w.r.t. Coordinate frame (M, (d/dx,d/dy))
sage: nab.set_coef(eX)[1,2,1] = x*y
sage: nab.display(eX)
Gam^x_yx = x*y
Since ``eX`` is the manifold's default vector frame, its mention may
be omitted::
sage: nab.set_coef()[1,2,1] = x*y
sage: nab.set_coef()
3-indices components w.r.t. Coordinate frame (M, (d/dx,d/dy))
sage: nab.set_coef()[1,2,1] = x*y
sage: nab.display()
Gam^x_yx = x*y
To set the coefficients in the default frame, one can even bypass the
method ``set_coef()`` and call directly the operator ``[]`` on the
connection object::
sage: nab[1,2,1] = x*y
sage: nab.display()
Gam^x_yx = x*y
        Setting the connection coefficients w.r.t. another vector frame::
sage: e = M.vector_frame('e')
sage: nab.set_coef(e)
3-indices components w.r.t. Vector frame (M, (e_1,e_2))
sage: nab.set_coef(e)[2,1,1] = x+y
sage: nab.set_coef(e)[2,1,2] = x-y
sage: nab.display(e)
Gam^2_11 = x + y
Gam^2_12 = x - y
The coefficients w.r.t. the frame ``eX`` have been deleted::
sage: nab.display(eX)
Traceback (most recent call last):
...
ValueError: no common frame found for the computation
To keep them, use the method :meth:`add_coef` instead.
"""
if frame is None:
frame = self._domain._def_frame
if frame not in self._coefficients:
if frame not in self._domain._frames:
raise ValueError("the {} is not".format(frame) +
" a frame on the {}".format(self._domain))
self._coefficients[frame] = self._new_coef(frame)
self._del_derived() # deletes the derived quantities
self.del_other_coef(frame)
return self._coefficients[frame]
def add_coef(self, frame=None):
r"""
Return the connection coefficients in a given frame for assignment,
keeping the coefficients in other frames.
See method :meth:`coef` for details about the definition of the
connection coefficients.
To delete the connection coefficients in other frames, use the method
:meth:`set_coef` instead.
INPUT:
- ``frame`` -- (default: ``None``) vector frame in which the connection
coefficients are defined; if ``None``, the default frame of the
connection's domain is assumed.
.. WARNING::
If the connection already has coefficients in other frames, it
is the user's responsibility to make sure that the coefficients
to be added are consistent with them.
OUTPUT:
- connection coefficients in the given frame, as an instance of the
class :class:`~sage.tensor.modules.comp.Components`; if such
connection coefficients did not exist previously, they are created.
See method :meth:`coef` for the storage convention of the connection
coefficients.
EXAMPLES:
Setting the coefficients of an affine connection w.r.t. some coordinate
frame::
sage: M = Manifold(2, 'M', start_index=1)
sage: X.<x,y> = M.chart()
sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
sage: eX = X.frame(); eX
Coordinate frame (M, (d/dx,d/dy))
sage: nab.add_coef(eX)
3-indices components w.r.t. Coordinate frame (M, (d/dx,d/dy))
sage: nab.add_coef(eX)[1,2,1] = x*y
sage: nab.display(eX)
Gam^x_yx = x*y
Since ``eX`` is the manifold's default vector frame, its mention may
be omitted::
sage: nab.add_coef()[1,2,1] = x*y
sage: nab.add_coef()
3-indices components w.r.t. Coordinate frame (M, (d/dx,d/dy))
sage: nab.add_coef()[1,2,1] = x*y
sage: nab.display()
Gam^x_yx = x*y
Adding connection coefficients w.r.t. another vector frame::
sage: e = M.vector_frame('e')
sage: nab.add_coef(e)
3-indices components w.r.t. Vector frame (M, (e_1,e_2))
sage: nab.add_coef(e)[2,1,1] = x+y
sage: nab.add_coef(e)[2,1,2] = x-y
sage: nab.display(e)
Gam^2_11 = x + y
Gam^2_12 = x - y
The coefficients w.r.t. the frame ``eX`` have been kept::
sage: nab.display(eX)
Gam^x_yx = x*y
To delete them, use the method :meth:`set_coef` instead.
"""
if frame is None:
frame = self._domain._def_frame
if frame not in self._coefficients:
if frame not in self._domain._frames:
raise ValueError("the {} is not".format(frame) +
" a frame on the {}".format(self._domain))
self._coefficients[frame] = self._new_coef(frame)
self._del_derived() # deletes the derived quantities
return self._coefficients[frame]
def del_other_coef(self, frame=None):
r"""
Delete all the coefficients but those corresponding to ``frame``.
INPUT:
- ``frame`` -- (default: ``None``) vector frame, the connection
coefficients w.r.t. which are to be kept; if ``None``, the default
frame of the connection's domain is assumed.
EXAMPLES:
We first create two sets of connection coefficients::
sage: M = Manifold(2, 'M', start_index=1)
sage: X.<x,y> = M.chart()
sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
sage: eX = X.frame()
sage: nab.set_coef(eX)[1,2,1] = x*y
sage: e = M.vector_frame('e')
sage: nab.add_coef(e)[2,1,1] = x+y
sage: nab.display(eX)
Gam^x_yx = x*y
sage: nab.display(e)
Gam^2_11 = x + y
Let us delete the connection coefficients w.r.t. all frames except for
frame ``eX``::
sage: nab.del_other_coef(eX)
sage: nab.display(eX)
Gam^x_yx = x*y
The connection coefficients w.r.t. frame ``e`` have indeed been
deleted::
sage: nab.display(e)
Traceback (most recent call last):
...
ValueError: no common frame found for the computation
"""
if frame is None:
frame = self._domain._def_frame
if frame not in self._coefficients:
raise ValueError("the coefficients w.r.t. {}".format(frame) +
" have not been defined")
to_be_deleted = []
for other_frame in self._coefficients:
if other_frame != frame:
to_be_deleted.append(other_frame)
for other_frame in to_be_deleted:
del self._coefficients[other_frame]
def __getitem__(self, args):
r"""
Return the connection coefficient w.r.t. some frame corresponding to
the given indices.
INPUT:
- ``args`` -- list of indices defining the coefficient; if ``[:]`` is
provided, all the coefficients are returned. The frame can be passed
as the first item of ``args``; if not, the default frame of the
connection's domain is assumed
OUTPUT:
- the connection coefficient corresponding to the specified frame and
indices, as an instance of
:class:`~sage.manifolds.chart_func.ChartFunction`
(or the list of all connection coefficients if ``args == [:]`` or
``args == [frame, :]``).
TESTS::
sage: M = Manifold(2, 'M', start_index=1)
sage: X.<x,y> = M.chart()
sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
sage: nab.set_coef(X.frame())[1,2,1] = x*y
sage: nab.__getitem__((1,2,1))
x*y
sage: nab[1,2,1] # equivalent to above
x*y
sage: type(nab.__getitem__((1,2,1)))
<class 'sage.manifolds.chart_func.ChartFunctionRing_with_category.element_class'>
sage: nab.__getitem__((X.frame(),1,2,1))
x*y
sage: nab[X.frame(),1,2,1] # equivalent to above
x*y
Returning the full set of coefficients::
sage: nab.__getitem__(slice(None))
[[[0, 0], [x*y, 0]], [[0, 0], [0, 0]]]
sage: nab[:] # equivalent to above
[[[0, 0], [x*y, 0]], [[0, 0], [0, 0]]]
sage: nab.__getitem__((X.frame(), slice(None)))
[[[0, 0], [x*y, 0]], [[0, 0], [0, 0]]]
sage: nab[X.frame(), :] # equivalent to above
[[[0, 0], [x*y, 0]], [[0, 0], [0, 0]]]
Returning a scalar field::
sage: nab.__getitem__(([1,2,1]))
Scalar field on the 2-dimensional differentiable manifold M
sage: nab[[1,2,1]] # equivalent to above
Scalar field on the 2-dimensional differentiable manifold M
sage: nab.__getitem__(([X.frame(),1,2,1])).coord_function() is nab[1,2,1]
True
"""
if isinstance(args, list): # case of [[...]] syntax
if isinstance(args[0], (int, Integer, slice)):
frame = self._domain._def_frame
else:
frame = args[0]
args = args[1:]
else:
if isinstance(args, (int, Integer, slice)):
frame = self._domain._def_frame
elif not isinstance(args[0], (int, Integer, slice)):
frame = args[0]
args = args[1:]
if len(args) == 1:
args = args[0] # to accommodate the [e,:] syntax
else:
frame = self._domain._def_frame
return self.coef(frame)[args]
def __setitem__(self, args, value):
r"""
Set the connection coefficient w.r.t. some frame corresponding to the
given indices.
INPUT:
- ``args`` -- list of indices defining the coefficient; if ``[:]`` is
provided, all the coefficients are set. The frame can be passed
as the first item of ``args``; if not, the default frame of the
connection's domain is assumed
- ``value`` -- the value to be set or a list of values if
``args = [:]``
TESTS::
sage: M = Manifold(2, 'M', start_index=1)
sage: X.<x,y> = M.chart()
sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
sage: nab.__setitem__((1,2,1), x*y)
sage: nab[:]
[[[0, 0], [x*y, 0]], [[0, 0], [0, 0]]]
sage: nab[1,2,1] = x*y # equivalent to __setitem__ above
sage: nab[:]
[[[0, 0], [x*y, 0]], [[0, 0], [0, 0]]]
sage: nab.__setitem__((X.frame(),1,2,1), -x^2)
sage: nab[1,2,1]
-x^2
sage: nab[X.frame(), 1,2,1] = -x^2 # equivalent to __setitem__ above
sage: nab[1,2,1]
-x^2
Setting all the coefficients at once::
sage: nab.__setitem__(slice(None),
....: [[[-x^2, 0], [x*y, 0]], [[0, 1+y], [0, 0]]])
sage: nab[:]
[[[-x^2, 0], [x*y, 0]], [[0, y + 1], [0, 0]]]
sage: nab[:] = [[[-x^2, 0], [x*y, 0]], [[0, 1+y], [0, 0]]] # equivalent to above
sage: nab[:]
[[[-x^2, 0], [x*y, 0]], [[0, y + 1], [0, 0]]]
Providing a scalar field as value::
sage: f = M.scalar_field({X: x*y})
sage: nab.__setitem__((1,2,1), f)
sage: nab[1,2,1]
x*y
"""
if isinstance(args, list): # case of [[...]] syntax
if isinstance(args[0], (int, Integer, slice)):
frame = self._domain._def_frame
else:
frame = args[0]
args = args[1:]
else:
if isinstance(args, (int, Integer, slice)):
frame = self._domain._def_frame
elif not isinstance(args[0], (int, Integer, slice)):
frame = args[0]
args = args[1:]
if len(args) == 1:
args = args[0] # to accommodate the [e,:] syntax
else:
frame = self._domain._def_frame
self.set_coef(frame)[args] = value
def display(self, frame=None, chart=None, symbol=None, latex_symbol=None,
index_labels=None, index_latex_labels=None,
coordinate_labels=True, only_nonzero=True,
only_nonredundant=False):
r"""
Display all the connection coefficients w.r.t. a given frame, one
per line.
The output is either text-formatted (console mode) or LaTeX-formatted
(notebook mode).
INPUT:
- ``frame`` -- (default: ``None``) vector frame relative to which the
connection coefficients are defined; if ``None``, the
default frame of the connection's domain is used
- ``chart`` -- (default: ``None``) chart specifying the coordinate
expression of the connection coefficients; if ``None``,
the default chart of the domain of ``frame`` is used
- ``symbol`` -- (default: ``None``) string specifying the
symbol of the connection coefficients; if ``None``, 'Gam' is used
- ``latex_symbol`` -- (default: ``None``) string specifying the LaTeX
symbol for the components; if ``None``, '\\Gamma' is used
- ``index_labels`` -- (default: ``None``) list of strings representing
the labels of each index; if ``None``, integer labels are used,
except if ``frame`` is a coordinate frame and ``coordinate_labels``
is set to ``True``, in which case the coordinate symbols are used
- ``index_latex_labels`` -- (default: ``None``) list of strings
representing the LaTeX labels of each index; if ``None``, integer
labels are used, except if ``frame`` is a coordinate frame and
``coordinate_labels`` is set to ``True``, in which case the
coordinate LaTeX symbols are used
- ``coordinate_labels`` -- (default: ``True``) boolean; if ``True``,
coordinate symbols are used by default (instead of integers) as
index labels whenever ``frame`` is a coordinate frame
- ``only_nonzero`` -- (default: ``True``) boolean; if ``True``, only
nonzero connection coefficients are displayed
- ``only_nonredundant`` -- (default: ``False``) boolean; if ``True``,
only nonredundant connection coefficients are displayed in case of
symmetries
EXAMPLES:
Coefficients of a connection on a 3-dimensional manifold::
sage: M = Manifold(3, 'M', start_index=1)
sage: c_xyz.<x,y,z> = M.chart()
sage: nab = M.affine_connection('nabla', r'\nabla')
sage: nab[1,1,2], nab[3,2,3] = x^2, y*z
By default, only the nonzero connection coefficients are displayed::
sage: nab.display()
Gam^x_xy = x^2
Gam^z_yz = y*z
sage: latex(nab.display())
\begin{array}{lcl} \Gamma_{ \phantom{\, x} \, x \, y }^{ \, x \phantom{\, x} \phantom{\, y} }
& = & x^{2} \\
\Gamma_{ \phantom{\, z} \, y \, z }^{ \, z \phantom{\, y} \phantom{\, z} }
& = & y z \end{array}
By default, the displayed connection coefficients are those w.r.t.
the default frame of the connection's domain, so the above is
equivalent to::
sage: nab.display(frame=M.default_frame())
Gam^x_xy = x^2
Gam^z_yz = y*z
Since the default frame is a coordinate frame, coordinate symbols are
used to label the indices, but one may ask for integers instead::
sage: M.default_frame() is c_xyz.frame()
True
sage: nab.display(coordinate_labels=False)
Gam^1_12 = x^2
Gam^3_23 = y*z
The index labels can also be customized::
sage: nab.display(index_labels=['(1)', '(2)', '(3)'])
Gam^(1)_(1),(2) = x^2
Gam^(3)_(2),(3) = y*z
The symbol 'Gam' can be changed::
sage: nab.display(symbol='C', latex_symbol='C')
C^x_xy = x^2
C^z_yz = y*z
sage: latex(nab.display(symbol='C', latex_symbol='C'))
\begin{array}{lcl} C_{ \phantom{\, x} \, x \, y }^{ \, x \phantom{\, x} \phantom{\, y} }
& = & x^{2} \\
C_{ \phantom{\, z} \, y \, z }^{ \, z \phantom{\, y} \phantom{\, z} }
& = & y z \end{array}
Display of Christoffel symbols, skipping the redundancy associated
with the symmetry of the last two indices::
sage: M = Manifold(3, 'R^3', start_index=1)
sage: c_spher.<r,th,ph> = M.chart(r'r:(0,+oo) th:(0,pi):\theta ph:(0,2*pi):\phi')
sage: g = M.metric('g')
sage: g[1,1], g[2,2], g[3,3] = 1, r^2 , (r*sin(th))^2
sage: g.display()
g = dr*dr + r^2 dth*dth + r^2*sin(th)^2 dph*dph
sage: g.connection().display(only_nonredundant=True)
Gam^r_th,th = -r
Gam^r_ph,ph = -r*sin(th)^2
Gam^th_r,th = 1/r
Gam^th_ph,ph = -cos(th)*sin(th)
Gam^ph_r,ph = 1/r
Gam^ph_th,ph = cos(th)/sin(th)
By default, the parameter ``only_nonredundant`` is set to ``False``::
sage: g.connection().display()
Gam^r_th,th = -r
Gam^r_ph,ph = -r*sin(th)^2
Gam^th_r,th = 1/r
Gam^th_th,r = 1/r
Gam^th_ph,ph = -cos(th)*sin(th)
Gam^ph_r,ph = 1/r
Gam^ph_th,ph = cos(th)/sin(th)
Gam^ph_ph,r = 1/r
Gam^ph_ph,th = cos(th)/sin(th)
"""
from sage.misc.latex import latex
from sage.manifolds.differentiable.vectorframe import CoordFrame
if frame is None:
frame = self._domain.default_frame()
if chart is None:
chart = frame.domain().default_chart()
if symbol is None:
symbol = 'Gam'
if latex_symbol is None:
latex_symbol = r'\Gamma'
if index_labels is None and isinstance(frame, CoordFrame) and \
coordinate_labels:
ch = frame.chart()
index_labels = [str(z) for z in ch[:]]
index_latex_labels = [latex(z) for z in ch[:]]
return self.coef(frame=frame).display(symbol,
latex_symbol=latex_symbol, index_positions='udd',
index_labels=index_labels, index_latex_labels=index_latex_labels,
format_spec=chart, only_nonzero=only_nonzero,
only_nonredundant=only_nonredundant)
def restrict(self, subdomain):
r"""
Return the restriction of the connection to some subdomain.
If such a restriction has not been defined yet, it is constructed here.
INPUT:
- ``subdomain`` -- open subset `U` of the connection's domain (must be
an instance of
:class:`~sage.manifolds.differentiable.manifold.DifferentiableManifold`)
OUTPUT:
- instance of :class:`AffineConnection` representing the restriction.
EXAMPLES:
Restriction of a connection on a 2-dimensional manifold::
sage: M = Manifold(2, 'M', start_index=1)
sage: c_xy.<x,y> = M.chart()
sage: nab = M.affine_connection('nabla', r'\nabla')
sage: nab[1,1,2], nab[2,1,1] = x^2, x+y
sage: nab[:]
[[[0, x^2], [0, 0]], [[x + y, 0], [0, 0]]]
sage: U = M.open_subset('U', coord_def={c_xy: x>0})
sage: nabU = nab.restrict(U) ; nabU
Affine connection nabla on the Open subset U of the 2-dimensional
differentiable manifold M
sage: nabU.domain()
Open subset U of the 2-dimensional differentiable manifold M
sage: nabU[:]
[[[0, x^2], [0, 0]], [[x + y, 0], [0, 0]]]
The result is cached::
sage: nab.restrict(U) is nabU
True
until the connection is modified::
sage: nab[1,2,2] = -y
sage: nab.restrict(U) is nabU
False
sage: nab.restrict(U)[:]
[[[0, x^2], [0, -y]], [[x + y, 0], [0, 0]]]
"""
if subdomain == self._domain:
return self
if subdomain not in self._restrictions:
if not subdomain.is_subset(self._domain):
raise ValueError("The provided domains is not a subset of " +
"the connection's domain.")
resu = AffineConnection(subdomain, name=self._name,
latex_name=self._latex_name)
for frame in self._coefficients:
for sframe in subdomain._top_frames:
if sframe in frame._subframes:
comp_store = self._coefficients[frame]._comp
scoef = resu._new_coef(sframe)
scomp_store = scoef._comp
# the coefficients of the restriction are evaluated
# index by index:
for ind, value in comp_store.items():
scomp_store[ind] = value.restrict(sframe._domain)
resu._coefficients[sframe] = scoef
if self._torsion is not None:
resu._torsion = self._torsion.restrict(subdomain)
if self._riemann is not None:
resu._riemann = self._riemann.restrict(subdomain)
if self._ricci is not None:
resu._ricci = self._ricci.restrict(subdomain)
self._restrictions[subdomain] = resu
return self._restrictions[subdomain]
def _common_frame(self, other):
r"""
Find a common vector frame for the coefficients of ``self`` and
the components of ``other``.
In case of multiple common frames, the default frame of ``self``'s
domain is privileged.
INPUT:
- ``other`` -- a tensor field on parallelizable domain, as an
instance of
:class:`~sage.manifolds.differentiable.tensorfield_paral.TensorFieldParal`
OUTPUT:
- common frame; if no common frame is found, ``None`` is returned.
TESTS::
sage: M = Manifold(2, 'M', start_index=1)
sage: X.<x,y> = M.chart()
sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
sage: nab[1,2,1] = x*y
sage: v = M.vector_field()
sage: v[:] = [-y, x]
sage: nab._common_frame(v)
Coordinate frame (M, (d/dx,d/dy))
sage: e = M.vector_frame('e')
sage: u = M.vector_field()
sage: u[e,:] = [-3, 2]
sage: nab._common_frame(u) # no common frame is found
"""
# The domain of search is restricted to other._domain:
dom = other._domain
# 1/ Does each object have components on the domain's default frame ?
def_frame = dom._def_frame
if def_frame in self._coefficients and def_frame in other._components:
return def_frame
# 2/ Search for a common frame among the existing components, i.e.
# without performing any component transformation.
# -------------------------------------------------------------
for frame in self._coefficients:
if frame in other._components:
return frame
# 3/ Search for a common frame among the subframes of self's frames:
# --------------------------------------------------------------
for frame in self._coefficients:
for oframe in other._components:
if oframe in frame._subframes:
self.coef(oframe) # update the coefficients of self in oframe
return oframe
#
# 4/ Search for a common frame via one component transformation
# ----------------------------------------------------------
# If this point is reached, it is necessary to perform at least
# one component transformation to get a common frame
for frame in self._coefficients:
for oframe in other._components:
if (oframe, frame) in dom._frame_changes:
other.comp(frame, from_basis=oframe)
return frame
# 5/ Search for a common frame via one component transformation to
# a subframe of self's frames:
# -------------------------------------------------------------
for frame in self._coefficients:
for oframe in other._components:
for sframe in frame._subframes:
if (oframe, sframe) in dom._frame_changes:
self.coef(sframe)
other.comp(sframe, from_basis=oframe)
return sframe
#
# If this point is reached, no common frame could be found, even at
# the price of a component transformation:
return None
def __call__(self, tensor):
r"""
Action of the connection on a tensor field.
INPUT:
- ``tensor`` -- a tensor field `T`, of type `(k,\ell)`
OUTPUT:
- tensor field `\nabla T`.
TESTS::
sage: M = Manifold(2, 'M', start_index=1)
sage: X.<x,y> = M.chart()
sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
sage: nab[1,2,1] = x*y
sage: v = M.vector_field()
sage: v[:] = [-y, x]
sage: nab.__call__(v)
Tensor field of type (1,1) on the 2-dimensional differentiable
manifold M
See documentation of
:class:`~sage.manifolds.differentiable.affine_connection.AffineConnection`
for more examples.
"""
from sage.manifolds.differentiable.tensorfield_paral import \
TensorFieldParal
from sage.tensor.modules.format_utilities import format_unop_latex
dom_resu = self._domain.intersection(tensor._domain)
tensor_r = tensor.restrict(dom_resu)
if tensor_r._tensor_type == (0,0): # scalar field case
return tensor_r.differential()
if isinstance(tensor_r, TensorFieldParal):
return self._derive_paral(tensor_r)
resu_rst = []
for dom, rst in tensor_r._restrictions.items():
# the computation is performed only if dom is not a subdomain
# of another restriction:
for odom in tensor_r._restrictions:
if dom in odom._subsets and dom is not odom:
break
else:
# dom is not a subdomain and the computation is performed:
resu_rst.append(self.__call__(rst))
tensor_type_resu = (tensor_r._tensor_type[0],
tensor_r._tensor_type[1]+1)
if tensor_r._name is None:
name_resu = None
else:
name_resu = self._name + '(' + tensor_r._name + ')'
if tensor_r._latex_name is None:
latex_name_resu = None
else:
latex_name_resu = format_unop_latex(self._latex_name + ' ',
tensor_r._latex_name)
vmodule = dom_resu.vector_field_module()
resu = vmodule.tensor(tensor_type_resu, name=name_resu,
latex_name=latex_name_resu,
sym=resu_rst[0]._sym,
antisym=resu_rst[0]._antisym)
for rst in resu_rst:
resu._restrictions[rst._domain] = rst
return resu
def _derive_paral(self, tensor):
r"""
Action of the connection on a tensor field on a parallelizable domain.
INPUT:
- ``tensor`` -- a tensor field `T`, of type `(k,\ell)`
OUTPUT:
- tensor field `\nabla T`.
TESTS::
sage: M = Manifold(2, 'M', start_index=1)
sage: X.<x,y> = M.chart()
sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
sage: nab[1,2,1] = x*y
sage: v = M.vector_field()
sage: v[:] = [-y, x]
sage: nab._derive_paral(v)
Tensor field of type (1,1) on the 2-dimensional differentiable
manifold M
"""
from sage.manifolds.differentiable.scalarfield import DiffScalarField
from sage.tensor.modules.comp import Components, CompWithSym
from sage.tensor.modules.format_utilities import format_unop_latex
manif = self._domain
tdom = tensor._domain
frame = self._common_frame(tensor)
if frame is None:
raise ValueError("no common frame found for the computation")
# Component computation in the common frame:
tc = tensor._components[frame]
gam = self._coefficients[frame]
if not tensor._sym and not tensor._antisym:
resc = Components(tdom.scalar_field_algebra(), frame,
tensor._tensor_rank+1,
start_index=self._domain._sindex,
output_formatter=DiffScalarField.coord_function)
else:
resc = CompWithSym(tdom.scalar_field_algebra(), frame,
tensor._tensor_rank+1,
start_index=self._domain._sindex,
output_formatter=DiffScalarField.coord_function,
sym=tensor._sym, antisym=tensor._antisym)
n_con = tensor._tensor_type[0]
n_cov = tensor._tensor_type[1]
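# Both the parallel and the sequential branch below evaluate each
# component of nabla(T) as
#   (nabla T)^{a1...}_{b1... p} = e_p(T^{a1...}_{b1...})
#       + sum_i Gam^{a1}_{i p} T^{i...}_{b1...} + ...  (contravariant indices)
#       - sum_i Gam^i_{b1 p} T^{a1...}_{i...} - ...    (covariant indices)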
if Parallelism().get('tensor') != 1:
# parallel computation
# !!!!! Seems to work only when a frame is chosen !!!!!!
nproc = Parallelism().get('tensor')
lol = lambda lst, sz: [lst[i:i+sz] for i in range(0, len(lst), sz)]
ind_list = list(resc.non_redundant_index_generator())
ind_step = max(1,int(len(ind_list)/nproc/2))
local_list = lol(ind_list,ind_step)
# definition of the list of input parameters
listParalInput = []
for ind_part in local_list:
listParalInput.append((ind_part,tc,gam,frame,n_con,
tensor._tensor_rank,manif))
# definition of the parallel function
@parallel(p_iter='multiprocessing',ncpus=nproc)
def make_CovDerivative(ind_part,tc,gam,frame,n_con,rank,manif):
partial = []
for ind in ind_part:
p = ind[-1] # derivation index
ind0 = ind[:-1]
rsum = frame[p](tc[[ind0]])
# loop on contravariant indices:
for k in range(n_con):
for i in manif.irange():
indk = list(ind0)
indk[k] = i
rsum += gam[[ind0[k], i, p]] * tc[[indk]]
# loop on covariant indices:
for k in range(n_con, rank):
for i in manif.irange():
indk = list(ind0)
indk[k] = i
rsum -= gam[[i, ind0[k], p]] * tc[[indk]]
partial.append([ind,rsum])
return partial
# Computation and Assignation of values
for ii,val in make_CovDerivative(listParalInput):
for jj in val:
resc[[jj[0]]] = jj[1]
else:
# sequential
for ind in resc.non_redundant_index_generator():
p = ind[-1] # derivation index
ind0 = ind[:-1]
rsum = frame[p](tc[[ind0]])
# loop on contravariant indices:
for k in range(n_con):
for i in manif.irange():
indk = list(ind0)
indk[k] = i
rsum += gam[[ind0[k], i, p]] * tc[[indk]]
# loop on covariant indices:
for k in range(n_con, tensor._tensor_rank):
for i in manif.irange():
indk = list(ind0)
indk[k] = i
rsum -= gam[[i, ind0[k], p]] * tc[[indk]]
resc[[ind]] = rsum
# Resulting tensor field
if tensor._name is None:
name_resu = None
else:
name_resu = self._name + '(' + tensor._name + ')'
if tensor._latex_name is None:
latex_name_resu = None
else:
latex_name_resu = format_unop_latex(self._latex_name + ' ',
tensor._latex_name)
return tdom.vector_field_module().tensor_from_comp((n_con, n_cov+1),
resc, name=name_resu, latex_name=latex_name_resu)
def torsion(self):
r"""
Return the connection's torsion tensor.
The *torsion tensor* is the tensor field `T` of type (1,2) defined by
.. MATH::
T(\omega, u, v) = \left\langle \omega, \nabla_u v - \nabla_v u
- [u, v] \right\rangle
for any 1-form `\omega` and any vector fields `u` and `v`.
OUTPUT:
- the torsion tensor `T`, as an instance of
:class:`~sage.manifolds.differentiable.tensorfield.TensorField`
EXAMPLES:
Torsion of an affine connection on a 3-dimensional manifold::
sage: M = Manifold(3, 'M', start_index=1)
sage: c_xyz.<x,y,z> = M.chart()
sage: nab = M.affine_connection('nabla', r'\nabla')
sage: nab[1,1,2], nab[3,2,3] = x^2, y*z # Gamma^1_{12} = x^2, Gamma^3_{23} = yz
sage: t = nab.torsion() ; t
Tensor field of type (1,2) on the 3-dimensional differentiable
manifold M
sage: t.symmetries()
no symmetry; antisymmetry: (1, 2)
sage: t[:]
[[[0, -x^2, 0], [x^2, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, -y*z], [0, y*z, 0]]]
The torsion expresses the lack of commutativity of two successive
derivatives of a scalar field::
sage: f = M.scalar_field(x*z^2 + y^2 - z^2, name='f')
sage: DDf = nab(nab(f)) ; DDf
Tensor field nabla(df) of type (0,2) on the 3-dimensional
differentiable manifold M
sage: DDf.antisymmetrize()[:] # two successive derivatives do not commute:
[ 0 -1/2*x^2*z^2 0]
[ 1/2*x^2*z^2 0 -(x - 1)*y*z^2]
[ 0 (x - 1)*y*z^2 0]
sage: 2*DDf.antisymmetrize() == nab.torsion().contract(0,nab(f))
True
The above identity is the standard formula
.. MATH::
\nabla_j \nabla_i \, f - \nabla_i \nabla_j \, f = T^k_{\ \, ij} \nabla_k \, f ,
where the `T^k_{\ \, ij}`'s are the components of the torsion tensor.
The result is cached::
sage: nab.torsion() is t
True
as long as the connection remains unchanged::
sage: nab[2,1,3] = 1+x # changing the connection
sage: nab.torsion() is t # a new computation of the torsion has been made
False
sage: (nab.torsion() - t).display()
(-x - 1) d/dy*dx*dz + (x + 1) d/dy*dz*dx
Another example: torsion of some connection on a non-parallelizable
2-dimensional manifold::
sage: M = Manifold(2, 'M')
sage: U = M.open_subset('U') ; V = M.open_subset('V')
sage: M.declare_union(U,V) # M is the union of U and V
sage: c_xy.<x,y> = U.chart() ; c_uv.<u,v> = V.chart()
sage: transf = c_xy.transition_map(c_uv, (x+y, x-y), intersection_name='W',
....: restrictions1= x>0, restrictions2= u+v>0)
sage: inv = transf.inverse()
sage: W = U.intersection(V)
sage: eU = c_xy.frame() ; eV = c_uv.frame()
sage: c_xyW = c_xy.restrict(W) ; c_uvW = c_uv.restrict(W)
sage: eUW = c_xyW.frame() ; eVW = c_uvW.frame()
sage: nab = M.affine_connection('nabla', r'\nabla')
sage: nab[0,0,0], nab[0,1,0], nab[1,0,1] = x, x-y, x*y
sage: for i in M.irange():
....: for j in M.irange():
....: for k in M.irange():
....: nab.add_coef(eV)[i,j,k] = nab.coef(eVW)[i,j,k,c_uvW].expr()
sage: t = nab.torsion() ; t
Tensor field of type (1,2) on the 2-dimensional differentiable
manifold M
sage: t.parent()
Module T^(1,2)(M) of type-(1,2) tensors fields on the 2-dimensional
differentiable manifold M
sage: t[eU,:]
[[[0, x - y], [-x + y, 0]], [[0, -x*y], [x*y, 0]]]
sage: t[eV,:]
[[[0, 1/8*u^2 - 1/8*v^2 - 1/2*v], [-1/8*u^2 + 1/8*v^2 + 1/2*v, 0]],
[[0, -1/8*u^2 + 1/8*v^2 - 1/2*v], [1/8*u^2 - 1/8*v^2 + 1/2*v, 0]]]
Check of the torsion formula::
sage: f = M.scalar_field({c_xy: (x+y)^2, c_uv: u^2}, name='f')
sage: DDf = nab(nab(f)) ; DDf
Tensor field nabla(df) of type (0,2) on the 2-dimensional
differentiable manifold M
sage: DDf.antisymmetrize().display(eU)
(-x^2*y - (x + 1)*y^2 + x^2) dx/\dy
sage: DDf.antisymmetrize().display(eV)
(1/8*u^3 - 1/8*u*v^2 - 1/2*u*v) du/\dv
sage: 2*DDf.antisymmetrize() == nab(f).contract(nab.torsion())
True
"""
if self._torsion is None:
manif = self._domain
resu = self._domain.tensor_field(1, 2, antisym=(1,2))
for frame, gam in self._coefficients.items():
sc = frame.structure_coeff()
res = resu.add_comp(frame)
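# Components of the torsion: T^k_{ij} = Gam^k_{ji} - Gam^k_{ij} - C^k_{ij},
# where the C^k_{ij}'s are the structure coefficients of the frame; the
# antisymmetry in (i,j) allows the loop below to be restricted to j > i: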
for k in manif.irange():
for i in manif.irange():
for j in manif.irange(start=i+1):
res[[k,i,j]] = gam[[k,j,i]] - gam[[k,i,j]] - \
sc[[k,i,j]]
self._torsion = resu
return self._torsion
def riemann(self):
r"""
Return the connection's Riemann curvature tensor.
The *Riemann curvature tensor* is the tensor field `R` of type (1,3)
defined by
.. MATH::
R(\omega, w, u, v) = \left\langle \omega, \nabla_u \nabla_v w
- \nabla_v \nabla_u w - \nabla_{[u, v]} w \right\rangle
for any 1-form `\omega` and any vector fields `u`, `v` and `w`.
OUTPUT:
- the Riemann curvature tensor `R`, as an instance of
:class:`~sage.manifolds.differentiable.tensorfield.TensorField`
EXAMPLES:
Curvature of an affine connection on a 3-dimensional manifold::
sage: M = Manifold(3, 'M', start_index=1)
sage: c_xyz.<x,y,z> = M.chart()
sage: nab = M.affine_connection('nabla', r'\nabla') ; nab
Affine connection nabla on the 3-dimensional differentiable
manifold M
sage: nab[1,1,2], nab[3,2,3] = x^2, y*z # Gamma^1_{12} = x^2, Gamma^3_{23} = yz
sage: r = nab.riemann() ; r
Tensor field of type (1,3) on the 3-dimensional differentiable
manifold M
sage: r.parent()
Free module T^(1,3)(M) of type-(1,3) tensors fields on the
3-dimensional differentiable manifold M
By construction, the Riemann tensor is antisymmetric with respect to
its last two arguments (denoted `u` and `v` in the definition above),
which are at positions 2 and 3 (the first argument being at position
0)::
sage: r.symmetries()
no symmetry; antisymmetry: (2, 3)
The components::
sage: r[:]
[[[[0, 2*x, 0], [-2*x, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]]],
[[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, z], [0, -z, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]]]]
The result is cached (until the connection is modified via
:meth:`set_coef` or :meth:`add_coef`)::
sage: nab.riemann() is r
True
Another example: Riemann curvature tensor of some connection on a
non-parallelizable 2-dimensional manifold::
sage: M = Manifold(2, 'M')
sage: U = M.open_subset('U') ; V = M.open_subset('V')
sage: M.declare_union(U,V) # M is the union of U and V
sage: c_xy.<x,y> = U.chart() ; c_uv.<u,v> = V.chart()
sage: transf = c_xy.transition_map(c_uv, (x+y, x-y), intersection_name='W',
....: restrictions1= x>0, restrictions2= u+v>0)
sage: inv = transf.inverse()
sage: W = U.intersection(V)
sage: eU = c_xy.frame() ; eV = c_uv.frame()
sage: c_xyW = c_xy.restrict(W) ; c_uvW = c_uv.restrict(W)
sage: eUW = c_xyW.frame() ; eVW = c_uvW.frame()
sage: nab = M.affine_connection('nabla', r'\nabla')
sage: nab[0,0,0], nab[0,1,0], nab[1,0,1] = x, x-y, x*y
sage: for i in M.irange():
....: for j in M.irange():
....: for k in M.irange():
....: nab.add_coef(eV)[i,j,k] = nab.coef(eVW)[i,j,k,c_uvW].expr()
sage: r = nab.riemann() ; r
Tensor field of type (1,3) on the 2-dimensional differentiable
manifold M
sage: r.parent()
Module T^(1,3)(M) of type-(1,3) tensors fields on the 2-dimensional
differentiable manifold M
sage: r.display(eU)
(x^2*y - x*y^2) d/dx*dx*dx*dy + (-x^2*y + x*y^2) d/dx*dx*dy*dx + d/dx*dy*dx*dy
- d/dx*dy*dy*dx - (x^2 - 1)*y d/dy*dx*dx*dy + (x^2 - 1)*y d/dy*dx*dy*dx
+ (-x^2*y + x*y^2) d/dy*dy*dx*dy + (x^2*y - x*y^2) d/dy*dy*dy*dx
sage: r.display(eV)
(1/32*u^3 - 1/32*u*v^2 - 1/32*v^3 + 1/32*(u^2 + 4)*v - 1/8*u - 1/4) d/du*du*du*dv
+ (-1/32*u^3 + 1/32*u*v^2 + 1/32*v^3 - 1/32*(u^2 + 4)*v + 1/8*u + 1/4) d/du*du*dv*du
+ (1/32*u^3 - 1/32*u*v^2 + 3/32*v^3 - 1/32*(3*u^2 - 4)*v - 1/8*u + 1/4) d/du*dv*du*dv
+ (-1/32*u^3 + 1/32*u*v^2 - 3/32*v^3 + 1/32*(3*u^2 - 4)*v + 1/8*u - 1/4) d/du*dv*dv*du
+ (-1/32*u^3 + 1/32*u*v^2 + 5/32*v^3 - 1/32*(5*u^2 + 4)*v + 1/8*u - 1/4) d/dv*du*du*dv
+ (1/32*u^3 - 1/32*u*v^2 - 5/32*v^3 + 1/32*(5*u^2 + 4)*v - 1/8*u + 1/4) d/dv*du*dv*du
+ (-1/32*u^3 + 1/32*u*v^2 + 1/32*v^3 - 1/32*(u^2 + 4)*v + 1/8*u + 1/4) d/dv*dv*du*dv
+ (1/32*u^3 - 1/32*u*v^2 - 1/32*v^3 + 1/32*(u^2 + 4)*v - 1/8*u - 1/4) d/dv*dv*dv*du
The same computation parallelized on 2 cores::
sage: Parallelism().set(nproc=2)
sage: r_backup = r
sage: nab = M.affine_connection('nabla', r'\nabla')
sage: nab[0,0,0], nab[0,1,0], nab[1,0,1] = x, x-y, x*y
sage: for i in M.irange():
....: for j in M.irange():
....: for k in M.irange():
....: nab.add_coef(eV)[i,j,k] = nab.coef(eVW)[i,j,k,c_uvW].expr()
sage: r = nab.riemann() ; r
Tensor field of type (1,3) on the 2-dimensional differentiable
manifold M
sage: r.parent()
Module T^(1,3)(M) of type-(1,3) tensors fields on the 2-dimensional
differentiable manifold M
sage: r == r_backup
True
sage: Parallelism().set(nproc=1) # switch off parallelization
"""
if self._riemann is None:
manif = self._domain
resu = self._domain.tensor_field(1, 3, antisym=(2,3))
for frame, gam in self._coefficients.items():
# The computation is performed only on the top frames:
for oframe in self._coefficients:
if frame in oframe._subframes and frame is not oframe:
break
else:
# frame is not a subframe and the computation is performed:
sc = frame.structure_coeff()
gam_gam = gam.contract(1, gam, 0)
gam_sc = gam.contract(2, sc, 0)
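# With these contractions, gam_gam[[i,k,j,l]] = Gam^i_{mk} Gam^m_{jl}
# and gam_sc[[i,j,k,l]] = Gam^i_{jm} C^m_{kl} (C = structure
# coefficients), so the loops below evaluate
#   R^i_{jkl} = e_k(Gam^i_{jl}) - e_l(Gam^i_{jk})
#               + Gam^i_{mk} Gam^m_{jl} - Gam^i_{ml} Gam^m_{jk}
#               - Gam^i_{jm} C^m_{kl}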
res = resu.add_comp(frame)
if Parallelism().get('tensor') != 1:
# parallel computation
nproc = Parallelism().get('tensor')
lol = lambda lst, sz: [lst[i:i+sz] for i in range(0,
len(lst), sz)]
ind_list = []
for i in manif.irange():
for j in manif.irange():
ind_list.append((i,j))
ind_step = max(1,int(len(ind_list)/nproc/2))
local_list = lol(ind_list,ind_step)
# definition of the list of input parameters
listParalInput = []
for ind_part in local_list:
listParalInput.append((frame,gam,gam_gam,gam_sc,
manif.irange,ind_part))
# definition of the parallel function
@parallel(p_iter='multiprocessing',ncpus=nproc)
def make_Reim(frame,gam,gam_gam,gam_sc,indices,
local_list_ij):
partial = []
for i,j in local_list_ij:
for k in indices():
for l in indices(start=k+1):
partial.append([i,j,k,l,
frame[k](gam[[i,j,l]]) - \
frame[l](gam[[i,j,k]]) + \
gam_gam[[i,k,j,l]] - \
gam_gam[[i,l,j,k]] - \
gam_sc[[i,j,k,l]]]
)
return partial
# Computation and assignation of values
for ii,val in make_Reim(listParalInput):
for jj in val:
res[jj[0],jj[1],jj[2],jj[3]] = jj[4]
else:
# sequential
for i in manif.irange():
for j in manif.irange():
for k in manif.irange():
# antisymmetry of the Riemann tensor taken
# into account by l>k:
for l in manif.irange(start=k+1):
res[i,j,k,l] = frame[k](gam[[i,j,l]]) - \
frame[l](gam[[i,j,k]]) + \
gam_gam[[i,k,j,l]] - \
gam_gam[[i,l,j,k]] - \
gam_sc[[i,j,k,l]]
self._riemann = resu
return self._riemann
def ricci(self):
r"""
Return the connection's Ricci tensor.
The *Ricci tensor* is the tensor field `Ric` of type (0,2)
defined from the Riemann curvature tensor `R` by
.. MATH::
Ric(u, v) = R(e^i, u, e_i, v)
for any vector fields `u` and `v`, `(e_i)` being any vector frame and
`(e^i)` the dual coframe.
OUTPUT:
- the Ricci tensor `Ric`, as an instance of
:class:`~sage.manifolds.differentiable.tensorfield.TensorField`
EXAMPLES:
Ricci tensor of an affine connection on a 3-dimensional manifold::
sage: M = Manifold(3, 'M', start_index=1)
sage: c_xyz.<x,y,z> = M.chart()
sage: nab = M.affine_connection('nabla', r'\nabla') ; nab
Affine connection nabla on the 3-dimensional differentiable
manifold M
sage: nab[1,1,2], nab[3,2,3] = x^2, y*z # Gamma^1_{12} = x^2, Gamma^3_{23} = yz
sage: r = nab.ricci() ; r
Tensor field of type (0,2) on the 3-dimensional differentiable
manifold M
sage: r[:]
[ 0 2*x 0]
[ 0 -z 0]
[ 0 0 0]
The result is cached (until the connection is modified via
:meth:`set_coef` or :meth:`add_coef`)::
sage: nab.ricci() is r
True
"""
if self._ricci is None:
self._ricci = self.riemann().trace(0,2)
return self._ricci
def connection_form(self, i, j, frame=None):
r"""
Return the connection 1-form corresponding to the given index and
vector frame.
The *connection 1-forms* with respect to the frame `(e_i)` are the
`n^2` 1-forms `\omega^i_{\ \, j}` defined by
.. MATH::
\nabla_v e_j = \langle \omega^i_{\ \, j}, v \rangle
\, e_i
for any vector `v`.
The components of `\omega^i_{\ \, j}` in the coframe `(e^i)` dual to
`(e_i)` are nothing but the connection coefficients `\Gamma^i_{\ \, jk}`
relative to the frame `(e_i)`:
.. MATH::
\omega^i_{\ \, j} = \Gamma^i_{\ \, jk} e^k
INPUT:
- ``i``, ``j`` -- indices identifying the 1-form `\omega^i_{\ \, j}`
- ``frame`` -- (default: ``None``) vector frame relative to which the
connection 1-forms are defined; if ``None``, the default frame of the
connection's domain is assumed.
OUTPUT:
- the 1-form `\omega^i_{\ \, j}`, as an instance of
:class:`~sage.manifolds.differentiable.diff_form.DiffForm`
EXAMPLES:
Connection 1-forms on a 3-dimensional manifold::
sage: M = Manifold(3, 'M', start_index=1)
sage: c_xyz.<x,y,z> = M.chart()
sage: nab = M.affine_connection('nabla', r'\nabla')
sage: nab[1,1,1], nab[1,1,2], nab[1,1,3] = x*y*z, x^2, -y*z
sage: nab[1,2,3], nab[1,3,1], nab[1,3,2] = -x^3, y^2*z, y^2-x^2
sage: nab[2,1,1], nab[2,1,2], nab[2,2,1] = z^2, x*y*z^2, -x^2
sage: nab[2,3,1], nab[2,3,3], nab[3,1,2] = x^2+y^2+z^2, y^2-z^2, x*y+z^2
sage: nab[3,2,1], nab[3,2,2], nab[3,3,3] = x*y+z, z^3 -y^2, x*z^2 - z*y^2
sage: nab.connection_form(1,1) # connection 1-form (i,j)=(1,1) w.r.t. M's default frame
1-form nabla connection 1-form (1,1) on the 3-dimensional
differentiable manifold M
sage: nab.connection_form(1,1)[:]
[x*y*z, x^2, -y*z]
The result is cached (until the connection is modified via
:meth:`set_coef` or :meth:`add_coef`)::
sage: nab.connection_form(1,1) is nab.connection_form(1,1)
True
Connection 1-forms w.r.t. a non-holonomic frame::
sage: ch_basis = M.automorphism_field()
sage: ch_basis[1,1], ch_basis[2,2], ch_basis[3,3] = y, z, x
sage: e = M.default_frame().new_frame(ch_basis, 'e')
sage: e[1][:], e[2][:], e[3][:]
([y, 0, 0], [0, z, 0], [0, 0, x])
sage: nab.connection_form(1,1,e)
1-form nabla connection 1-form (1,1) on the 3-dimensional
differentiable manifold M
sage: nab.connection_form(1,1,e).comp(e)[:]
[x*y^2*z, (x^2*y + 1)*z/y, -x*y*z]
Check of the formula `\omega^i_{\ \, j} = \Gamma^i_{\ \, jk} e^k`:
First on the manifold's default frame (d/dx, d/dy, d/dz)::
sage: dx = M.default_frame().coframe() ; dx
Coordinate coframe (M, (dx,dy,dz))
sage: check = []
sage: for i in M.irange():
....: for j in M.irange():
....: check.append( nab.connection_form(i,j) == \
....: sum( nab[[i,j,k]]*dx[k] for k in M.irange() ) )
sage: check
[True, True, True, True, True, True, True, True, True]
Then on the frame e::
sage: ef = e.coframe() ; ef
Coframe (M, (e^1,e^2,e^3))
sage: check = []
sage: for i in M.irange():
....: for j in M.irange():
....: s = nab.connection_form(i,j,e).comp(c_xyz.frame(), from_basis=e)
....: check.append( nab.connection_form(i,j,e) == sum( nab.coef(e)[[i,j,k]]*ef[k] for k in M.irange() ) )
sage: check
[True, True, True, True, True, True, True, True, True]
Check of the formula
`\nabla_v e_j = \langle \omega^i_{\ \, j}, v \rangle e_i`::
sage: v = M.vector_field()
sage: v[:] = (x*y, z^2-3*x, z+2*y)
sage: b = M.default_frame()
sage: for j in M.irange(): # check on M's default frame
....: nab(b[j]).contract(v) == \
....: sum( nab.connection_form(i,j)(v)*b[i] for i in M.irange())
True
True
True
sage: for j in M.irange(): # check on frame e
....: nab(e[j]).contract(v) == \
....: sum( nab.connection_form(i,j,e)(v)*e[i] for i in M.irange())
True
True
True
"""
if frame is None:
frame = self._domain._def_frame
if frame not in self._connection_forms:
forms = {}
frame_dom = frame.domain()
coef_frame = self.coef(frame)
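# In the coframe dual to ``frame``, omega^i_j has components
# Gam^i_{jk}, i.e. omega^i_j = Gam^i_{jk} e^k (cf. the docstring):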
for i1 in self._domain.irange():
for j1 in self._domain.irange():
name = self._name + " connection 1-form (" + str(i1) + \
"," + str(j1) + ")"
latex_name = r"\omega^" + str(i1) + r"_{\ \, " + \
str(j1) + "}"
omega = frame_dom.one_form(name=name,
latex_name=latex_name)
comega = omega.set_comp(frame)
for k in self._domain.irange():
comega[k] = coef_frame[[i1,j1,k]]
forms[(i1,j1)] = omega
self._connection_forms[frame] = forms
return self._connection_forms[frame][(i,j)]
def torsion_form(self, i, frame=None):
r"""
Return the torsion 2-form corresponding to the given index and
vector frame.
The *torsion 2-forms* with respect to the frame `(e_i)` are the
`n` 2-forms `\theta^i` defined by
.. MATH::
\theta^i(u,v) = T(e^i, u, v)
where `T` is the connection's torsion tensor (cf. :meth:`torsion`),
`(e^i)` is the coframe dual to `(e_i)` and `(u,v)` is a generic pair of
vectors.
INPUT:
- ``i`` -- index identifying the 2-form `\theta^i`
- ``frame`` -- (default: ``None``) vector frame relative to which the
torsion 2-forms are defined; if ``None``, the default frame of the
connection's domain is assumed.
OUTPUT:
- the 2-form `\theta^i`, as an instance of
:class:`~sage.manifolds.differentiable.diff_form.DiffForm`
EXAMPLES:
Torsion 2-forms on a 3-dimensional manifold::
sage: M = Manifold(3, 'M', start_index=1)
sage: c_xyz.<x,y,z> = M.chart()
sage: nab = M.affine_connection('nabla', r'\nabla')
sage: nab[1,1,1], nab[1,1,2], nab[1,1,3] = x*y*z, x^2, -y*z
sage: nab[1,2,3], nab[1,3,1], nab[1,3,2] = -x^3, y^2*z, y^2-x^2
sage: nab[2,1,1], nab[2,1,2], nab[2,2,1] = z^2, x*y*z^2, -x^2
sage: nab[2,3,1], nab[2,3,3], nab[3,1,2] = x^2+y^2+z^2, y^2-z^2, x*y+z^2
sage: nab[3,2,1], nab[3,2,2], nab[3,3,3] = x*y+z, z^3 -y^2, x*z^2 - z*y^2
sage: nab.torsion_form(1)
2-form torsion (1) of connection nabla w.r.t. Coordinate frame
(M, (d/dx,d/dy,d/dz)) on the 3-dimensional differentiable manifold M
sage: nab.torsion_form(1)[:]
[ 0 -x^2 (y^2 + y)*z]
[ x^2 0 x^3 - x^2 + y^2]
[ -(y^2 + y)*z -x^3 + x^2 - y^2 0]
Torsion 2-forms w.r.t. a non-holonomic frame::
sage: ch_basis = M.automorphism_field()
sage: ch_basis[1,1], ch_basis[2,2], ch_basis[3,3] = y, z, x
sage: e = M.default_frame().new_frame(ch_basis, 'e')
sage: e[1][:], e[2][:], e[3][:]
([y, 0, 0], [0, z, 0], [0, 0, x])
sage: ef = e.coframe()
sage: ef[1][:], ef[2][:], ef[3][:]
([1/y, 0, 0], [0, 1/z, 0], [0, 0, 1/x])
sage: nab.torsion_form(1, e)
2-form torsion (1) of connection nabla w.r.t. Vector frame
(M, (e_1,e_2,e_3)) on the 3-dimensional differentiable manifold M
sage: nab.torsion_form(1, e).comp(e)[:]
[ 0 -x^2*z (x*y^2 + x*y)*z]
[ x^2*z 0 (x^4 - x^3 + x*y^2)*z/y]
[ -(x*y^2 + x*y)*z -(x^4 - x^3 + x*y^2)*z/y 0]
Cartan's first structure equation is
.. MATH::
\theta^i = \mathrm{d} e^i + \omega^i_{\ \, j} \wedge e^j
where the `\omega^i_{\ \, j}`'s are the connection 1-forms (cf.
:meth:`connection_form`). Let us check it on the frame e::
sage: for i in M.irange(): # long time
....: nab.torsion_form(i, e) == ef[i].exterior_derivative() + \
....: sum(nab.connection_form(i,j,e).wedge(ef[j]) for j in M.irange())
True
True
True
"""
if frame is None:
frame = self._domain._def_frame
if frame not in self._torsion_forms:
forms = {}
frame_dom = frame.domain()
torsion_comp = self.torsion().comp(frame)
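# theta^i is built from the torsion components, theta^i(e_k, e_l) = T^i_{kl};
# only k < l is stored, thanks to the antisymmetry of a 2-form: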
for i1 in self._domain.irange():
name = "torsion ({}) of connection ".format(i1) + \
self._name + " w.r.t. {}".format(frame)
latex_name = r"\theta^" + str(i1)
theta = frame_dom.diff_form(2, name=name,
latex_name=latex_name)
ctheta = theta.set_comp(frame)
for k in self._domain.irange():
for l in self._domain.irange(start=k+1):
ctheta[k,l] = torsion_comp[[i1,k,l]]
forms[i1] = theta
self._torsion_forms[frame] = forms
return self._torsion_forms[frame][i]
def curvature_form(self, i, j, frame=None):
r"""
Return the curvature 2-form corresponding to the given index and
vector frame.
The *curvature 2-forms* with respect to the frame `(e_i)` are the
`n^2` 2-forms `\Omega^i_{\ \, j}` defined by
.. MATH::
\Omega^i_{\ \, j}(u,v) = R(e^i, e_j, u, v)
where `R` is the connection's Riemann curvature tensor (cf.
:meth:`riemann`), `(e^i)` is the coframe dual to `(e_i)` and `(u,v)` is
a generic pair of vectors.
INPUT:
- ``i``, ``j`` -- indices identifying the 2-form `\Omega^i_{\ \, j}`
- ``frame`` -- (default: ``None``) vector frame relative to which the
curvature 2-forms are defined; if ``None``, the default frame
of the connection's domain is assumed.
OUTPUT:
- the 2-form `\Omega^i_{\ \, j}`, as an instance of
:class:`~sage.manifolds.differentiable.diff_form.DiffForm`
EXAMPLES:
Curvature 2-forms on a 3-dimensional manifold::
sage: M = Manifold(3, 'M', start_index=1)
sage: c_xyz.<x,y,z> = M.chart()
sage: nab = M.affine_connection('nabla', r'\nabla')
sage: nab[1,1,1], nab[1,1,2], nab[1,1,3] = x*y*z, x^2, -y*z
sage: nab[1,2,3], nab[1,3,1], nab[1,3,2] = -x^3, y^2*z, y^2-x^2
sage: nab[2,1,1], nab[2,1,2], nab[2,2,1] = z^2, x*y*z^2, -x^2
sage: nab[2,3,1], nab[2,3,3], nab[3,1,2] = x^2+y^2+z^2, y^2-z^2, x*y+z^2
sage: nab[3,2,1], nab[3,2,2], nab[3,3,3] = x*y+z, z^3 -y^2, x*z^2 - z*y^2
sage: nab.curvature_form(1,1) # long time
2-form curvature (1,1) of connection nabla w.r.t. Coordinate frame
(M, (d/dx,d/dy,d/dz)) on the 3-dimensional differentiable manifold M
sage: nab.curvature_form(1,1).display() # long time (if above is skipped)
curvature (1,1) of connection nabla w.r.t. Coordinate frame
(M, (d/dx,d/dy,d/dz)) = (y^2*z^3 + (x*y^3 - x)*z + 2*x) dx/\dy
+ (x^3*z^2 - x*y) dx/\dz + (x^4*y*z^2 - z) dy/\dz
Curvature 2-forms w.r.t. a non-holonomic frame::
sage: ch_basis = M.automorphism_field()
sage: ch_basis[1,1], ch_basis[2,2], ch_basis[3,3] = y, z, x
sage: e = M.default_frame().new_frame(ch_basis, 'e')
sage: e[1].display(), e[2].display(), e[3].display()
(e_1 = y d/dx, e_2 = z d/dy, e_3 = x d/dz)
sage: ef = e.coframe()
sage: ef[1].display(), ef[2].display(), ef[3].display()
(e^1 = 1/y dx, e^2 = 1/z dy, e^3 = 1/x dz)
sage: nab.curvature_form(1,1,e) # long time
2-form curvature (1,1) of connection nabla w.r.t. Vector frame
(M, (e_1,e_2,e_3)) on the 3-dimensional differentiable manifold M
sage: nab.curvature_form(1,1,e).display(e) # long time (if above is skipped)
curvature (1,1) of connection nabla w.r.t. Vector frame
(M, (e_1,e_2,e_3)) =
(y^3*z^4 + 2*x*y*z + (x*y^4 - x*y)*z^2) e^1/\e^2
+ (x^4*y*z^2 - x^2*y^2) e^1/\e^3 + (x^5*y*z^3 - x*z^2) e^2/\e^3
Cartan's second structure equation is
.. MATH::
\Omega^i_{\ \, j} = \mathrm{d} \omega^i_{\ \, j}
+ \omega^i_{\ \, k} \wedge \omega^k_{\ \, j}
where the `\omega^i_{\ \, j}`'s are the connection 1-forms (cf.
:meth:`connection_form`). Let us check it on the frame e::
sage: omega = nab.connection_form
sage: check = []
sage: for i in M.irange(): # long time
....: for j in M.irange():
....: check.append( nab.curvature_form(i,j,e) == \
....: omega(i,j,e).exterior_derivative() + \
....: sum( omega(i,k,e).wedge(omega(k,j,e)) for k in M.irange()) )
sage: check # long time
[True, True, True, True, True, True, True, True, True]
"""
if frame is None:
frame = self._domain._def_frame
if frame not in self._curvature_forms:
forms = {}
frame_dom = frame.domain()
riemann_comp = self.riemann().comp(frame)
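# Omega^i_j is built from the Riemann components,
# Omega^i_j(e_k, e_l) = R^i_{jkl}; only k < l is stored: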
for i1 in self._domain.irange():
for j1 in self._domain.irange():
name = "curvature ({},{}) of connection ".format(i1,j1) + \
self._name + " w.r.t. {}".format(frame)
latex_name = r"\Omega^" + str(i1) + r"_{\ \, " + \
str(j1) + "}"
omega = frame_dom.diff_form(2, name=name,
latex_name=latex_name)
comega = omega.set_comp(frame)
for k in self._domain.irange():
for l in self._domain.irange(start=k+1):
comega[k,l] = riemann_comp[[i1,j1,k,l]]
forms[(i1,j1)] = omega
self._curvature_forms[frame] = forms
return self._curvature_forms[frame][(i,j)]
def set_calc_order(self, symbol, order, truncate=False):
r"""
Trigger a series expansion with respect to a small parameter in
computations involving ``self``.
This property is propagated by usual operations. The internal
representation must be ``SR`` for this to take effect.
INPUT:
- ``symbol`` -- symbolic variable (the "small parameter" `\epsilon`)
with respect to which the connection coefficients are expanded in
power series
- ``order`` -- integer; the order `n` of the expansion, defined as the
degree of the polynomial representing the truncated power series in
``symbol``
- ``truncate`` -- (default: ``False``) determines whether the
connection coefficients are replaced by their expansions to the
given order
EXAMPLES::
sage: M = Manifold(4, 'M', structure='Lorentzian')
sage: C.<t,x,y,z> = M.chart()
sage: e = var('e')
sage: g = M.metric()
sage: h = M.tensor_field(0, 2, sym=(0,1))
sage: g[0, 0], g[1, 1], g[2, 2], g[3, 3] = -1, 1, 1, 1
sage: h[0, 1] = x
sage: g.set(g + e*h)
sage: g[:]
[ -1 e*x 0 0]
[e*x 1 0 0]
[ 0 0 1 0]
[ 0 0 0 1]
sage: nab = g.connection()
sage: nab[0, 1, 1]
-e/(e^2*x^2 + 1)
sage: nab.set_calc_order(e, 1, truncate=True)
sage: nab[0, 1, 1]
-e
"""
for coef in self._coefficients.values():
for ind in coef.non_redundant_index_generator():
coef[ind]._expansion_symbol = symbol
coef[ind]._order = order
if truncate:
coef[ind].simplify()
self._del_derived()
def __hash__(self):
r"""
Hash function.
TESTS::
sage: M = Manifold(2, 'M')
sage: X.<x,y> = M.chart()
sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
sage: hash(nab) == nab.__hash__()
True
Let us check that ``nab`` can be used as a dictionary key::
sage: {nab: 1}[nab]
1
"""
if self._hash == -1:
self._hash = hash(repr(self))
return self._hash
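# -- Editor's note: a minimal usage sketch of ``AffineConnection`` (not part
# of the original module). It assumes a Sage session; every object and method
# used below already appears in the doctests above:
#
#     sage: M = Manifold(2, 'M', start_index=1)
#     sage: X.<x,y> = M.chart()
#     sage: nab = M.affine_connection('nabla', latex_name=r'\nabla')
#     sage: nab[1,2,1] = x*y          # set Gamma^1_{21} = x*y
#     sage: nab.torsion()[:]          # components of the torsion tensor
#     sage: nab.riemann()[:]          # components of the Riemann tensor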
# --- next file: setup.py (repo: claymation/django-toggles, license: MIT) ---
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-toggles',
version='0.1',
packages=['toggles'],
include_package_data=True,
license='MIT License',
description='',
long_description=README,
url='https://github.com/claymation/django-toggles',
author='Clay McClure',
author_email='clay@daemons.net',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
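# -- Editor's note: a hedged usage sketch (not part of the original file).
# Typical ways to build/install this package from the repository root,
# assuming setuptools and pip are available:
#
#     python setup.py sdist    # build a source distribution
#     pip install .            # install the package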
# --- next file: tests/snappi/pfc/test_pfc_pause_lossy_with_snappi.py
#     (repo: lolyu/sonic-mgmt, license: Apache-2.0) ---
import logging
import pytest
from files.helper import run_pfc_test
from tests.common.helpers.assertions import pytest_assert, pytest_require
from tests.common.fixtures.conn_graph_facts import conn_graph_facts,\
fanout_graph_facts
from tests.common.snappi.snappi_fixtures import snappi_api_serv_ip, snappi_api_serv_port,\
snappi_api, snappi_testbed_config
from tests.common.snappi.qos_fixtures import prio_dscp_map, all_prio_list, lossless_prio_list,\
lossy_prio_list
from tests.common.reboot import reboot
from tests.common.utilities import wait_until
logger = logging.getLogger(__name__)
pytestmark = [pytest.mark.topology('snappi')]
def test_pfc_pause_single_lossy_prio(snappi_api,
snappi_testbed_config,
conn_graph_facts,
fanout_graph_facts,
duthosts,
rand_one_dut_hostname,
rand_one_dut_portname_oper_up,
enum_dut_lossy_prio,
all_prio_list,
prio_dscp_map):
"""
Test if PFC will impact a single lossy priority
Args:
snappi_api (pytest fixture): SNAPPI session
snappi_testbed_config (pytest fixture): testbed configuration information
conn_graph_facts (pytest fixture): connection graph
fanout_graph_facts (pytest fixture): fanout graph
duthosts (pytest fixture): list of DUTs
rand_one_dut_hostname (str): hostname of DUT
rand_one_dut_portname_oper_up (str): port to test, e.g., 's6100-1|Ethernet0'
enum_dut_lossy_prio (str): name of lossy priority to test, e.g., 's6100-1|2'
all_prio_list (pytest fixture): list of all the priorities
prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority).
Returns:
N/A
"""
dut_hostname, dut_port = rand_one_dut_portname_oper_up.split('|')
dut_hostname2, lossy_prio = enum_dut_lossy_prio.split('|')
pytest_require(rand_one_dut_hostname == dut_hostname == dut_hostname2,
"Priority and port are not mapped to the expected DUT")
testbed_config, port_config_list = snappi_testbed_config
duthost = duthosts[rand_one_dut_hostname]
lossy_prio = int(lossy_prio)
pause_prio_list = [lossy_prio]
test_prio_list = [lossy_prio]
bg_prio_list = [p for p in all_prio_list]
bg_prio_list.remove(lossy_prio)
run_pfc_test(api=snappi_api,
testbed_config=testbed_config,
port_config_list=port_config_list,
conn_data=conn_graph_facts,
fanout_data=fanout_graph_facts,
duthost=duthost,
dut_port=dut_port,
global_pause=False,
pause_prio_list=pause_prio_list,
test_prio_list=test_prio_list,
bg_prio_list=bg_prio_list,
prio_dscp_map=prio_dscp_map,
test_traffic_pause=False)
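# NOTE (editor's addition): all four tests in this module invoke run_pfc_test()
# with test_traffic_pause=False: the expectation is that pausing *lossy*
# priorities leaves the test traffic unaffected, since only lossless
# priorities are supposed to honor PFC pause frames.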
def test_pfc_pause_multi_lossy_prio(snappi_api,
snappi_testbed_config,
conn_graph_facts,
fanout_graph_facts,
duthosts,
rand_one_dut_hostname,
rand_one_dut_portname_oper_up,
lossless_prio_list,
lossy_prio_list,
prio_dscp_map):
"""
Test if PFC will impact multiple lossy priorities
Args:
snappi_api (pytest fixture): SNAPPI session
snappi_testbed_config (pytest fixture): testbed configuration information
conn_graph_facts (pytest fixture): connection graph
fanout_graph_facts (pytest fixture): fanout graph
duthosts (pytest fixture): list of DUTs
rand_one_dut_hostname (str): hostname of DUT
rand_one_dut_portname_oper_up (str): port to test, e.g., 's6100-1|Ethernet0'
lossless_prio_list (pytest fixture): list of all the lossless priorities
lossy_prio_list (pytest fixture): list of all the lossy priorities
prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority).
Returns:
N/A
"""
dut_hostname, dut_port = rand_one_dut_portname_oper_up.split('|')
pytest_require(rand_one_dut_hostname == dut_hostname,
"Port is not mapped to the expected DUT")
testbed_config, port_config_list = snappi_testbed_config
duthost = duthosts[rand_one_dut_hostname]
pause_prio_list = lossy_prio_list
test_prio_list = lossy_prio_list
bg_prio_list = lossless_prio_list
run_pfc_test(api=snappi_api,
testbed_config=testbed_config,
port_config_list=port_config_list,
conn_data=conn_graph_facts,
fanout_data=fanout_graph_facts,
duthost=duthost,
dut_port=dut_port,
global_pause=False,
pause_prio_list=pause_prio_list,
test_prio_list=test_prio_list,
bg_prio_list=bg_prio_list,
prio_dscp_map=prio_dscp_map,
test_traffic_pause=False)
@pytest.mark.disable_loganalyzer
@pytest.mark.parametrize('reboot_type', ['warm', 'cold', 'fast'])
def test_pfc_pause_single_lossy_prio_reboot(snappi_api,
snappi_testbed_config,
conn_graph_facts,
fanout_graph_facts,
localhost,
duthosts,
rand_one_dut_hostname,
rand_one_dut_portname_oper_up,
rand_lossy_prio,
all_prio_list,
prio_dscp_map,
reboot_type):
"""
Test if PFC will impact a single lossy priority after various kinds of reboots
Args:
snappi_api (pytest fixture): SNAPPI session
snappi_testbed_config (pytest fixture): testbed configuration information
conn_graph_facts (pytest fixture): connection graph
fanout_graph_facts (pytest fixture): fanout graph
localhost (pytest fixture): localhost handle
duthosts (pytest fixture): list of DUTs
rand_one_dut_hostname (str): hostname of DUT
rand_one_dut_portname_oper_up (str): port to test, e.g., 's6100-1|Ethernet0'
rand_lossy_prio (str): lossy priority to test, e.g., 's6100-1|2'
all_prio_list (pytest fixture): list of all the priorities
prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority).
reboot_type (str): reboot type to be issued on the DUT
Returns:
N/A
"""
dut_hostname, dut_port = rand_one_dut_portname_oper_up.split('|')
dut_hostname2, lossy_prio = rand_lossy_prio.split('|')
pytest_require(rand_one_dut_hostname == dut_hostname == dut_hostname2,
"Priority and port are not mapped to the expected DUT")
testbed_config, port_config_list = snappi_testbed_config
duthost = duthosts[rand_one_dut_hostname]
lossy_prio = int(lossy_prio)
pause_prio_list = [lossy_prio]
test_prio_list = [lossy_prio]
bg_prio_list = [p for p in all_prio_list]
bg_prio_list.remove(lossy_prio)
logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname))
reboot(duthost, localhost, reboot_type=reboot_type)
logger.info("Wait until the system is stable")
pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started),
"Not all critical services are fully started")
run_pfc_test(api=snappi_api,
testbed_config=testbed_config,
port_config_list=port_config_list,
conn_data=conn_graph_facts,
fanout_data=fanout_graph_facts,
duthost=duthost,
dut_port=dut_port,
global_pause=False,
pause_prio_list=pause_prio_list,
test_prio_list=test_prio_list,
bg_prio_list=bg_prio_list,
prio_dscp_map=prio_dscp_map,
test_traffic_pause=False)
@pytest.mark.disable_loganalyzer
@pytest.mark.parametrize('reboot_type', ['warm', 'cold', 'fast'])
def test_pfc_pause_multi_lossy_prio_reboot(snappi_api,
snappi_testbed_config,
conn_graph_facts,
fanout_graph_facts,
localhost,
duthosts,
rand_one_dut_hostname,
rand_one_dut_portname_oper_up,
lossless_prio_list,
lossy_prio_list,
prio_dscp_map,
reboot_type):
"""
Test if PFC will impact multiple lossy priorities after various kinds of reboots
Args:
snappi_api (pytest fixture): SNAPPI session
snappi_testbed_config (pytest fixture): testbed configuration information
conn_graph_facts (pytest fixture): connection graph
fanout_graph_facts (pytest fixture): fanout graph
localhost (pytest fixture): localhost handle
duthosts (pytest fixture): list of DUTs
rand_one_dut_hostname (str): hostname of DUT
rand_one_dut_portname_oper_up (str): port to test, e.g., 's6100-1|Ethernet0'
lossless_prio_list (pytest fixture): list of all the lossless priorities
lossy_prio_list (pytest fixture): list of all the lossy priorities
prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority).
reboot_type (str): reboot type to be issued on the DUT
Returns:
N/A
"""
dut_hostname, dut_port = rand_one_dut_portname_oper_up.split('|')
pytest_require(rand_one_dut_hostname == dut_hostname,
"Port is not mapped to the expected DUT")
testbed_config, port_config_list = snappi_testbed_config
duthost = duthosts[rand_one_dut_hostname]
pause_prio_list = lossy_prio_list
test_prio_list = lossy_prio_list
bg_prio_list = lossless_prio_list
logger.info("Issuing a {} reboot on the dut {}".format(reboot_type, duthost.hostname))
reboot(duthost, localhost, reboot_type=reboot_type)
logger.info("Wait until the system is stable")
pytest_assert(wait_until(300, 20, 0, duthost.critical_services_fully_started),
"Not all critical services are fully started")
run_pfc_test(api=snappi_api,
testbed_config=testbed_config,
port_config_list=port_config_list,
conn_data=conn_graph_facts,
fanout_data=fanout_graph_facts,
duthost=duthost,
dut_port=dut_port,
global_pause=False,
pause_prio_list=pause_prio_list,
test_prio_list=test_prio_list,
bg_prio_list=bg_prio_list,
prio_dscp_map=prio_dscp_map,
test_traffic_pause=False)
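def _example_priority_partition():
    """Illustrative sketch only, not part of the test suite: shows with
    assumed values how the fixtures above partition priorities. An
    8-priority switch where 3 and 4 are lossless is an assumption, not
    fixture output."""
    lossless_prio_list = [3, 4]
    lossy_prio_list = [p for p in range(8) if p not in lossless_prio_list]
    # Pause frames and test traffic target every lossy priority, while the
    # lossless priorities carry background traffic, mirroring the
    # assignments in the multi-lossy tests above.
    pause_prio_list = lossy_prio_list
    test_prio_list = lossy_prio_list
    bg_prio_list = lossless_prio_list
    return pause_prio_list, test_prio_list, bg_prio_list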
| 44.695817
| 95
| 0.604339
|
b8475ca711d95c2b7e81fe8713fd54938b5f1ddc
| 1,708
|
py
|
Python
|
main3.py
|
mitliagkas/pyliakmon
|
28a22be63646faf39f0f96a779a4579f69bfb41a
|
[
"MIT"
] | 3
|
2016-05-27T13:46:53.000Z
|
2017-04-16T22:43:42.000Z
|
main3.py
|
mitliagkas/pyliakmon
|
28a22be63646faf39f0f96a779a4579f69bfb41a
|
[
"MIT"
] | null | null | null |
main3.py
|
mitliagkas/pyliakmon
|
28a22be63646faf39f0f96a779a4579f69bfb41a
|
[
"MIT"
] | null | null | null |
import numpy as np
from numpy import linalg as la
import time
import subprocess
from streaming import *
import MovieLens
from multiprocessing import Process
from multiprocessing import Array
from multiprocessing import Pool
from multiprocessing import Condition
def runBOI(k,id,arr,allT,nWorkers,doneWithBlock,cond):
boi=ParallelBlockOrthogonal(
id=id,
arr=arr,
allT=allT,
doneWithBlock=doneWithBlock,
cond=cond,
k=k,
stream=MovieLens.UserStream(file='/var/datasets/ml-10M100K/ratingsTab'+str(nWorkers)+str(id)+'.dat')
)
for x in boi:
continue
print boi.getEstimate().T[:,0:3]
print np.dot(boi.getEstimate().T,np.loadtxt('mlpc.txt'))
return boi.getEstimate()
if __name__ == "__main__":
t0 = time.time()
p=65133
k=1
nWorkers=2
if False:
pl = Pool(nWorkers)
#results=pl.map(runBOI, [{'id':x,'k':k,'acc':acc,'allT':allT} for x in xrange(1,nWorkers+1)])
#results=pl.map(runBOI, [{'id':x,'k':k} for x in xrange(1,nWorkers+1)])
else:
arr = Array('d', p*k)
allT = Array('I', nWorkers,lock=False)
doneWithBlock = Array('I', nWorkers,lock=False)
cond = Condition()
processes=[]
for id in xrange(1,nWorkers+1):
arg={'id':id,'k':k,'arr':arr,'allT':allT,'nWorkers':nWorkers,'doneWithBlock':doneWithBlock,'cond':cond}
processes += [Process(target=runBOI, kwargs=arg)]
processes[-1].start()
# Join them
for id in xrange(1,nWorkers+1):
processes[id-1].join()
t1 = time.time()
total = t1-t0
print "Total time: ", total
#print np.dot(results[0].T,results[1])
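# Illustrative sketch only (assumed shapes, not called above): how the shared
# 'arr' buffer can be read back into a numpy matrix once the workers finish.
def exampleReadEstimate(arr, p, k):
    # View the flat multiprocessing Array of doubles as a p-by-k estimate
    # matrix without copying.
    return np.frombuffer(arr.get_obj()).reshape(p, k)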
| 26.276923
| 115
| 0.581967
|
437323aa270e187117807fda8a43a9c258fa41a2
| 3,723
|
py
|
Python
|
enas_lm/src/data_utils.py
|
MODU-FTNC/google-research
|
7c4f9b6e375eaacf50d8280b26f7e42a06b568aa
|
[
"Apache-2.0"
] | 1
|
2020-08-14T08:11:30.000Z
|
2020-08-14T08:11:30.000Z
|
enas_lm/src/data_utils.py
|
robot-ai-machinelearning/google-research
|
88481d10a87947ffb9305dc7665682e008b27391
|
[
"Apache-2.0"
] | 5
|
2021-08-25T16:15:36.000Z
|
2022-02-10T04:34:04.000Z
|
enas_lm/src/data_utils.py
|
robot-ai-machinelearning/google-research
|
88481d10a87947ffb9305dc7665682e008b27391
|
[
"Apache-2.0"
] | 1
|
2020-03-05T09:24:01.000Z
|
2020-03-05T09:24:01.000Z
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Load picked Penn Treebank data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
def input_producer(raw_data, batch_size, num_steps, shuffle=False,
randomize=False, random_len=False):
"""Produces graph-based input for Penn Treebank.
Args:
raw_data: np tensor of size [num_words].
batch_size: self-explained.
num_steps: number of BPTT steps.
shuffle: whether to shuffle sentences.
randomize: use random segments instead of the continuous corpus.
random_len: random sequence len.
Returns:
If `random_len` is set, return op that represents whether we have reached
the end of a sequence.
Otherwise, return number of batches in an epoch.
"""
num_batches_per_epoch = ((np.size(raw_data) // batch_size) - 1) // num_steps
raw_data = tf.convert_to_tensor(raw_data, name='raw_data', dtype=tf.int32)
data_len = tf.size(raw_data)
batch_len = data_len // batch_size
data = tf.reshape(raw_data[0 : batch_size * batch_len],
[batch_size, batch_len])
epoch_size = (batch_len - 1) // num_steps
with tf.device('/cpu:0'):
epoch_size = tf.identity(epoch_size, name='epoch_size')
if random_len:
start_idx = tf.Variable(0, name='start_idx', dtype=tf.int32,
trainable=False)
base_bptt = tf.cond(
tf.random_uniform(shape=(), minval=0., maxval=1.) < 0.95,
lambda: tf.cast(num_steps, dtype=tf.float32),
lambda: tf.cast(num_steps, dtype=tf.float32) / 2.)
seq_len = tf.random.truncated_normal(shape=(), mean=base_bptt, stddev=5.,
dtype=tf.float32)
seq_len = tf.cast(seq_len, dtype=tf.int32)
      seq_len = tf.minimum(seq_len, num_steps + 20)  # seq_len <= bptt + 20
seq_len = tf.minimum(seq_len, batch_len - start_idx - 1)
end_idx = start_idx + seq_len
x = data[:, start_idx : end_idx]
y = data[:, start_idx+1 : end_idx+1]
with tf.control_dependencies([x, y]):
with tf.control_dependencies([tf.assign(start_idx, end_idx)]):
should_reset = tf.greater_equal(end_idx, batch_len - 3)
reset_start_idx = tf.assign(start_idx, 0)
return (x, y, num_batches_per_epoch, reset_start_idx, should_reset,
base_bptt)
if randomize:
i = tf.random_uniform([1], minval=0, maxval=batch_len - num_steps,
dtype=tf.int32)
x = tf.strided_slice(data, [0, i], [batch_size, i + num_steps])
y = tf.strided_slice(data, [0, i + 1], [batch_size, i + num_steps + 1])
else:
i = tf.train.range_input_producer(epoch_size, shuffle=shuffle).dequeue()
x = tf.strided_slice(
data, [0, i * num_steps], [batch_size, (i + 1) * num_steps])
y = tf.strided_slice(
data, [0, i * num_steps + 1], [batch_size, (i + 1) * num_steps + 1])
x.set_shape([batch_size, num_steps])
y.set_shape([batch_size, num_steps])
return x, y, num_batches_per_epoch
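def _example_input_producer():
  """Illustrative sketch only (assumed toy corpus, not part of this module):
  builds the static pipeline, where each batch holds `num_steps` tokens and
  `y` is `x` shifted by one position."""
  raw_data = np.arange(100)  # toy corpus of 100 token ids
  x, y, num_batches = input_producer(raw_data, batch_size=4, num_steps=8)
  # x and y have shape [4, 8]; y[i, t] == x[i, t + 1] within each row.
  return x, y, num_batches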
| 38.381443
| 79
| 0.663443
|
9027f8d6b962c9250816fda62320e7ec218cad64
| 1,864
|
py
|
Python
|
src/job-exporter/test/test_ps.py
|
wyatuestc/pai
|
65b44e1ab37cab0790af392a016cc9fb1d2318fe
|
[
"MIT"
] | 1,417
|
2019-05-07T00:51:36.000Z
|
2022-03-31T10:15:31.000Z
|
src/job-exporter/test/test_ps.py
|
wyatuestc/pai
|
65b44e1ab37cab0790af392a016cc9fb1d2318fe
|
[
"MIT"
] | 2,447
|
2019-05-07T01:36:32.000Z
|
2022-03-30T08:47:43.000Z
|
src/job-exporter/test/test_ps.py
|
wyatuestc/pai
|
65b44e1ab37cab0790af392a016cc9fb1d2318fe
|
[
"MIT"
] | 329
|
2019-05-07T02:28:06.000Z
|
2022-03-29T06:12:49.000Z
|
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import unittest
import base
sys.path.append(os.path.abspath("../src/"))
import ps
class TestPS(base.TestBase):
"""
Test ps.py
"""
def test_parse_ps_result(self):
sample_path = "data/ps_sample.txt"
with open(sample_path, "r") as f:
ps_result = f.read()
parse_result = ps.parse_result(ps_result)
self.assertEqual(4, len(parse_result))
self.assertEqual("D", parse_result[0].state)
self.assertEqual("4", parse_result[0].pid)
self.assertEqual(2 * 1024, parse_result[0].rss)
self.assertEqual("/var/drivers/nvidia/current/bin/nvidia-smi -q -x",
parse_result[0].cmd)
if __name__ == '__main__':
unittest.main()
| 39.659574
| 128
| 0.723712
|
d959e2f2ce96964d83ca0b160a0eb0d0096cc6a9
| 2,258
|
py
|
Python
|
src/c3nav/editor/views/base.py
|
bate/c3nav
|
9a86dd3eaeb3a10af3c5fa869575ed1e9300465a
|
[
"Apache-2.0"
] | null | null | null |
src/c3nav/editor/views/base.py
|
bate/c3nav
|
9a86dd3eaeb3a10af3c5fa869575ed1e9300465a
|
[
"Apache-2.0"
] | null | null | null |
src/c3nav/editor/views/base.py
|
bate/c3nav
|
9a86dd3eaeb3a10af3c5fa869575ed1e9300465a
|
[
"Apache-2.0"
] | null | null | null |
from functools import wraps
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseNotModified, HttpResponseRedirect
from django.shortcuts import render
from django.utils.cache import patch_vary_headers
from django.utils.translation import get_language
from c3nav.editor.models import ChangeSet
from c3nav.mapdata.models.access import AccessPermission
from c3nav.mapdata.utils.user import can_access_editor
def sidebar_view(func=None, select_related=None):
if func is None:
def wrapped(inner_func):
return sidebar_view(inner_func, select_related)
return wrapped
@wraps(func)
def with_ajax_check(request, *args, **kwargs):
if not can_access_editor(request):
raise PermissionDenied
request.changeset = ChangeSet.get_for_request(request, select_related)
ajax = request.is_ajax() or 'ajax' in request.GET
if not ajax:
request.META.pop('HTTP_IF_NONE_MATCH', None)
response = func(request, *args, **kwargs)
if ajax:
if isinstance(response, HttpResponseRedirect):
return render(request, 'editor/redirect.html', {'target': response['location']})
if not isinstance(response, HttpResponseNotModified):
response.write(render(request, 'editor/fragment_nav.html', {}).content)
response['Cache-Control'] = 'no-cache'
patch_vary_headers(response, ('X-Requested-With', ))
return response
if isinstance(response, HttpResponseRedirect):
return response
response = render(request, 'editor/map.html', {'content': response.content.decode()})
response['Cache-Control'] = 'no-cache'
patch_vary_headers(response, ('X-Requested-With', ))
return response
return with_ajax_check
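# Illustrative usage sketch (hypothetical view and template names, not part
# of c3nav): the decorator works bare or with arguments, because sidebar_view
# returns a second-stage wrapper when called without ``func``.
@sidebar_view(select_related=None)
def _example_sidebar_view(request):
    return render(request, 'editor/example.html', {})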
def etag_func(request, *args, **kwargs):
try:
changeset = request.changeset
except AttributeError:
changeset = ChangeSet.get_for_request(request)
request.changeset = changeset
return (get_language() + ':' + changeset.raw_cache_key_by_changes + ':' +
AccessPermission.cache_key_for_request(request, with_update=False) + ':' + str(request.user.pk or 0))
| 37.633333
| 113
| 0.689105
|
d70257e7afe34ac2d45d06fc0d243ce5899e4add
| 7,053
|
py
|
Python
|
08_Convolutional_Neural_Networks/06_Deepdream/06_deepdream.py
|
varunjha089/tensorflow_cookbook
|
c1fa5051c860ecb6de875db975465ced06f43ba6
|
[
"MIT"
] | 93
|
2018-05-27T08:07:02.000Z
|
2022-02-28T11:18:08.000Z
|
08_Convolutional_Neural_Networks/06_Deepdream/06_deepdream.py
|
varunjha089/tensorflow_cookbook
|
c1fa5051c860ecb6de875db975465ced06f43ba6
|
[
"MIT"
] | 2
|
2018-03-07T14:31:22.000Z
|
2018-03-07T15:04:17.000Z
|
08_Convolutional_Neural_Networks/06_Deepdream/06_deepdream.py
|
varunjha089/tensorflow_cookbook
|
c1fa5051c860ecb6de875db975465ced06f43ba6
|
[
"MIT"
] | 75
|
2018-06-22T08:02:03.000Z
|
2022-03-10T14:38:44.000Z
|
# Using TensorFlow for Deep Dream
#---------------------------------------
# From: Alexander Mordvintsev
# --https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/tutorials/deepdream
#
# Make sure to download the deep dream model here:
# https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip
#
# Run:
# me@computer:~$ wget https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip
# me@computer:~$ unzip inception5h.zip
#
# More comments added inline.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import matplotlib.pyplot as plt
import numpy as np
import PIL.Image
import tensorflow as tf
from io import BytesIO
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Start a graph session
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
os.chdir('/home/nick/Documents/tensorflow/inception-v1-model/')
# Model location
model_fn = 'tensorflow_inception_graph.pb'
# Load graph parameters
with tf.gfile.FastGFile(model_fn, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
# Create placeholder for input
t_input = tf.placeholder(np.float32, name='input')
# Imagenet average bias to subtract off images
imagenet_mean = 117.0
t_preprocessed = tf.expand_dims(t_input-imagenet_mean, 0)
tf.import_graph_def(graph_def, {'input':t_preprocessed})
# Create a list of layers that we can refer to later
layers = [op.name for op in graph.get_operations() if op.type=='Conv2D' and 'import/' in op.name]
# Count how many outputs for each layer
feature_nums = [int(graph.get_tensor_by_name(name+':0').get_shape()[-1]) for name in layers]
# Print count of layers and outputs (features nodes)
print('Number of layers', len(layers))
print('Total number of feature channels:', sum(feature_nums))
# Picking some internal layer. Note that we use outputs before applying the ReLU nonlinearity
# to have non-zero gradients for features with negative initial activations.
layer = 'mixed4d_3x3_bottleneck_pre_relu'
channel = 30 # picking some feature channel to visualize
# start with a gray image with a little noise
img_noise = np.random.uniform(size=(224,224,3)) + 100.0
def showarray(a, fmt='jpeg'):
# First make sure everything is between 0 and 255
a = np.uint8(np.clip(a, 0, 1)*255)
# Pick an in-memory format for image display
f = BytesIO()
# Create the in memory image
PIL.Image.fromarray(a).save(f, fmt)
# Show image
plt.imshow(a)
def T(layer):
'''Helper for getting layer output tensor'''
return graph.get_tensor_by_name("import/%s:0"%layer)
# The following function returns a function wrapper that will create the placeholder
# inputs of a specified dtype
def tffunc(*argtypes):
'''Helper that transforms TF-graph generating function into a regular one.
See "resize" function below.
'''
placeholders = list(map(tf.placeholder, argtypes))
def wrap(f):
out = f(*placeholders)
def wrapper(*args, **kw):
return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))
return wrapper
return wrap
# Helper function that uses TF to resize an image
def resize(img, size):
img = tf.expand_dims(img, 0)
# Change 'img' size by linear interpolation
return tf.image.resize_bilinear(img, size)[0,:,:,:]
def calc_grad_tiled(img, t_grad, tile_size=512):
'''Compute the value of tensor t_grad over the image in a tiled way.
Random shifts are applied to the image to blur tile boundaries over
multiple iterations.'''
# Pick a subregion square size
sz = tile_size
# Get the image height and width
h, w = img.shape[:2]
# Get a random shift amount in the x and y direction
sx, sy = np.random.randint(sz, size=2)
# Randomly shift the image (roll image) in the x and y directions
img_shift = np.roll(np.roll(img, sx, 1), sy, 0)
    # Initialize the whole image gradient as zeros
grad = np.zeros_like(img)
# Now we loop through all the sub-tiles in the image
for y in range(0, max(h-sz//2, sz),sz):
for x in range(0, max(w-sz//2, sz),sz):
# Select the sub image tile
sub = img_shift[y:y+sz,x:x+sz]
# Calculate the gradient for the tile
g = sess.run(t_grad, {t_input:sub})
# Apply the gradient of the tile to the whole image gradient
grad[y:y+sz,x:x+sz] = g
# Return the gradient, undoing the roll operation
return np.roll(np.roll(grad, -sx, 1), -sy, 0)
def render_deepdream(t_obj, img0=img_noise,
iter_n=10, step=1.5, octave_n=4, octave_scale=1.4):
# defining the optimization objective, the objective is the mean of the feature
t_score = tf.reduce_mean(t_obj)
# Our gradients will be defined as changing the t_input to get closer to
# the values of t_score. Here, t_score is the mean of the feature we select,
# and t_input will be the image octave (starting with the last)
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
# Store the image
img = img0
# Initialize the octave list
octaves = []
# Since we stored the image, we need to only calculate n-1 octaves
for i in range(octave_n-1):
# Extract the image shape
hw = img.shape[:2]
# Resize the image, scale by the octave_scale (resize by linear interpolation)
lo = resize(img, np.int32(np.float32(hw)/octave_scale))
        # The residual hi holds the detail lost by downsampling:
        # hi = image - resize(lo, hw)
hi = img-resize(lo, hw)
# Save the lo image for re-iterating
img = lo
# Save the extracted hi-image
octaves.append(hi)
# generate details octave by octave
for octave in range(octave_n):
if octave>0:
# Start with the last octave
hi = octaves[-octave]
            # Upscale the image to this octave's size and add back the
            # corresponding high-frequency detail saved earlier
img = resize(img, hi.shape[:2])+hi
for i in range(iter_n):
# Calculate gradient of the image.
g = calc_grad_tiled(img, t_grad)
            # Ideally, we would just add the gradient, g, but we want to
            # take a step of a fixed size ('step') in its direction, so we
            # divide g by the average norm of the gradient before scaling.
            # To make sure we aren't dividing by zero, we add 1e-7.
img += g*(step / (np.abs(g).mean()+1e-7))
print('.',end = ' ')
showarray(img/255.0)
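# Illustrative sketch only (never called by the script; assumes the
# tffunc-wrapped resize created in the __main__ block): the lo/hi octave
# split above is invertible up to interpolation error.
def example_octave_roundtrip(img, octave_scale=1.4):
    hw = img.shape[:2]
    lo = resize(img, np.int32(np.float32(hw)/octave_scale))
    hi = img - resize(lo, hw)
    # Adding the saved residual back to the upsampled low-pass image
    # recovers (approximately) the original image.
    return resize(lo, hw) + hi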
# Run Deep Dream
if __name__=="__main__":
# Create resize function that has a wrapper that creates specified placeholder types
resize = tffunc(np.float32, np.int32)(resize)
# Open image
img0 = PIL.Image.open('book_cover.jpg')
img0 = np.float32(img0)
# Show Original Image
showarray(img0/255.0)
# Create deep dream
render_deepdream(T(layer)[:,:,:,channel], img0, iter_n=15)
sess.close()
| 36.734375
| 101
| 0.673614
|
1d88410a53e7578d1bb80903d8e4c8ec886ff785
| 2,238
|
py
|
Python
|
preprocessing/mnist-binary-preprocess.py
|
johnwikman/miking-ml
|
3301f0b92efa5d10f7e50b66ecf7e7ddfb960650
|
[
"MIT"
] | null | null | null |
preprocessing/mnist-binary-preprocess.py
|
johnwikman/miking-ml
|
3301f0b92efa5d10f7e50b66ecf7e7ddfb960650
|
[
"MIT"
] | null | null | null |
preprocessing/mnist-binary-preprocess.py
|
johnwikman/miking-ml
|
3301f0b92efa5d10f7e50b66ecf7e7ddfb960650
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# MNIST dataset decoding (http://yann.lecun.com/exdb/mnist/), binary
# representation to ASCII representation, takes roughly ~4x the space of the
# binary file
import struct
import sys
def uint32_from_be(bs):
assert len(bs) == 4
return struct.unpack(">I", bs)[0]
if len(sys.argv) != 4:
print(f"usage: {sys.argv[0]} <binary data file> <binary label file> <target ASCII file>")
bin_datafile = sys.argv[1]
bin_labelfile = sys.argv[2]
outfile = sys.argv[3]
datapoints = []
print(f"reading images from {bin_datafile}...")
with open(bin_datafile, "rb") as f:
bs = f.read()
magic_number = uint32_from_be(bs[0:4])
assert magic_number == 2051, f"Invalid magic number {magic_number}, expected 2051"
n_images = uint32_from_be(bs[4:8])
rows = uint32_from_be(bs[8:12])
cols = uint32_from_be(bs[12:16])
assert rows == 28, f"Expected number of rows to be 28, got {rows}"
assert cols == 28, f"Expected number of cols to be 28, got {cols}"
expected_bytes = 16 + (rows * cols * n_images)
assert expected_bytes == len(bs), f"Expected number of bytes to be {expected_bytes}, got {len(bs)}"
for i in range(n_images):
start = 16 + (i * rows * cols)
end = 16 + ((i + 1) * rows * cols)
datapoints.append((None, bs[start:end]))
print(f"reading labels from {bin_labelfile}...")
with open(bin_labelfile, "rb") as f:
bs = f.read()
magic_number = uint32_from_be(bs[0:4])
assert magic_number == 2049, f"Invalid magic number {magic_number}, expected 2049"
n_labels = uint32_from_be(bs[4:8])
assert n_labels == len(datapoints), f"Expected number of labels to be {len(datapoints)}, got {n_labels}"
expected_bytes = 8 + n_labels
assert expected_bytes == len(bs), f"Expected number of bytes to be {expected_bytes}, got {len(bs)}"
for i in range(n_labels):
_, data = datapoints[i]
datapoints[i] = (int(bs[8+i]), data)
print(f"writing to {outfile}...")
# Outfile format: one datapoint per line (LF separated), each line being the
# class label followed by the pixel bytes, all written in decimal
with open(outfile, "w+") as f:
for (cls, data) in datapoints:
f.write(f"{cls}")
for d in data:
f.write(f" {d}")
f.write("\n")
print("done.")
| 35.52381
| 108
| 0.652368
|
e80955ce86346623572b7125fd787576aefbf7fd
| 9,557
|
py
|
Python
|
pydantic/dataclasses.py
|
raushanraja/pydantic-classy
|
97b6d9307b065579c34e724115699212999eaace
|
[
"MIT"
] | null | null | null |
pydantic/dataclasses.py
|
raushanraja/pydantic-classy
|
97b6d9307b065579c34e724115699212999eaace
|
[
"MIT"
] | 67
|
2021-08-16T04:16:03.000Z
|
2022-03-28T04:08:51.000Z
|
pydantic/dataclasses.py
|
raushanraja/pydantic-classy
|
97b6d9307b065579c34e724115699212999eaace
|
[
"MIT"
] | null | null | null |
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Type, TypeVar, Union, overload
from .class_validators import gather_all_validators
from .error_wrappers import ValidationError
from .errors import DataclassTypeError
from .fields import Field, FieldInfo, Required, Undefined
from .main import create_model, validate_model
from .typing import resolve_annotations
from .utils import ClassAttribute
if TYPE_CHECKING:
from .main import BaseConfig, BaseModel # noqa: F401
from .typing import CallableGenerator, NoArgAnyCallable
DataclassT = TypeVar('DataclassT', bound='Dataclass')
class Dataclass:
__pydantic_model__: Type[BaseModel]
__initialised__: bool
__post_init_original__: Optional[Callable[..., None]]
__processed__: Optional[ClassAttribute]
__has_field_info_default__: bool # whether or not a `pydantic.Field` is used as default value
def __init__(self, *args: Any, **kwargs: Any) -> None:
pass
@classmethod
def __get_validators__(cls: Type['Dataclass']) -> 'CallableGenerator':
pass
@classmethod
def __validate__(cls: Type['DataclassT'], v: Any) -> 'DataclassT':
pass
def __call__(self: 'DataclassT', *args: Any, **kwargs: Any) -> 'DataclassT':
pass
def _validate_dataclass(cls: Type['DataclassT'], v: Any) -> 'DataclassT':
if isinstance(v, cls):
return v
elif isinstance(v, (list, tuple)):
return cls(*v)
elif isinstance(v, dict):
return cls(**v)
# In nested dataclasses, v can be of type `dataclasses.dataclass`.
# But to validate fields `cls` will be in fact a `pydantic.dataclasses.dataclass`,
# which inherits directly from the class of `v`.
elif is_builtin_dataclass(v) and cls.__bases__[0] is type(v):
import dataclassy as dataclasses
return cls(**dataclasses.asdict(v))
else:
raise DataclassTypeError(class_name=cls.__name__)
def _get_validators(cls: Type['Dataclass']) -> 'CallableGenerator':
yield cls.__validate__
def setattr_validate_assignment(self: 'Dataclass', name: str, value: Any) -> None:
if self.__initialised__:
d = dict(self.__dict__)
d.pop(name, None)
known_field = self.__pydantic_model__.__fields__.get(name, None)
if known_field:
value, error_ = known_field.validate(value, d, loc=name, cls=self.__class__)
if error_:
raise ValidationError([error_], self.__class__)
object.__setattr__(self, name, value)
def is_builtin_dataclass(_cls: Type[Any]) -> bool:
"""
`dataclasses.is_dataclass` is True if one of the class parents is a `dataclass`.
This is why we also add a class attribute `__processed__` to only consider 'direct' built-in dataclasses
"""
import dataclassy as dataclasses
return not hasattr(_cls, '__processed__') and dataclasses.is_dataclass(_cls)
def _generate_pydantic_post_init(
post_init_original: Optional[Callable[..., None]], post_init_post_parse: Optional[Callable[..., None]]
) -> Callable[..., None]:
def _pydantic_post_init(self: 'Dataclass', *initvars: Any) -> None:
if post_init_original is not None:
post_init_original(self, *initvars)
if getattr(self, '__has_field_info_default__', False):
# We need to remove `FieldInfo` values since they are not valid as input
# It's ok to do that because they are obviously the default values!
input_data = {k: v for k, v in self.__dict__.items() if not isinstance(v, FieldInfo)}
else:
input_data = self.__dict__
d, _, validation_error = validate_model(self.__pydantic_model__, input_data, cls=self.__class__)
if validation_error:
raise validation_error
object.__setattr__(self, '__dict__', d)
object.__setattr__(self, '__initialised__', True)
if post_init_post_parse is not None:
post_init_post_parse(self, *initvars)
return _pydantic_post_init
def _process_class(
_cls: Type[Any],
init: bool,
repr: bool,
eq: bool,
order: bool,
unsafe_hash: bool,
frozen: bool,
config: Optional[Type[Any]],
) -> Type['Dataclass']:
import dataclassy as dataclasses
post_init_original = getattr(_cls, '__post_init__', None)
if post_init_original and post_init_original.__name__ == '_pydantic_post_init':
post_init_original = None
if not post_init_original:
post_init_original = getattr(_cls, '__post_init_original__', None)
post_init_post_parse = getattr(_cls, '__post_init_post_parse__', None)
_pydantic_post_init = _generate_pydantic_post_init(post_init_original, post_init_post_parse)
# If the class is already a dataclass, __post_init__ will not be called automatically
# so no validation will be added.
# We hence create dynamically a new dataclass:
# ```
# @dataclasses.dataclass
# class NewClass(_cls):
# __post_init__ = _pydantic_post_init
# ```
# with the exact same fields as the base dataclass
# and register it on module level to address pickle problem:
# https://github.com/samuelcolvin/pydantic/issues/2111
if is_builtin_dataclass(_cls):
uniq_class_name = f'_Pydantic_{_cls.__name__}_{id(_cls)}'
_cls = type(
# for pretty output new class will have the name as original
_cls.__name__,
(_cls,),
{
'__annotations__': resolve_annotations(_cls.__annotations__, _cls.__module__),
'__post_init__': _pydantic_post_init,
# attrs for pickle to find this class
'__module__': __name__,
'__qualname__': uniq_class_name,
},
)
globals()[uniq_class_name] = _cls
else:
_cls.__post_init__ = _pydantic_post_init
cls: Type['Dataclass'] = dataclasses.dataclass( # type: ignore
_cls, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen
)
cls.__processed__ = ClassAttribute('__processed__', True)
field_definitions: Dict[str, Any] = {}
for field in dataclasses.fields(cls):
default: Any = Undefined
default_factory: Optional['NoArgAnyCallable'] = None
field_info: FieldInfo
if field.default is not dataclasses.MISSING:
default = field.default
# mypy issue 7020 and 708
elif field.default_factory is not dataclasses.MISSING: # type: ignore
default_factory = field.default_factory # type: ignore
else:
default = Required
if isinstance(default, FieldInfo):
field_info = default
cls.__has_field_info_default__ = True
else:
field_info = Field(default=default, default_factory=default_factory, **field.metadata)
field_definitions[field.name] = (field.type, field_info)
validators = gather_all_validators(cls)
cls.__pydantic_model__ = create_model(
cls.__name__, __config__=config, __module__=_cls.__module__, __validators__=validators, **field_definitions
)
cls.__initialised__ = False
cls.__validate__ = classmethod(_validate_dataclass) # type: ignore[assignment]
cls.__get_validators__ = classmethod(_get_validators) # type: ignore[assignment]
if post_init_original:
cls.__post_init_original__ = post_init_original
if cls.__pydantic_model__.__config__.validate_assignment and not frozen:
cls.__setattr__ = setattr_validate_assignment # type: ignore[assignment]
return cls
@overload
def dataclass(
*,
init: bool = True,
repr: bool = True,
eq: bool = True,
order: bool = False,
unsafe_hash: bool = False,
frozen: bool = False,
config: Type[Any] = None,
) -> Callable[[Type[Any]], Type['Dataclass']]:
...
@overload
def dataclass(
_cls: Type[Any],
*,
init: bool = True,
repr: bool = True,
eq: bool = True,
order: bool = False,
unsafe_hash: bool = False,
frozen: bool = False,
config: Type[Any] = None,
) -> Type['Dataclass']:
...
def dataclass(
_cls: Optional[Type[Any]] = None,
*,
init: bool = True,
repr: bool = True,
eq: bool = True,
order: bool = False,
unsafe_hash: bool = False,
frozen: bool = False,
config: Type[Any] = None,
) -> Union[Callable[[Type[Any]], Type['Dataclass']], Type['Dataclass']]:
"""
Like the python standard lib dataclasses but with type validation.
Arguments are the same as for standard dataclasses, except for validate_assignment which has the same meaning
as Config.validate_assignment.
"""
def wrap(cls: Type[Any]) -> Type['Dataclass']:
return _process_class(cls, init, repr, eq, order, unsafe_hash, frozen, config)
if _cls is None:
return wrap
return wrap(_cls)
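def _example_usage():
    """Illustrative sketch only (hypothetical model, not part of this
    module): inputs are validated and coerced against the annotated types,
    mirroring pydantic models."""
    @dataclass
    class User:
        id: int
        name: str = 'John Doe'
    user = User(id='42')
    assert user.id == 42  # '42' is coerced to int; invalid input raises ValidationError
    return user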
def make_dataclass_validator(_cls: Type[Any], config: Type['BaseConfig']) -> 'CallableGenerator':
"""
Create a pydantic.dataclass from a builtin dataclass to add type validation
and yield the validators
It retrieves the parameters of the dataclass and forwards them to the newly created dataclass
"""
dataclass_params = _cls.__dataclass_params__
stdlib_dataclass_parameters = {param: getattr(dataclass_params, param) for param in dataclass_params.__slots__}
cls = dataclass(_cls, config=config, **stdlib_dataclass_parameters)
yield from _get_validators(cls)
| 35.660448
| 115
| 0.673956
|
4ad9a21cf624f06989aaadaf2ef61a0bbd911449
| 2,864
|
py
|
Python
|
welcome_wizard/datasources.py
|
nautobot/nautobot-plugin-welcome-wizard
|
de4dfc8e7fbbc6ee526235c93e5ef1eec7989bc8
|
[
"Apache-2.0"
] | 5
|
2021-08-11T19:50:51.000Z
|
2022-01-25T18:36:41.000Z
|
welcome_wizard/datasources.py
|
nautobot/nautobot-plugin-welcome-wizard
|
de4dfc8e7fbbc6ee526235c93e5ef1eec7989bc8
|
[
"Apache-2.0"
] | 3
|
2021-08-30T18:47:50.000Z
|
2021-12-31T22:47:14.000Z
|
welcome_wizard/datasources.py
|
nautobot/nautobot-plugin-welcome-wizard
|
de4dfc8e7fbbc6ee526235c93e5ef1eec7989bc8
|
[
"Apache-2.0"
] | 4
|
2021-08-02T22:19:44.000Z
|
2021-12-05T21:01:52.000Z
|
"""Datasources for Welcome Wizard."""
import os
from pathlib import Path
import yaml
from django.utils.text import slugify
from nautobot.extras.choices import LogLevelChoices
from nautobot.extras.registry import DatasourceContent
from welcome_wizard.models.importer import ManufacturerImport, DeviceTypeImport
def refresh_git_import_wizard(repository_record, job_result, delete=False):
"""Callback for GitRepository updates - refresh Device Types managed by it."""
if "welcome_wizard.import_wizard" not in repository_record.provided_contents or delete:
# TODO Handle delete.
return
manufacturers = set()
device_types = {}
    # The Git repository provides device type YAML files in a
    # /device-types/ directory at the repository root.
device_type_path = os.path.join(repository_record.filesystem_path, "device-types")
for filename in Path(device_type_path).rglob("*.yaml"):
with open(filename, encoding="utf8") as file:
data = yaml.safe_load(file)
manufacturers.add(data["manufacturer"])
device_types[filename.name] = data
for manufacturer in manufacturers:
# Create or update an ManufacturerImport record based on the provided data
manufacturer_record, _ = ManufacturerImport.objects.update_or_create(
name=manufacturer, slug=slugify(manufacturer)
)
# Record the outcome in the JobResult record
job_result.log(
"Successfully created/updated manufacturer",
obj=manufacturer_record,
level_choice=LogLevelChoices.LOG_SUCCESS,
grouping="welcome_wizard",
)
for device_type, device_data in device_types.items():
device_type_record, _ = DeviceTypeImport.objects.update_or_create(
filename=device_type,
name=device_data["model"],
manufacturer=ManufacturerImport.objects.filter(name=device_data["manufacturer"])[0],
device_type_data=device_data,
)
job_result.log(
"Successfully created/updated device_type",
obj=device_type_record,
level_choice=LogLevelChoices.LOG_SUCCESS,
grouping="welcome_wizard",
)
# Register that Import Wizard content (manufacturers and device types) can be
# loaded from a Git repository, and register the callback function used to do so
datasource_contents = [
(
"extras.gitrepository", # datasource class we are registering for
DatasourceContent(
name="Import Wizard", # human-readable name to display in the UI
content_identifier="welcome_wizard.import_wizard", # internal slug to identify the data type
icon="mdi-wizard-hat", # Material Design Icons icon to use in UI
callback=refresh_git_import_wizard, # callback function on GitRepository refresh
),
)
]
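# Illustrative sketch (values are assumptions, not real device data): the
# minimal YAML fields the callback above reads from each file under the
# repository's device-types/ directory.
EXAMPLE_DEVICE_TYPE_YAML = """
manufacturer: Acme
model: Widget-48
"""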
| 37.684211
| 105
| 0.695182
|
ea92b53c614933fec68b872a94f34b6f62411d1b
| 13,047
|
py
|
Python
|
flask_restful/fields.py
|
whitekid/flask-restful
|
9242a0cc3a3272af97502c7c85f1627a4edb7351
|
[
"BSD-3-Clause"
] | 1
|
2017-12-30T20:43:28.000Z
|
2017-12-30T20:43:28.000Z
|
flask_restful/fields.py
|
whitekid/flask-restful
|
9242a0cc3a3272af97502c7c85f1627a4edb7351
|
[
"BSD-3-Clause"
] | null | null | null |
flask_restful/fields.py
|
whitekid/flask-restful
|
9242a0cc3a3272af97502c7c85f1627a4edb7351
|
[
"BSD-3-Clause"
] | 1
|
2021-02-12T14:18:02.000Z
|
2021-02-12T14:18:02.000Z
|
from datetime import datetime
from calendar import timegm
import pytz
from decimal import Decimal as MyDecimal, ROUND_HALF_EVEN
from email.utils import formatdate
import six
try:
from urlparse import urlparse, urlunparse
except ImportError:
# python3
from urllib.parse import urlparse, urlunparse
from flask_restful import inputs, marshal
from flask import url_for, request
__all__ = ["String", "FormattedString", "Url", "DateTime", "Float",
"Integer", "Arbitrary", "Nested", "List", "Raw", "Boolean",
"Fixed", "Price"]
class MarshallingException(Exception):
"""
This is an encapsulating Exception in case of marshalling error.
"""
def __init__(self, underlying_exception):
# just put the contextual representation of the error to hint on what
# went wrong without exposing internals
super(MarshallingException, self).__init__(six.text_type(underlying_exception))
def is_indexable_but_not_string(obj):
return not hasattr(obj, "strip") and hasattr(obj, "__iter__")
def get_value(key, obj, default=None):
"""Helper for pulling a keyed value off various types of objects"""
if type(key) == int:
return _get_value_for_key(key, obj, default)
elif callable(key):
return key(obj)
else:
return _get_value_for_keys(key.split('.'), obj, default)
def _get_value_for_keys(keys, obj, default):
if len(keys) == 1:
return _get_value_for_key(keys[0], obj, default)
else:
return _get_value_for_keys(
keys[1:], _get_value_for_key(keys[0], obj, default), default)
def _get_value_for_key(key, obj, default):
if is_indexable_but_not_string(obj):
try:
return obj[key]
except (IndexError, TypeError, KeyError):
pass
return getattr(obj, key, default)
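def _example_get_value():
    """Illustrative sketch only (hypothetical data): get_value resolves
    integer indexes, callables and dotted key paths against dicts,
    sequences and plain objects."""
    data = {'user': {'name': 'Alice'}, 'tags': ['a', 'b']}
    assert get_value('user.name', data) == 'Alice'             # dotted path
    assert get_value(1, data['tags']) == 'b'                   # integer index
    assert get_value(lambda o: len(o['tags']), data) == 2      # callable key
    assert get_value('missing', data, default='n/a') == 'n/a'  # fallback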
def to_marshallable_type(obj):
"""Helper for converting an object to a dictionary only if it is not
dictionary already or an indexable object nor a simple type"""
if obj is None:
return None # make it idempotent for None
if hasattr(obj, '__getitem__'):
return obj # it is indexable it is ok
if hasattr(obj, '__marshallable__'):
return obj.__marshallable__()
return dict(obj.__dict__)
class Raw(object):
"""Raw provides a base field class from which others should extend. It
applies no formatting by default, and should only be used in cases where
data does not need to be formatted before being serialized. Fields should
throw a :class:`MarshallingException` in case of parsing problem.
:param default: The default value for the field, if no value is
specified.
:param attribute: If the public facing value differs from the internal
value, use this to retrieve a different attribute from the response
than the publicly named value.
"""
def __init__(self, default=None, attribute=None):
self.attribute = attribute
self.default = default
def format(self, value):
"""Formats a field's value. No-op by default - field classes that
modify how the value of existing object keys should be presented should
override this and apply the appropriate formatting.
:param value: The value to format
:exception MarshallingException: In case of formatting problem
Ex::
class TitleCase(Raw):
def format(self, value):
return unicode(value).title()
"""
return value
def output(self, key, obj):
"""Pulls the value for the given key from the object, applies the
field's formatting and returns the result. If the key is not found
in the object, returns the default value. Field classes that create
values which do not require the existence of the key in the object
should override this and return the desired value.
:exception MarshallingException: In case of formatting problem
"""
value = get_value(key if self.attribute is None else self.attribute, obj)
if value is None:
return self.default
return self.format(value)
class Nested(Raw):
"""Allows you to nest one set of fields inside another.
See :ref:`nested-field` for more information
:param dict nested: The dictionary to nest
:param bool allow_null: Whether to return None instead of a dictionary
with null keys, if a nested dictionary has all-null keys
:param kwargs: If ``default`` keyword argument is present, a nested
dictionary will be marshaled as its value if nested dictionary is
all-null keys (e.g. lets you return an empty JSON object instead of
null)
"""
def __init__(self, nested, allow_null=False, **kwargs):
self.nested = nested
self.allow_null = allow_null
super(Nested, self).__init__(**kwargs)
def output(self, key, obj):
value = get_value(key if self.attribute is None else self.attribute, obj)
if value is None:
if self.allow_null:
return None
elif self.default is not None:
return self.default
return marshal(value, self.nested)
class List(Raw):
"""
Field for marshalling lists of other fields.
See :ref:`list-field` for more information.
:param cls_or_instance: The field type the list will contain.
"""
def __init__(self, cls_or_instance, **kwargs):
super(List, self).__init__(**kwargs)
error_msg = ("The type of the list elements must be a subclass of "
"flask_restful.fields.Raw")
if isinstance(cls_or_instance, type):
if not issubclass(cls_or_instance, Raw):
raise MarshallingException(error_msg)
self.container = cls_or_instance()
else:
if not isinstance(cls_or_instance, Raw):
raise MarshallingException(error_msg)
self.container = cls_or_instance
def format(self, value):
# Convert all instances in typed list to container type
if isinstance(value, set):
value = list(value)
return [
self.container.output(idx,
val if (isinstance(val, dict)
or (self.container.attribute
and hasattr(val, self.container.attribute)))
and not isinstance(self.container, Nested)
and not type(self.container) is Raw
else value)
for idx, val in enumerate(value)
]
def output(self, key, data):
value = get_value(key if self.attribute is None else self.attribute, data)
# we cannot really test for external dict behavior
if is_indexable_but_not_string(value) and not isinstance(value, dict):
return self.format(value)
if value is None:
return self.default
return [marshal(value, self.container.nested)]
class String(Raw):
"""
Marshal a value as a string. Uses ``six.text_type`` so values will
be converted to :class:`unicode` in python2 and :class:`str` in
python3.
"""
def format(self, value):
try:
return six.text_type(value)
except ValueError as ve:
raise MarshallingException(ve)
class Integer(Raw):
""" Field for outputting an integer value.
:param int default: The default value for the field, if no value is
specified.
"""
def __init__(self, default=0, **kwargs):
super(Integer, self).__init__(default=default, **kwargs)
def format(self, value):
try:
if value is None:
return self.default
return int(value)
except ValueError as ve:
raise MarshallingException(ve)
class Boolean(Raw):
"""
Field for outputting a boolean value.
Empty collections such as ``""``, ``{}``, ``[]``, etc. will be converted to
``False``.
"""
def format(self, value):
return bool(value)
class FormattedString(Raw):
"""
FormattedString is used to interpolate other values from
the response into this field. The syntax for the source string is
the same as the string :meth:`~str.format` method from the python
stdlib.
Ex::
fields = {
'name': fields.String,
'greeting': fields.FormattedString("Hello {name}")
}
data = {
'name': 'Doug',
}
marshal(data, fields)
"""
def __init__(self, src_str):
"""
:param string src_str: the string to format with the other
values from the response.
"""
super(FormattedString, self).__init__()
self.src_str = six.text_type(src_str)
def output(self, key, obj):
try:
data = to_marshallable_type(obj)
return self.src_str.format(**data)
except (TypeError, IndexError) as error:
raise MarshallingException(error)
class Url(Raw):
"""
A string representation of a Url
:param endpoint: Endpoint name. If endpoint is ``None``,
``request.endpoint`` is used instead
:type endpoint: str
:param absolute: If ``True``, ensures that the generated urls will have the
hostname included
:type absolute: bool
:param scheme: URL scheme specifier (e.g. ``http``, ``https``)
:type scheme: str
"""
def __init__(self, endpoint=None, absolute=False, scheme=None):
super(Url, self).__init__()
self.endpoint = endpoint
self.absolute = absolute
self.scheme = scheme
def output(self, key, obj):
try:
data = to_marshallable_type(obj)
endpoint = self.endpoint if self.endpoint is not None else request.endpoint
o = urlparse(url_for(endpoint, _external=self.absolute, **data))
if self.absolute:
scheme = self.scheme if self.scheme is not None else o.scheme
return urlunparse((scheme, o.netloc, o.path, "", "", ""))
return urlunparse(("", "", o.path, "", "", ""))
except TypeError as te:
raise MarshallingException(te)
class Float(Raw):
"""
A double as IEEE-754 double precision.
ex : 3.141592653589793 3.1415926535897933e-06 3.141592653589793e+24 nan inf
-inf
"""
def format(self, value):
try:
return float(value)
except ValueError as ve:
raise MarshallingException(ve)
class Arbitrary(Raw):
"""
A floating point number with an arbitrary precision
ex: 634271127864378216478362784632784678324.23432
"""
def format(self, value):
return six.text_type(MyDecimal(value))
class DateTime(Raw):
"""
Return a formatted datetime string in UTC. Supported formats are RFC 822
and ISO 8601.
See :func:`email.utils.formatdate` for more info on the RFC 822 format.
See :meth:`datetime.datetime.isoformat` for more info on the ISO 8601
format.
:param dt_format: ``'rfc822'`` or ``'iso8601'``
:type dt_format: str
"""
def __init__(self, dt_format='rfc822', **kwargs):
super(DateTime, self).__init__(**kwargs)
self.dt_format = dt_format
def format(self, value):
try:
if self.dt_format == 'rfc822':
return _rfc822(value)
elif self.dt_format == 'iso8601':
return _iso8601(value)
else:
raise MarshallingException(
'Unsupported date format %s' % self.dt_format
)
except AttributeError as ae:
raise MarshallingException(ae)
ZERO = MyDecimal()
class Fixed(Raw):
"""
A decimal number with a fixed precision.
"""
def __init__(self, decimals=5, **kwargs):
super(Fixed, self).__init__(**kwargs)
self.precision = MyDecimal('0.' + '0' * (decimals - 1) + '1')
def format(self, value):
dvalue = MyDecimal(value)
if not dvalue.is_normal() and dvalue != ZERO:
raise MarshallingException('Invalid Fixed precision number.')
return six.text_type(dvalue.quantize(self.precision, rounding=ROUND_HALF_EVEN))
"""Alias for :class:`~fields.Fixed`"""
Price = Fixed
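def _example_fixed_rounding():
    """Illustrative sketch only: Fixed quantizes with banker's rounding
    (ROUND_HALF_EVEN), so exact halves round towards the even digit."""
    f = Fixed(decimals=2)
    assert f.format('2.675') == '2.68'  # half rounds up to the even 8
    assert f.format('2.665') == '2.66'  # half rounds down to the even 6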
def _rfc822(dt):
"""Turn a datetime object into a formatted date.
Example::
fields._rfc822(datetime(2011, 1, 1)) => "Sat, 01 Jan 2011 00:00:00 -0000"
:param dt: The datetime to transform
:type dt: datetime
:return: A RFC 822 formatted date string
"""
return formatdate(timegm(dt.utctimetuple()))
def _iso8601(dt):
"""Turn a datetime object into an ISO8601 formatted date.
Example::
fields._iso8601(datetime(2012, 1, 1, 0, 0)) => "2012-01-01T00:00:00"
:param dt: The datetime to transform
:type dt: datetime
:return: A ISO 8601 formatted date string
"""
return dt.isoformat()
| 31.212919
| 87
| 0.628114
|
4589b2f7117ad463b68bce78f3c20068feefdcad
| 24,636
|
py
|
Python
|
lib/galaxy/jobs/deferred/data_transfer.py
|
vimalkumarvelayudhan/galaxy
|
ea89dd8f149778b6c2f0f3f4a34c8b21f7033af7
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/jobs/deferred/data_transfer.py
|
vimalkumarvelayudhan/galaxy
|
ea89dd8f149778b6c2f0f3f4a34c8b21f7033af7
|
[
"CC-BY-3.0"
] | 1
|
2015-02-21T18:48:19.000Z
|
2015-02-27T15:50:32.000Z
|
lib/galaxy/jobs/deferred/data_transfer.py
|
vimalkumarvelayudhan/galaxy
|
ea89dd8f149778b6c2f0f3f4a34c8b21f7033af7
|
[
"CC-BY-3.0"
] | 3
|
2015-02-22T13:34:16.000Z
|
2020-10-01T01:28:04.000Z
|
"""
Module for managing data transfer jobs.
"""
import logging, urllib2, re, shutil
from galaxy import eggs
from sqlalchemy import and_
from galaxy.util.odict import odict
from galaxy.workflow.modules import module_factory
from galaxy.jobs.actions.post import ActionBox
from galaxy.jobs.deferred import FakeTrans
from galaxy.tools.parameters import visit_input_values
from galaxy.tools.parameters.basic import DataToolParameter
from galaxy.datatypes import sniff
log = logging.getLogger( __name__ )
__all__ = [ 'DataTransfer' ]
class DataTransfer( object ):
check_interval = 15
dataset_name_re = re.compile( '(dataset\d+)_(name)' )
dataset_datatype_re = re.compile( '(dataset\d+)_(datatype)' )
def __init__( self, app ):
self.app = app
self.sa_session = app.model.context.current
def create_job( self, trans, **kwd ):
raise Exception( "Unimplemented Method" )
def check_job( self, job ):
raise Exception( "Unimplemented Method" )
def run_job( self, job ):
if job.params[ 'type' ] == 'init_transfer':
# TODO: don't create new downloads on restart.
if job.params[ 'protocol' ] in [ 'http', 'https' ]:
results = []
for result in job.params[ 'results' ].values():
result[ 'transfer_job' ] = self.app.transfer_manager.new( protocol=job.params[ 'protocol' ],
name=result[ 'name' ],
datatype=result[ 'datatype' ],
url=result[ 'url' ] )
results.append( result )
elif job.params[ 'protocol' ] == 'scp':
results = []
result = {}
sample_datasets_dict = job.params[ 'sample_datasets_dict' ]
# sample_datasets_dict looks something like the following. The outer dictionary keys are SampleDataset ids.
# {'7': {'status': 'Not started', 'name': '3.bed', 'file_path': '/tmp/library/3.bed', 'sample_id': 7,
# 'external_service_id': 2, 'error_msg': '', 'size': '8.0K'}}
for sample_dataset_id, sample_dataset_info_dict in sample_datasets_dict.items():
result = {}
result[ 'transfer_job' ] = self.app.transfer_manager.new( protocol=job.params[ 'protocol' ],
host=job.params[ 'host' ],
user_name=job.params[ 'user_name' ],
password=job.params[ 'password' ],
sample_dataset_id=sample_dataset_id,
status=sample_dataset_info_dict[ 'status' ],
name=sample_dataset_info_dict[ 'name' ],
file_path=sample_dataset_info_dict[ 'file_path' ],
sample_id=sample_dataset_info_dict[ 'sample_id' ],
external_service_id=sample_dataset_info_dict[ 'external_service_id' ],
error_msg=sample_dataset_info_dict[ 'error_msg' ],
size=sample_dataset_info_dict[ 'size' ] )
results.append( result )
self.app.transfer_manager.run( [ r[ 'transfer_job' ] for r in results ] )
for result in results:
transfer_job = result.pop( 'transfer_job' )
self.create_job( None,
transfer_job_id=transfer_job.id,
result=transfer_job.params,
sample_id=job.params[ 'sample_id' ] )
# Update the state of the relevant SampleDataset
new_status = self.app.model.SampleDataset.transfer_status.IN_QUEUE
self._update_sample_dataset_status( protocol=job.params[ 'protocol' ],
sample_id=job.params[ 'sample_id' ],
result_dict=transfer_job.params,
new_status=new_status,
error_msg='' )
job.state = self.app.model.DeferredJob.states.OK
self.sa_session.add( job )
self.sa_session.flush()
# TODO: Error handling: failure executing, or errors returned from the manager
if job.params[ 'type' ] == 'finish_transfer':
protocol = job.params[ 'protocol' ]
# Update the state of the relevant SampleDataset
new_status = self.app.model.SampleDataset.transfer_status.ADD_TO_LIBRARY
if protocol in [ 'http', 'https' ]:
result_dict = job.params[ 'result' ]
library_dataset_name = result_dict[ 'name' ]
extension = result_dict[ 'datatype' ]
elif protocol in [ 'scp' ]:
# In this case, job.params will be a dictionary that contains a key named 'result'. The value
# of the result key is a dictionary that looks something like:
# {'sample_dataset_id': '8', 'status': 'Not started', 'protocol': 'scp', 'name': '3.bed',
# 'file_path': '/data/library/3.bed', 'host': '127.0.0.1', 'sample_id': 8, 'external_service_id': 2,
# 'local_path': '/tmp/kjl2Ss4', 'password': 'galaxy', 'user_name': 'gvk', 'error_msg': '', 'size': '8.0K'}
try:
tj = self.sa_session.query( self.app.model.TransferJob ).get( int( job.params['transfer_job_id'] ) )
result_dict = tj.params
result_dict['local_path'] = tj.path
except Exception, e:
log.error( "Updated transfer result unavailable, using old result. Error was: %s" % str( e ) )
result_dict = job.params[ 'result' ]
library_dataset_name = result_dict[ 'name' ]
# Determine the data format (see the relevant TODO item in the manual_data_transfer plugin)..
extension = sniff.guess_ext( result_dict[ 'local_path' ], sniff_order=self.app.datatypes_registry.sniff_order )
self._update_sample_dataset_status( protocol=job.params[ 'protocol' ],
sample_id=int( job.params[ 'sample_id' ] ),
result_dict=result_dict,
new_status=new_status,
error_msg='' )
sample = self.sa_session.query( self.app.model.Sample ).get( int( job.params[ 'sample_id' ] ) )
ld = self.app.model.LibraryDataset( folder=sample.folder, name=library_dataset_name )
self.sa_session.add( ld )
self.sa_session.flush()
self.app.security_agent.copy_library_permissions( FakeTrans( self.app ), sample.folder, ld )
ldda = self.app.model.LibraryDatasetDatasetAssociation( name = library_dataset_name,
extension = extension,
dbkey = '?',
library_dataset = ld,
create_dataset = True,
sa_session = self.sa_session )
ldda.message = 'Transferred by the Data Transfer Plugin'
self.sa_session.add( ldda )
self.sa_session.flush()
ldda.state = ldda.states.QUEUED # flushed in the set property
ld.library_dataset_dataset_association_id = ldda.id
self.sa_session.add( ld )
self.sa_session.flush()
try:
# Move the dataset from its temporary location
shutil.move( job.transfer_job.path, ldda.file_name )
ldda.init_meta()
for name, spec in ldda.metadata.spec.items():
if name not in [ 'name', 'info', 'dbkey', 'base_name' ]:
if spec.get( 'default' ):
setattr( ldda.metadata, name, spec.unwrap( spec.get( 'default' ) ) )
self.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( self.app.datatypes_registry.set_external_metadata_tool,
FakeTrans( self.app,
history=sample.history,
user=sample.request.user ),
incoming = { 'input1':ldda } )
ldda.state = ldda.states.OK
# TODO: not sure if this flush is necessary
self.sa_session.add( ldda )
self.sa_session.flush()
except Exception, e:
log.exception( 'Failure preparing library dataset for finished transfer job (id: %s) via deferred job (id: %s):' % \
( str( job.transfer_job.id ), str( job.id ) ) )
ldda.state = ldda.states.ERROR
if sample.workflow:
log.debug( "\n\nLogging sample mappings as: %s" % sample.workflow[ 'mappings' ] )
log.debug( "job.params: %s" % job.params )
# We have a workflow. Update all mappings to ldda's, and when the final one is done
# execute_workflow with either the provided history, or a new one.
sub_done = True
rep_done = False
for k, v in sample.workflow[ 'mappings' ].iteritems():
if not 'hda' in v and v[ 'ds_tag' ].startswith( 'hi|' ):
sample.workflow[ 'mappings' ][ k ][ 'hda' ] = self.app.security.decode_id( v[ 'ds_tag' ][3:] )
for key, value in sample.workflow[ 'mappings' ].iteritems():
if 'url' in value and value[ 'url' ] == job.params[ 'result' ][ 'url' ]:
# DBTODO Make sure all ds| mappings get the URL of the dataset, for linking to later.
# If this dataset maps to what we just finished, update the ldda id in the sample.
sample.workflow[ 'mappings' ][ key ][ 'ldda' ] = ldda.id
rep_done = True
# DBTODO replace the hi| mappings with the hda here. Just rip off the first three chars.
elif not 'ldda' in value and not 'hda' in value:
# We're not done if some mappings still don't have ldda or hda mappings.
sub_done = False
if sub_done and rep_done:
if not sample.history:
new_history = self.app.model.History( name="New History From %s" % sample.name, user=sample.request.user )
self.sa_session.add( new_history )
sample.history = new_history
self.sa_session.flush()
self._execute_workflow( sample )
# Check the workflow for substitution done-ness
self.sa_session.add( sample )
self.sa_session.flush()
elif sample.history:
# We don't have a workflow, but a history was provided.
# No processing, go ahead and chunk everything in the history.
if ldda.dataset.state in [ 'new', 'upload', 'queued', 'running', 'empty', 'discarded' ]:
log.error("Cannot import dataset '%s' to user history since its state is '%s'. " % ( ldda.name, ldda.dataset.state ))
elif ldda.dataset.state in [ 'ok', 'error' ]:
ldda.to_history_dataset_association( target_history=sample.history, add_to_history=True )
# Finished
job.state = self.app.model.DeferredJob.states.OK
self.sa_session.add( job )
self.sa_session.flush()
# Update the state of the relevant SampleDataset
new_status = self.app.model.SampleDataset.transfer_status.COMPLETE
self._update_sample_dataset_status( protocol=job.params[ 'protocol' ],
sample_id=int( job.params[ 'sample_id' ] ),
result_dict=job.params[ 'result' ],
new_status=new_status,
error_msg='' )
if sample.datasets and not sample.untransferred_dataset_files:
# Update the state of the sample to the sample's request type's final state.
new_state = sample.request.type.final_sample_state
self._update_sample_state( sample.id, new_state )
# Update the state of the request, if possible
self._update_request_state( sample.request.id )
def _missing_params( self, params, required_params ):
missing_params = filter( lambda x: x not in params, required_params )
if missing_params:
log.error( 'Job parameters missing required keys: %s' % ', '.join( missing_params ) )
return True
return False
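    # Illustrative sketch (hedged, not executed here): concrete transfer
    # plugins would typically guard create_job/run_job with this helper,
    # for example:
    #     if self._missing_params( job.params, [ 'protocol', 'sample_id' ] ):
    #         job.state = self.app.model.DeferredJob.states.ERROR
    #         return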
def _update_sample_dataset_status( self, protocol, sample_id, result_dict, new_status, error_msg=None ):
# result_dict looks something like:
# {'url': '127.0.0.1/data/filtered_subreads.fa', 'name': 'Filtered reads'}
# Check if the new status is a valid transfer status
valid_statuses = [ v[1] for v in self.app.model.SampleDataset.transfer_status.items() ]
# TODO: error checking on valid new_status value
if protocol in [ 'http', 'https' ]:
sample_dataset = self.sa_session.query( self.app.model.SampleDataset ) \
.filter( and_( self.app.model.SampleDataset.table.c.sample_id == sample_id,
self.app.model.SampleDataset.table.c.name == result_dict[ 'name' ],
self.app.model.SampleDataset.table.c.file_path == result_dict[ 'url' ] ) ) \
.first()
elif protocol in [ 'scp' ]:
sample_dataset = self.sa_session.query( self.app.model.SampleDataset ).get( int( result_dict[ 'sample_dataset_id' ] ) )
sample_dataset.status = new_status
sample_dataset.error_msg = error_msg
self.sa_session.add( sample_dataset )
self.sa_session.flush()
def _update_sample_state( self, sample_id, new_state, comment=None ):
sample = self.sa_session.query( self.app.model.Sample ).get( sample_id )
if comment is None:
comment = 'Sample state set to %s' % str( new_state )
event = self.app.model.SampleEvent( sample, new_state, comment )
self.sa_session.add( event )
self.sa_session.flush()
def _update_request_state( self, request_id ):
request = self.sa_session.query( self.app.model.Request ).get( request_id )
# Make sure all the samples of the current request have the same state
common_state = request.samples_have_common_state
if not common_state:
# If the current request state is complete and one of its samples moved from
# the final sample state, then move the request state to In-progress
if request.is_complete:
message = "At least 1 sample state moved from the final sample state, so now the request's state is (%s)" % request.states.SUBMITTED
event = self.app.model.RequestEvent( request, request.states.SUBMITTED, message )
self.sa_session.add( event )
self.sa_session.flush()
else:
final_state = False
request_type_state = request.type.final_sample_state
if common_state.id == request_type_state.id:
# Since all the samples are in the final state, change the request state to 'Complete'
comment = "All samples of this sequencing request are in the final sample state (%s). " % request_type_state.name
state = request.states.COMPLETE
final_state = True
else:
comment = "All samples of this sequencing request are in the (%s) sample state. " % common_state.name
state = request.states.SUBMITTED
event = self.app.model.RequestEvent( request, state, comment )
self.sa_session.add( event )
self.sa_session.flush()
# TODO: handle email notification if it is configured to be sent when the samples are in this state.
def _execute_workflow( self, sample):
for key, value in sample.workflow['mappings'].iteritems():
if 'hda' not in value and 'ldda' in value:
# If HDA is already here, it's an external input, we're not copying anything.
ldda = self.sa_session.query( self.app.model.LibraryDatasetDatasetAssociation ).get( value['ldda'] )
if ldda.dataset.state in [ 'new', 'upload', 'queued', 'running', 'empty', 'discarded' ]:
log.error("Cannot import dataset '%s' to user history since its state is '%s'. " % ( ldda.name, ldda.dataset.state ))
elif ldda.dataset.state in [ 'ok', 'error' ]:
hda = ldda.to_history_dataset_association( target_history=sample.history, add_to_history=True )
sample.workflow['mappings'][key]['hda'] = hda.id
self.sa_session.add( sample )
self.sa_session.flush()
workflow_dict = sample.workflow
import copy
new_wf_dict = copy.deepcopy(workflow_dict)
for key in workflow_dict['mappings']:
if not isinstance(key, int):
new_wf_dict['mappings'][int(key)] = workflow_dict['mappings'][key]
workflow_dict = new_wf_dict
fk_trans = FakeTrans(self.app, history = sample.history, user=sample.request.user)
workflow = self.sa_session.query(self.app.model.Workflow).get(workflow_dict['id'])
if not workflow:
log.error("Workflow mapping failure.")
return
if len( workflow.steps ) == 0:
log.error( "Workflow cannot be run because it does not have any steps" )
return
if workflow.has_cycles:
log.error( "Workflow cannot be run because it contains cycles" )
return
if workflow.has_errors:
log.error( "Workflow cannot be run because of validation errors in some steps" )
return
# Build the state for each step
errors = {}
has_upgrade_messages = False
has_errors = False
# Build a fake dictionary prior to execution.
# Prepare each step
for step in workflow.steps:
step.upgrade_messages = {}
            # Construct modules
if step.type == 'tool' or step.type is None:
# Restore the tool state for the step
step.module = module_factory.from_workflow_step( fk_trans, step )
# Fix any missing parameters
step.upgrade_messages = step.module.check_and_update_state()
if step.upgrade_messages:
has_upgrade_messages = True
# Any connected input needs to have value DummyDataset (these
# are not persisted so we need to do it every time)
step.module.add_dummy_datasets( connections=step.input_connections )
# Store state with the step
step.state = step.module.state
# Error dict
if step.tool_errors:
has_errors = True
errors[step.id] = step.tool_errors
else:
## Non-tool specific stuff?
step.module = module_factory.from_workflow_step( fk_trans, step )
step.state = step.module.get_runtime_state()
# Connections by input name
step.input_connections_by_name = dict( ( conn.input_name, conn ) for conn in step.input_connections )
for step in workflow.steps:
step.upgrade_messages = {}
# Connections by input name
step.input_connections_by_name = \
dict( ( conn.input_name, conn ) for conn in step.input_connections )
# Extract just the arguments for this step by prefix
step_errors = None
if step.type == 'tool' or step.type is None:
module = module_factory.from_workflow_step( fk_trans, step )
# Fix any missing parameters
step.upgrade_messages = module.check_and_update_state()
if step.upgrade_messages:
has_upgrade_messages = True
# Any connected input needs to have value DummyDataset (these
# are not persisted so we need to do it every time)
module.add_dummy_datasets( connections=step.input_connections )
# Get the tool
tool = module.tool
# Get the state
step.state = state = module.state
# Get old errors
old_errors = state.inputs.pop( "__errors__", {} )
if step_errors:
errors[step.id] = state.inputs["__errors__"] = step_errors
# Run each step, connecting outputs to inputs
workflow_invocation = self.app.model.WorkflowInvocation()
workflow_invocation.workflow = workflow
outputs = odict()
for i, step in enumerate( workflow.steps ):
job = None
if step.type == 'tool' or step.type is None:
tool = self.app.toolbox.get_tool( step.tool_id )
def callback( input, value, prefixed_name, prefixed_label ):
if isinstance( input, DataToolParameter ):
if prefixed_name in step.input_connections_by_name:
conn = step.input_connections_by_name[ prefixed_name ]
return outputs[ conn.output_step.id ][ conn.output_name ]
visit_input_values( tool.inputs, step.state.inputs, callback )
job, out_data = tool.execute( fk_trans, step.state.inputs, history=sample.history)
outputs[ step.id ] = out_data
for pja in step.post_job_actions:
if pja.action_type in ActionBox.immediate_actions:
ActionBox.execute(self.app, self.sa_session, pja, job, replacement_dict=None)
else:
job.add_post_job_action(pja)
else:
job, out_data = step.module.execute( fk_trans, step.state)
outputs[ step.id ] = out_data
if step.id in workflow_dict['mappings']:
data = self.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( workflow_dict['mappings'][str(step.id)]['hda'] )
outputs[ step.id ]['output'] = data
workflow_invocation_step = self.app.model.WorkflowInvocationStep()
workflow_invocation_step.workflow_invocation = workflow_invocation
workflow_invocation_step.workflow_step = step
workflow_invocation_step.job = job
self.sa_session.add( workflow_invocation )
self.sa_session.flush()
| 63.98961
| 148
| 0.538927
|
c8d3e3d57b11cbd9ae6bb144336728e433ae1d28
| 3,451
|
py
|
Python
|
docs/conf.py
|
zzehring/operator
|
7ef5543f8d0ef767c4531c2ae002e04bad1e3af3
|
[
"Apache-2.0"
] | null | null | null |
docs/conf.py
|
zzehring/operator
|
7ef5543f8d0ef767c4531c2ae002e04bad1e3af3
|
[
"Apache-2.0"
] | null | null | null |
docs/conf.py
|
zzehring/operator
|
7ef5543f8d0ef767c4531c2ae002e04bad1e3af3
|
[
"Apache-2.0"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# For a full list of options see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
from pathlib import Path
import sys
sys.path.insert(0, str(Path(__file__).parent.parent))
# -- Project information -----------------------------------------------------
project = 'The Operator Framework'
copyright = '2019-2020, Canonical Ltd.'
author = 'Canonical Ltd'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
]
# The document name of the “master” document, that is, the document
# that contains the root toctree directive.
master_doc = 'index'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme' # 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for sphinx.ext.todo ---------------------------------------------
# If this is True, todo and todolist produce output, else they
# produce nothing. The default is False.
todo_include_todos = False
# -- Options for sphinx.ext.autodoc ------------------------------------------
# This value controls how to represent typehints. The setting takes the
# following values:
# 'signature' – Show typehints as its signature (default)
# 'description' – Show typehints as content of function or method
# 'none' – Do not show typehints
autodoc_typehints = 'description'
# This value selects what content will be inserted into the main body of an
# autoclass directive. The possible values are:
# 'class' - Only the class’ docstring is inserted. This is the
# default. You can still document __init__ as a separate method
# using automethod or the members option to autoclass.
# 'both' - Both the class’ and the __init__ method’s docstring are
# concatenated and inserted.
# 'init' - Only the __init__ method’s docstring is inserted.
autoclass_content = 'both'
autodoc_default_options = {
'members': None, # None here means "yes"
'undoc-members': None,
'show-inheritance': None,
}
# -- Options for sphinx.ext.intersphinx --------------------------------------
# This config value contains the locations and names of other projects
# that should be linked to in this documentation.
intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}
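# With the mapping above, a reST cross-reference such as :class:`python:dict`
# in a docstring links out to the Python documentation (illustrative example,
# not part of the original file).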
| 35.214286
| 78
| 0.651695
|
fb592d81f10e84fb7065375be72bdd9e70e52a5d
| 370
|
py
|
Python
|
fdk_client/application/models/LogisticValidator.py
|
kavish-d/fdk-client-python
|
a1023eb530473322cb52e095fc4ceb226c1e6037
|
[
"MIT"
] | null | null | null |
fdk_client/application/models/LogisticValidator.py
|
kavish-d/fdk-client-python
|
a1023eb530473322cb52e095fc4ceb226c1e6037
|
[
"MIT"
] | null | null | null |
fdk_client/application/models/LogisticValidator.py
|
kavish-d/fdk-client-python
|
a1023eb530473322cb52e095fc4ceb226c1e6037
|
[
"MIT"
] | null | null | null |
"""Class Validators."""
from marshmallow import fields, Schema
from marshmallow.validate import OneOf
from ..enums import *
from ..models.BaseSchema import BaseSchema
class LogisticValidator:
class getTatProduct(BaseSchema):
pass
class getPincodeCity(BaseSchema):
pincode = fields.Str(required=False)
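# A minimal, hypothetical usage sketch (payload values are illustrative;
# BaseSchema is assumed to behave like a standard marshmallow Schema, whose
# validate() returns a dict of errors, empty when the payload is valid):
if __name__ == "__main__":
    errors = LogisticValidator.getPincodeCity().validate({"pincode": "560001"})
    assert errors == {}, errors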
| 20.555556
| 44
| 0.662162
|
6a761f995daa221e34f8d88f1d50b178fa2ca4e1
| 961
|
py
|
Python
|
cogs/inactive/nsfw.py
|
MiningMark48/Tidal-Bot
|
8db6ecb220fd35930ffe1df5653af7a1ca03c8e9
|
[
"MIT"
] | 6
|
2020-08-09T15:43:07.000Z
|
2022-03-11T15:12:21.000Z
|
cogs/inactive/nsfw.py
|
MiningMark48/Tidal-Bot
|
8db6ecb220fd35930ffe1df5653af7a1ca03c8e9
|
[
"MIT"
] | 6
|
2020-10-29T02:32:40.000Z
|
2022-01-13T03:12:45.000Z
|
cogs/inactive/nsfw.py
|
MiningMark48/Tidal-Bot
|
8db6ecb220fd35930ffe1df5653af7a1ca03c8e9
|
[
"MIT"
] | 1
|
2021-06-09T08:06:31.000Z
|
2021-06-09T08:06:31.000Z
|
import random
import aiohttp
from discord.ext import commands
from util.decorators import delete_original
class NSFW(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.trivia_messages = []
self.new_trivia = []
@commands.command(aliases=['porn'], hidden=True)
@commands.is_nsfw()
@delete_original()
async def nsfw(self, ctx, *, query: str):
"""Get a random NSFW Gif via search query"""
base_url = f'https://api.redgifs.com/v1/gfycats/search?search_text={query}&count=100'
async with aiohttp.ClientSession() as session:
async with session.get(base_url) as r:
data = await r.json()
if data['found'] > 0:
gifs = data['gfycats']
rand_gif = random.choice(gifs)
gif_link = rand_gif['gifUrl']
await ctx.send(gif_link)
def setup(bot):
bot.add_cog(NSFW(bot))
| 29.121212
| 93
| 0.58897
|
86cbb6a657f789abbe3777094f650379d9f44c61
| 11,164
|
py
|
Python
|
python/ee/tests/data_test.py
|
sadsad0557/sadsad0557
|
67371ceca73196dfd2ecd03bf7d3f8b2c3368c34
|
[
"Apache-2.0"
] | 1
|
2020-11-03T21:11:55.000Z
|
2020-11-03T21:11:55.000Z
|
python/ee/tests/data_test.py
|
sadsad0557/sadsad0557
|
67371ceca73196dfd2ecd03bf7d3f8b2c3368c34
|
[
"Apache-2.0"
] | null | null | null |
python/ee/tests/data_test.py
|
sadsad0557/sadsad0557
|
67371ceca73196dfd2ecd03bf7d3f8b2c3368c34
|
[
"Apache-2.0"
] | 3
|
2017-08-25T05:12:46.000Z
|
2019-06-28T07:17:03.000Z
|
#!/usr/bin/env python
"""Test for the ee.data module."""
import httplib2
import mock
import unittest
import ee
from ee import apitestcase
import ee.image as image
class DataTest(unittest.TestCase):
def testListOperations(self):
mock_http = mock.MagicMock(httplib2.Http)
# Return in three groups.
mock_http.request.side_effect = [
(httplib2.Response({
'status': 200
}), b'{"operations": [{"name": "name1"}], "nextPageToken": "t1"}'),
(httplib2.Response({
'status': 200
}), b'{"operations": [{"name": "name2"}], "nextPageToken": "t2"}'),
(httplib2.Response({
'status': 200
}), b'{"operations": [{"name": "name3"}]}'),
]
with apitestcase.UsingCloudApi(mock_http=mock_http):
self.assertEqual([{
'name': 'name1'
}, {
'name': 'name2'
}, {
'name': 'name3'
}], ee.data.listOperations())
def testListOperationsEmptyList(self):
# Empty lists don't appear at all in the result.
mock_http = mock.MagicMock(httplib2.Http)
mock_http.request.return_value = (httplib2.Response({'status': 200}), b'{}')
with apitestcase.UsingCloudApi(mock_http=mock_http):
self.assertEqual([], ee.data.listOperations())
def testSetAssetProperties(self):
mock_http = mock.MagicMock(httplib2.Http)
with apitestcase.UsingCloudApi(mock_http=mock_http), mock.patch.object(
ee.data, 'updateAsset', autospec=True) as mock_update_asset:
ee.data.setAssetProperties(
'foo', {'mYPropErTy': 'Value', 'system:time_start': 1})
asset_id = mock_update_asset.call_args[0][0]
self.assertEqual(asset_id, 'foo')
asset = mock_update_asset.call_args[0][1]
self.assertEqual(
asset['properties'],
{'mYPropErTy': 'Value', 'system:time_start': 1})
update_mask = mock_update_asset.call_args[0][2]
self.assertSetEqual(
set(update_mask), set([
'properties.\"mYPropErTy\"',
'properties.\"system:time_start\"'
]))
def testListAssets(self):
cloud_api_resource = mock.MagicMock()
with apitestcase.UsingCloudApi(cloud_api_resource=cloud_api_resource):
mock_result = {'assets': [{'path': 'id1', 'type': 'type1'}]}
cloud_api_resource.projects().assets().listAssets(
).execute.return_value = mock_result
cloud_api_resource.projects().assets().listAssets_next.return_value = None
actual_result = ee.data.listAssets({'p': 'q'})
cloud_api_resource.projects().assets().listAssets().\
execute.assert_called_once()
self.assertEqual(mock_result, actual_result)
def testListImages(self):
cloud_api_resource = mock.MagicMock()
with apitestcase.UsingCloudApi(cloud_api_resource=cloud_api_resource):
mock_result = {'images': [{'path': 'id1', 'type': 'type1'}]}
cloud_api_resource.projects().assets().listImages(
).execute.return_value = mock_result
cloud_api_resource.projects().assets().listImages_next.return_value = None
actual_result = ee.data.listImages({'p': 'q'})
cloud_api_resource.projects().assets().listImages(
).execute.assert_called_once()
self.assertEqual(mock_result, actual_result)
def testListBuckets(self):
cloud_api_resource = mock.MagicMock()
with apitestcase.UsingCloudApi(cloud_api_resource=cloud_api_resource):
mock_result = {'assets': [{'name': 'id1', 'type': 'FOLDER'}]}
cloud_api_resource.projects().listAssets(
).execute.return_value = mock_result
actual_result = ee.data.listBuckets()
cloud_api_resource.projects().listAssets(
).execute.assert_called_once()
self.assertEqual(mock_result, actual_result)
def testSimpleGetListViaCloudApi(self):
cloud_api_resource = mock.MagicMock()
with apitestcase.UsingCloudApi(cloud_api_resource=cloud_api_resource):
mock_result = {'assets': [{'name': 'id1', 'type': 'IMAGE_COLLECTION'}]}
cloud_api_resource.projects().assets().listAssets(
).execute.return_value = mock_result
actual_result = ee.data.getList({'id': 'glam', 'num': 3})
expected_params = {
'parent': 'projects/earthengine-public/assets/glam',
'pageSize': 3
}
expected_result = [{'id': 'id1', 'type': 'ImageCollection'}]
cloud_api_resource.projects().assets().listAssets.assert_called_with(
**expected_params)
self.assertEqual(expected_result, actual_result)
def testGetListAssetRootViaCloudApi(self):
cloud_api_resource = mock.MagicMock()
with apitestcase.UsingCloudApi(cloud_api_resource=cloud_api_resource):
mock_result = {'assets': [{'name': 'id1', 'type': 'IMAGE_COLLECTION'}]}
cloud_api_resource.projects().listAssets(
).execute.return_value = mock_result
actual_result = ee.data.getList(
{'id': 'projects/my-project/assets/', 'num': 3})
expected_params = {
'parent': 'projects/my-project',
'pageSize': 3
}
expected_result = [{'id': 'id1', 'type': 'ImageCollection'}]
cloud_api_resource.projects().listAssets.assert_called_with(
**expected_params)
self.assertEqual(expected_result, actual_result)
def testGetListAssetRootViaCloudApiNoSlash(self):
cloud_api_resource = mock.MagicMock()
with apitestcase.UsingCloudApi(cloud_api_resource=cloud_api_resource):
mock_result = {'assets': [{'name': 'id1', 'type': 'IMAGE_COLLECTION'}]}
cloud_api_resource.projects().listAssets(
).execute.return_value = mock_result
actual_result = ee.data.getList(
{'id': 'projects/my-project/assets', 'num': 3})
expected_params = {
'parent': 'projects/my-project',
'pageSize': 3
}
expected_result = [{'id': 'id1', 'type': 'ImageCollection'}]
cloud_api_resource.projects().listAssets.assert_called_with(
**expected_params)
self.assertEqual(expected_result, actual_result)
def testComplexGetListViaCloudApi(self):
cloud_api_resource = mock.MagicMock()
with apitestcase.UsingCloudApi(cloud_api_resource=cloud_api_resource):
mock_result = {
'images': [{
'name': 'id1',
'size_bytes': 1234
}]
}
cloud_api_resource.projects().assets().listImages(
).execute.return_value = mock_result
actual_result = ee.data.getList({
'id': 'glam',
'num': 3,
'starttime': 3612345,
'filter': 'foo'
})
expected_params = {
'parent': 'projects/earthengine-public/assets/glam',
'pageSize': 3,
'startTime': '1970-01-01T01:00:12.345000Z',
'view': 'BASIC',
'filter': 'foo'
}
expected_result = [{'id': 'id1', 'type': 'Image'}]
cloud_api_resource.projects().assets().listImages.assert_called_with(
**expected_params)
self.assertEqual(expected_result, actual_result)
# The Cloud API context manager does not mock getAlgorithms, so it's done
# separately here.
@mock.patch.object(
ee.data,
'getAlgorithms',
return_value=apitestcase.GetAlgorithms(),
autospec=True)
def testGetDownloadId(self, _):
cloud_api_resource = mock.MagicMock()
with apitestcase.UsingCloudApi(cloud_api_resource=cloud_api_resource):
mock_result = {'name': 'projects/earthengine-legacy/thumbnails/DOCID'}
cloud_api_resource.projects().thumbnails().create(
).execute.return_value = mock_result
actual_result = ee.data.getDownloadId({
'image': image.Image('my-image'),
'name': 'dummy'
})
cloud_api_resource.projects().thumbnails().create(
).execute.assert_called_once()
self.assertEqual(
{
'docid': 'projects/earthengine-legacy/thumbnails/DOCID',
'token': ''
}, actual_result)
def testGetDownloadId_withBandList(self):
cloud_api_resource = mock.MagicMock()
with apitestcase.UsingCloudApi(cloud_api_resource=cloud_api_resource):
mock_result = {'name': 'projects/earthengine-legacy/thumbnails/DOCID'}
cloud_api_resource.projects().thumbnails().create(
).execute.return_value = mock_result
actual_result = ee.data.getDownloadId({
'image': image.Image('my-image'),
'name': 'dummy',
'bands': ['B1', 'B2', 'B3']
})
cloud_api_resource.projects().thumbnails().create(
).execute.assert_called_once()
self.assertEqual(
{
'docid': 'projects/earthengine-legacy/thumbnails/DOCID',
'token': ''
}, actual_result)
def testGetDownloadId_withImageID(self):
cloud_api_resource = mock.MagicMock()
with apitestcase.UsingCloudApi(cloud_api_resource=cloud_api_resource):
with self.assertRaisesRegex(ee.ee_exception.EEException,
'^Image ID string is not supported.'):
ee.data.getDownloadId({'id': 'my-image', 'name': 'dummy'})
def testGetDownloadId_withSerializedImage(self):
cloud_api_resource = mock.MagicMock()
with apitestcase.UsingCloudApi(cloud_api_resource=cloud_api_resource):
with self.assertRaisesRegex(ee.ee_exception.EEException,
'^Image as JSON string not supported.'):
ee.data.getDownloadId({
'image': image.Image('my-image').serialize(),
'name': 'dummy'
})
def testCloudProfilingEnabled(self):
seen = []
def ProfileHook(profile_id):
seen.append(profile_id)
with ee.data.profiling(ProfileHook):
with apitestcase.UsingCloudApi(), DoCloudProfileStubHttp(self, True):
ee.data.listImages({'parent': 'projects/earthengine-public/assets/q'})
self.assertEqual(['someProfileId'], seen)
def testCloudProfilingDisabled(self):
with apitestcase.UsingCloudApi(), DoCloudProfileStubHttp(self, False):
ee.data.listImages({'parent': 'projects/earthengine-public/assets/q'})
def testCloudErrorTranslation(self):
mock_http = mock.MagicMock(httplib2.Http)
mock_http.request.return_value = (httplib2.Response({'status': 400}),
b'{"error": {"message": "errorly"} }')
with apitestcase.UsingCloudApi(mock_http=mock_http):
with self.assertRaisesRegex(ee.ee_exception.EEException, '^errorly$'):
ee.data.listImages({'parent': 'projects/earthengine-public/assets/q'})
def DoCloudProfileStubHttp(test, expect_profiling):
def Request(unused_self, unused_url, method, body, headers):
_ = method, body # Unused kwargs.
test.assertEqual(expect_profiling,
ee.data._PROFILE_REQUEST_HEADER in headers)
response_dict = {
'status': 200,
'content-type': 'application/json'
}
if expect_profiling:
response_dict[
ee.data._PROFILE_RESPONSE_HEADER_LOWERCASE] = 'someProfileId'
response = httplib2.Response(response_dict)
return response, '{"data": "dummy_data"}'
return mock.patch('httplib2.Http.request', new=Request)
if __name__ == '__main__':
unittest.main()
| 39.17193
| 80
| 0.653529
|
2aad5038b21020dc5b08d173aadbf01890845b98
| 672
|
py
|
Python
|
djangocms_salesforce_forms/migrations/0006_auto_20171212_1453.py
|
divio/aldryn-salesforce-forms
|
9ab2b3fd7934e6b88a95377b8f3a9b3daa98d2d0
|
[
"BSD-3-Clause"
] | null | null | null |
djangocms_salesforce_forms/migrations/0006_auto_20171212_1453.py
|
divio/aldryn-salesforce-forms
|
9ab2b3fd7934e6b88a95377b8f3a9b3daa98d2d0
|
[
"BSD-3-Clause"
] | 2
|
2019-04-10T14:59:00.000Z
|
2019-05-14T06:24:36.000Z
|
djangocms_salesforce_forms/migrations/0006_auto_20171212_1453.py
|
divio/aldryn-salesforce-forms
|
9ab2b3fd7934e6b88a95377b8f3a9b3daa98d2d0
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-12 16:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djangocms_salesforce_forms', '0005_auto_20171211_1306'),
]
operations = [
migrations.AlterField(
model_name='formplugin',
name='redirect_type',
field=models.CharField(choices=[('redirect_to_page', 'CMS Page'), ('redirect_to_url', 'Absolute URL')], help_text='Where to redirect the user when the form has been successfully sent?', max_length=20, verbose_name='Redirect to'),
),
]
| 32
| 241
| 0.671131
|
53563ef82efe2d58e3b84c570182212d20a221f5
| 2,071
|
py
|
Python
|
browbeat/config.py
|
zulcss/browbeat
|
1aedcebcdabec0d92c0c0002a6ef458858629e88
|
[
"Apache-2.0"
] | 19
|
2019-07-12T08:46:58.000Z
|
2022-03-11T19:25:28.000Z
|
browbeat/config.py
|
zulcss/browbeat
|
1aedcebcdabec0d92c0c0002a6ef458858629e88
|
[
"Apache-2.0"
] | 1
|
2022-03-30T07:05:53.000Z
|
2022-03-30T07:05:53.000Z
|
browbeat/config.py
|
zulcss/browbeat
|
1aedcebcdabec0d92c0c0002a6ef458858629e88
|
[
"Apache-2.0"
] | 31
|
2019-06-10T20:08:44.000Z
|
2022-02-23T15:43:32.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import yaml
from pykwalify import core as pykwalify_core
from pykwalify import errors as pykwalify_errors
from browbeat.path import conf_schema_path
_logger = logging.getLogger("browbeat.config")
def load_browbeat_config(path):
"""Loads and validates an entire Browbeat config per the expected schema.
:param path: The path to the Browbeat Config file
"""
with open(path, "r") as config_file:
browbeat_config = yaml.safe_load(config_file)
_logger.debug("Browbeat config {} yaml loaded".format(path))
# Validate base config for Browbeat format
_validate_yaml("browbeat", browbeat_config)
_logger.info("Config {} validated".format(path))
# Validate per-workloads
for workload in browbeat_config["workloads"]:
_validate_yaml(workload["type"], workload)
_logger.debug("Workload {} validated as {}".format(workload["name"], workload["type"]))
return browbeat_config
def _validate_yaml(schema, config):
"""Raises exception if config is invalid.
:param schema: The schema to validate with (browbeat, rally...)
:param config: Loaded yaml to validate
"""
check = pykwalify_core.Core(
source_data=config, schema_files=["{}/{}.yml".format(conf_schema_path, schema)])
try:
check.validate(raise_exception=True)
except pykwalify_errors.SchemaError as e:
_logger.error("Schema validation failed")
raise Exception("File does not conform to {} schema: {}".format(schema, e))
| 36.333333
| 95
| 0.720908
|
3d13d348e7f5c54132dff2b8d93965d01a483c39
| 1,715
|
py
|
Python
|
catalog/bindings/csw/transaction_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/csw/transaction_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/csw/transaction_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass, field
from typing import List, Optional
from bindings.csw.delete_type import DeleteType
from bindings.csw.insert_type import InsertType
from bindings.csw.request_base_type import RequestBaseType
from bindings.csw.update_type import UpdateType
__NAMESPACE__ = "http://www.opengis.net/cat/csw/2.0.2"
@dataclass
class TransactionType(RequestBaseType):
"""Users may insert, update, or delete catalogue entries.
If the verboseResponse attribute has the value "true", then one or
more csw:InsertResult elements must be included in the response.
"""
insert: List[InsertType] = field(
default_factory=list,
metadata={
"name": "Insert",
"type": "Element",
"namespace": "http://www.opengis.net/cat/csw/2.0.2",
"sequential": True,
},
)
update: List[UpdateType] = field(
default_factory=list,
metadata={
"name": "Update",
"type": "Element",
"namespace": "http://www.opengis.net/cat/csw/2.0.2",
"sequential": True,
},
)
delete: List[DeleteType] = field(
default_factory=list,
metadata={
"name": "Delete",
"type": "Element",
"namespace": "http://www.opengis.net/cat/csw/2.0.2",
"sequential": True,
},
)
verbose_response: bool = field(
default=False,
metadata={
"name": "verboseResponse",
"type": "Attribute",
},
)
request_id: Optional[str] = field(
default=None,
metadata={
"name": "requestId",
"type": "Attribute",
},
)
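# A minimal, hypothetical construction sketch (assumes TransactionType and
# InsertType can be instantiated with their defaults; values illustrative):
#   transaction = TransactionType(insert=[InsertType()],
#                                 verbose_response=True,
#                                 request_id="req-001")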
| 28.583333
| 70
| 0.577259
|
17bde9483d36e9462555a0d4ce738027dddfa8da
| 1,040
|
py
|
Python
|
facebook/main.py
|
Joish/data_mining
|
244c41fa9427e7a8f9c3b66ebe9b70106e7486d2
|
[
"MIT"
] | null | null | null |
facebook/main.py
|
Joish/data_mining
|
244c41fa9427e7a8f9c3b66ebe9b70106e7486d2
|
[
"MIT"
] | null | null | null |
facebook/main.py
|
Joish/data_mining
|
244c41fa9427e7a8f9c3b66ebe9b70106e7486d2
|
[
"MIT"
] | null | null | null |
import os
import requests
from .stream import Listener
class FacebookStream():
def __init__(self, ACCESS_TOKEN):
self.ACCESS_TOKEN = ACCESS_TOKEN
self.run()
    def get_filter_list(self):
        cwd = os.path.dirname(os.path.realpath(__file__))
        file_path = os.path.join(cwd, "filter_list.txt")
        with open(file_path, "r") as f:
            filter_list = f.read().split("\n")
        filter_list = [_.replace(" ", '') for _ in filter_list if _]
        return filter_list
    def run(self):
        print("STARTING FACEBOOK STREAM")
        # The original file never defined `url` or `postlst`; the Graph API
        # feed endpoint below is an assumption, shown for illustration only.
        url = ("https://graph.facebook.com/me/posts?access_token=%s"
               % self.ACCESS_TOKEN)
        postlst = []
        while True:
            posts = requests.get(url)
            posts_json = posts.json()
            for x1 in posts_json['data']:
                postlst.append(x1.get('created_time'))
            next_page = ""
            try:
                next_page = posts_json['paging']['next']
                url = next_page
            except KeyError:
                break
            if not next_page:
                break
            # print("Count: %s, Next Page: %s" % (len(postlst), url))
| 27.368421
| 70
| 0.531731
|
d79d61dc388c498327ea1c2fad22914727dacace
| 324
|
py
|
Python
|
bin/experiments/tornado_hello.py
|
coreyabshire/marv
|
b2c791eb1b2b5be0f275e3b50d8df362df2e644e
|
[
"MIT"
] | null | null | null |
bin/experiments/tornado_hello.py
|
coreyabshire/marv
|
b2c791eb1b2b5be0f275e3b50d8df362df2e644e
|
[
"MIT"
] | 2
|
2017-10-18T22:48:16.000Z
|
2017-10-18T23:30:12.000Z
|
bin/experiments/tornado_hello.py
|
coreyabshire/marv
|
b2c791eb1b2b5be0f275e3b50d8df362df2e644e
|
[
"MIT"
] | null | null | null |
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
application = tornado.web.Application([
(r"/", MainHandler),
])
application.listen(8888)
tornado.ioloop.IOLoop.current().start()
| 23.142857
| 46
| 0.67284
|
198dae373ecd1aa6c1e5d403abee0eb805848ec0
| 3,647
|
py
|
Python
|
frontend/tests/api/files_ext/test_controllers.py
|
vaginessa/irma
|
02285080b67b25ef983a99a765044683bd43296c
|
[
"Apache-2.0"
] | null | null | null |
frontend/tests/api/files_ext/test_controllers.py
|
vaginessa/irma
|
02285080b67b25ef983a99a765044683bd43296c
|
[
"Apache-2.0"
] | null | null | null |
frontend/tests/api/files_ext/test_controllers.py
|
vaginessa/irma
|
02285080b67b25ef983a99a765044683bd43296c
|
[
"Apache-2.0"
] | null | null | null |
import io
from unittest import TestCase
from mock import MagicMock, patch
import api.files_ext.controllers as api_files_ext
from api.files_ext.models import FileExt
from api.files_ext.schemas import FileExtSchema
class TestFilesExtRoutes(TestCase):
def assertIsFileExt(self, data):
self.assertTrue(type(data) == dict)
self.assertCountEqual(data.keys(), FileExtSchema().fields)
def assertIsFileExtList(self, data):
self.assertTrue(type(data) == list)
for file_ext in data:
self.assertIsFileExt(file_ext)
def setUp(self):
self.db = MagicMock()
self.session = self.db.session
self.old_db = api_files_ext.db
api_files_ext.db = self.db
def tearDown(self):
api_files_ext.db = self.old_db
del self.db
@patch("api.files_ext.controllers.FileExt")
def test_get_ok(self, m_FileExt):
api_version = 1
resultid = "whatever"
m_file = MagicMock()
m_fw = FileExt(m_file, "filename")
m_FileExt.load_from_ext_id.return_value = m_fw
result = api_files_ext.get(api_version, resultid)
m_FileExt.load_from_ext_id.assert_called_once_with(resultid,
self.session)
self.assertIsFileExt(result)
@patch("api.files_ext.controllers.FileExt")
def test_get_formatted_false(self, m_FileExt):
resultid = "whatever"
api_version = 1
m_file_ext = MagicMock()
m_file_ext.submitter = "webui"
m_FileExt.load_from_ext_id.return_value = m_file_ext
ret = api_files_ext.get(api_version, resultid, formatted="no")
m_FileExt.load_from_ext_id.assert_called_once_with(resultid,
self.session)
self.assertIsFileExt(ret)
@patch("api.files_ext.controllers.FileExt")
def test_get_error(self, m_FileExt):
api_version = 1
resultid = "whatever"
exception = ValueError()
m_FileExt.load_from_ext_id.side_effect = exception
with self.assertRaises(ValueError):
api_files_ext.get(api_version, resultid)
@patch("api.files_ext.controllers.File")
def test_create_ok(self, m_File):
m_file = MagicMock()
m_request = MagicMock()
data = b"DATA"
filename = "filename"
m_file.filename = filename
m_file.file = io.BytesIO(data)
m_request._params = {'files': m_file, 'json': '{"submitter": "cli"}'}
m_file_obj = MagicMock()
m_File.get_or_create.return_value = m_file_obj
api_files_ext.create(m_request)
@patch("api.files_ext.controllers.FileExt")
def test_add_files_no_file(self, m_FileExt):
m_request = MagicMock()
m_request._params['files'] = None
expected = "The \"files\" parameter is invalid. Empty list"
with self.assertRaises(api_files_ext.HTTPInvalidParam) as context:
api_files_ext.create(m_request)
self.assertEqual(context.exception.description, expected)
m_FileExt.assert_not_called()
@patch("api.files.controllers.FileExt")
def test_add_files_more_than_one_files(self, m_FileExt):
m_request = MagicMock()
m_request._params = {'files': ['file1', 'file2']}
expected = "The \"files\" parameter is invalid. " \
"Only one file at a time"
with self.assertRaises(api_files_ext.HTTPInvalidParam) as context:
api_files_ext.create(m_request)
self.assertEqual(context.exception.description, expected)
m_FileExt.assert_not_called()
| 37.214286
| 77
| 0.650946
|
822d21c05671db6f27e571d687d59d2297d466f9
| 1,496
|
py
|
Python
|
workspace/Python/CsvToExcel.py
|
PinkMemory-sudo/languages
|
3687240564919ecd864561b16b588b0b5f562acd
|
[
"MIT"
] | null | null | null |
workspace/Python/CsvToExcel.py
|
PinkMemory-sudo/languages
|
3687240564919ecd864561b16b588b0b5f562acd
|
[
"MIT"
] | null | null | null |
workspace/Python/CsvToExcel.py
|
PinkMemory-sudo/languages
|
3687240564919ecd864561b16b588b0b5f562acd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
This program batch-converts csv files to excel files. Specify the source and
target paths in the main function: source file path `source`, target file
path `ob`.
It assumes the csv files live under:
"C:\\Users\\Administrator\\Desktop\\ceshi\\csv文件"
and writes the excel files to:
"C:\\Users\\Administrator\\Desktop\\ceshi\\xlsx文件"
'''
# Import pandas
import pandas as pd
import os
# Convert a single csv file to an excel file. `file` is the csv file name,
# `to_file` is the xlsx file name.
def csv_to_xlsx(file, to_file):
    data_csv = pd.read_csv(file)  # sep= would set the delimiter; the default is a comma
    data_csv.to_excel(to_file, sheet_name='data', index=False)
# List all files in a directory:
def read_path(path):
    dirs = os.listdir(path)
    return dirs
# Main function
def main():
    # Source file path
    source = "/Users/chenguanlin/Documents/out"
    # Target file path
    ob = "/Users/chenguanlin/Documents/2775"
    # Turn the files under the source path into the list file_list
    file_list = [source + '/' + i for i in read_path(source)]
    a = 0  # index into j_list; index 0 is the first csv file name
    j_list = read_path(source)  # every csv file name in the folder, in order
    print("---->", read_path(source))  # read_path(source) is itself a list
    print("read_path(source) type:", type(read_path(source)))
    # Loop over each file and call csv_to_xlsx()
    for it in file_list:
        j = j_list[a]  # assign each csv file name to j by index
        # Build the name of the target file
        j_mid = str(j).replace(".csv", "")  # strip the .csv suffix
        print("====", j_mid)
        j_xlsx = ob + '/' + j_mid + ".xlsx"
        csv_to_xlsx(it, j_xlsx)
        print("######", it)
        a = a + 1
if __name__ == '__main__':
    main()
| 27.2
| 103
| 0.665775
|
c99df78c772471486293e6d514e2c516388bc6c9
| 503
|
py
|
Python
|
setup.py
|
homgwu/eprogress
|
26528661f08a25e3192b8a62e1113a6fa6e7bb06
|
[
"Apache-2.0"
] | 43
|
2017-07-25T03:38:16.000Z
|
2022-03-21T06:45:49.000Z
|
setup.py
|
threshi/eprogress
|
26528661f08a25e3192b8a62e1113a6fa6e7bb06
|
[
"Apache-2.0"
] | 3
|
2018-03-27T05:51:44.000Z
|
2020-06-03T11:35:52.000Z
|
setup.py
|
threshi/eprogress
|
26528661f08a25e3192b8a62e1113a6fa6e7bb06
|
[
"Apache-2.0"
] | 21
|
2017-08-07T06:35:39.000Z
|
2021-08-30T03:12:17.000Z
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import eprogress
setup(
name="eprogress",
version=eprogress.__version__,
packages=find_packages(),
author="HomgWu",
author_email="homgwu@gmail.com",
description="A simple and easy to use module for Python3 to print multi and single line progress bar in terminal",
license='Apache-2.0',
keywords=('multi line progress', 'progress bar', 'progress'),
url="https://github.com/homgwu/eprogress.git",
)
| 29.588235
| 118
| 0.701789
|
af9ac38fc0862af444df35b78636123039063808
| 5,081
|
py
|
Python
|
generator/generate.py
|
civicdatacoop/civicdatacoop
|
7244676c2d77f69e86445d973b979e8f92ba444f
|
[
"MIT"
] | null | null | null |
generator/generate.py
|
civicdatacoop/civicdatacoop
|
7244676c2d77f69e86445d973b979e8f92ba444f
|
[
"MIT"
] | null | null | null |
generator/generate.py
|
civicdatacoop/civicdatacoop
|
7244676c2d77f69e86445d973b979e8f92ba444f
|
[
"MIT"
] | null | null | null |
import datetime
import shutil
from pathlib import Path
from jinja2 import Environment, FileSystemLoader
from dataclasses import dataclass
# Path to TEMPLATES folder (relative to where you run the script)
PATH_TO_TEMPLATES = Path('../generator/TEMPLATES/')
# Path to RESOURCES folder (relative to where you run the script)
PATH_TO_RESOURCES = Path('../generator/RESOURCES/')
# Path to output folder (relative to where you run the script)
PATH_TO_OUTPUT = Path('../docs/')
# Root URL
URL_ROOT = "https://civicdatacooperative.com/"
# Link to homepage
link_to_homepage = "/" # TODO: always '/' in production
# File suffix
html_file_suffix = ".html"
@dataclass()
class Page(object):
title: str
keywords: str
description: str
content_file: str
url: str
language: str
last_mod: datetime.datetime
name: str
def keys(self):
"""Get keys that allows conversion of this class to dictionary.
Returns:
List[str]: List of the keys to be passed to template.
"""
return ['title', 'keywords', 'description', 'url', 'content_file',
'language', 'name']
def __getitem__(self, key):
"""Allows conversion of this class to dictionary.
"""
return getattr(self, key)
def generate_site(self):
with open(PATH_TO_TEMPLATES.joinpath('page.html')) as tem_han:
template = Environment(
loader=FileSystemLoader(PATH_TO_TEMPLATES)
).from_string(tem_han.read())
html_str = template.render(
**dict(self),
link_to_homepage=link_to_homepage
)
return html_str
@property
def absolute_url(self):
if self.url != 'index':
return URL_ROOT + self.url + html_file_suffix
return URL_ROOT
@property
def last_modified(self):
if self.last_mod is None:
return None
return self.last_mod.strftime('%Y-%m-%d')
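# A hypothetical sketch (field values are illustrative): keys() and
# __getitem__ let a Page instance be unpacked like a mapping, which is what
# generate_site() relies on via template.render(**dict(self)):
#   demo = Page(title="T", keywords="k", description="d",
#               content_file="c.html", url="demo", language="en",
#               last_mod=None, name="Demo")
#   assert dict(demo)["title"] == "T"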
# Common meta tags
comm_keywords: str = "CIPHA, civic, data, synthetic, CPRD"
comm_description: str = "The Civic Data Cooperative is a part of the Faculty of Health and Life Sciences at the University of Liverpool working on projects that operate with data about citizens." # noqa: E501
# Pages definition
pages = [
Page(title="Liverpool City Region Civic Data Cooperative",
keywords=comm_keywords, # noqa: E501
description=comm_description, # noqa: E501
url="index",
content_file='page_home.html',
language="en",
last_mod=datetime.datetime(2022, 1, 1),
name="Home"
),
Page(title="LCR Civic Data Cooperative: About us",
keywords=comm_keywords, # noqa: E501
description=comm_description, # noqa: E501
url="about",
content_file='page_about.html',
language="en",
last_mod=datetime.datetime(2022, 1, 1),
name="About Us"
),
Page(title="LCR Civic Data Cooperative: Mission",
keywords=comm_keywords, # noqa: E501
description=comm_description, # noqa: E501
url="mission",
content_file='page_mission.html',
language="en",
last_mod=datetime.datetime(2022, 1, 1),
name="Mission"
),
Page(title="LCR Civic Data Cooperative: Contact",
keywords=comm_keywords, # noqa: E501
description=comm_description, # noqa: E501
url="contact",
content_file='page_contact.html',
language="en",
last_mod=datetime.datetime(2022, 1, 1),
name="Contact"
),
Page(title="LCR Civic Data Cooperative: License",
keywords=comm_keywords, # noqa: E501
description=comm_description, # noqa: E501
url="license",
content_file='page_license.html',
language="en",
last_mod=datetime.datetime(2022, 1, 1),
name="License"
),
]
# Remove all existing resources
if PATH_TO_OUTPUT.exists():
shutil.rmtree(PATH_TO_OUTPUT)
# Create new dir
PATH_TO_OUTPUT.mkdir()
for page in pages:
content = page.generate_site()
with PATH_TO_OUTPUT.joinpath(page.url + html_file_suffix).open('w') as fp:
fp.write(content)
# Copy resources
shutil.copytree(PATH_TO_RESOURCES, PATH_TO_OUTPUT, dirs_exist_ok=True)
# Generate site map (XML):
with open(PATH_TO_TEMPLATES.joinpath('site_map.xml')) as tem_han:
template = Environment(
loader=FileSystemLoader(PATH_TO_TEMPLATES)
).from_string(tem_han.read())
html_str = template.render(
sites=pages
)
with PATH_TO_OUTPUT.joinpath('sitemap.xml').open('w') as f_xml:
f_xml.write(html_str)
# Generate robots.txt file
robots_txt_content = f"""User-agent: *
Allow: /
Sitemap: {URL_ROOT}sitemap.xml"""
with PATH_TO_OUTPUT.joinpath('robots.txt').open('w') as robots_txt_h:
robots_txt_h.write(robots_txt_content)
| 32.363057
| 210
| 0.624483
|
bed6f0cbdf7b9ff29eb00444a3ed1e4d907ef440
| 659
|
py
|
Python
|
nabu/neuralnetworks/models/ed_decoders/ed_decoder_factory.py
|
imatge-upc/speech-2018-janna
|
f87d942a7225d7b38e0a650d27da8276a1954c47
|
[
"MIT"
] | 3
|
2019-02-15T03:28:42.000Z
|
2019-06-29T14:39:58.000Z
|
nabu/neuralnetworks/models/ed_decoders/ed_decoder_factory.py
|
imatge-upc/speech-2018-janna
|
f87d942a7225d7b38e0a650d27da8276a1954c47
|
[
"MIT"
] | null | null | null |
nabu/neuralnetworks/models/ed_decoders/ed_decoder_factory.py
|
imatge-upc/speech-2018-janna
|
f87d942a7225d7b38e0a650d27da8276a1954c47
|
[
"MIT"
] | 1
|
2019-04-19T06:58:47.000Z
|
2019-04-19T06:58:47.000Z
|
'''@file ed_decoder_factory
contains the ed decoder factory'''
from . import speller, dnn_decoder, hotstart_decoder, dynamic_routing
def factory(decoder):
'''gets an ed decoder class
Args:
decoder: the decoder type
Returns:
An EDDecoder class'''
if decoder == 'speller':
return speller.Speller
elif decoder == 'dnn_decoder':
return dnn_decoder.DNNDecoder
elif decoder == 'hotstart_decoder':
return hotstart_decoder.HotstartDecoder
elif decoder == 'dynamic_routing':
return dynamic_routing.DynamicRouting
else:
raise Exception('undefined decoder type: %s' % decoder)
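# A minimal, hypothetical usage sketch: resolve a decoder class by name; the
# constructor arguments of the returned class are defined elsewhere.
if __name__ == "__main__":
    decoder_class = factory('speller')
    print('resolved decoder class: %s' % decoder_class)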
| 26.36
| 69
| 0.681335
|
75329e6dc0c9b5d7836a69d9d72c3752371198ee
| 2,775
|
py
|
Python
|
setup.py
|
dcarr622/gmusicapi
|
c84beb6ade27b721b742abadeaa07ad3d68436be
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
dcarr622/gmusicapi
|
c84beb6ade27b721b742abadeaa07ad3d68436be
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
dcarr622/gmusicapi
|
c84beb6ade27b721b742abadeaa07ad3d68436be
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from setuptools import setup, find_packages
import sys
# Only 2.6-2.7 are supported.
if not ((2, 6, 0) <= sys.version_info[:3] < (2, 8)):
sys.stderr.write('gmusicapi does not officially support this Python version.\n')
# try to continue anyway
dynamic_requires = []
if sys.version_info[:2] == (2, 6):
dynamic_requires += [
'unittest2 == 0.5.1', # parity with 2.7
'simplejson >= 3.0.6', # ensure_ascii
]
# This hack is from http://stackoverflow.com/a/7071358/1231454;
# the version is kept in a separate file and gets parsed - this
# way, setup.py doesn't have to import the package.
VERSIONFILE = 'gmusicapi/_version.py'
version_line = open(VERSIONFILE).read()
version_re = r"^__version__ = ['\"]([^'\"]*)['\"]"
match = re.search(version_re, version_line, re.M)
if match:
version = match.group(1)
else:
raise RuntimeError("Could not find version in '%s'" % VERSIONFILE)
setup(
name='gmusicapi',
version=version,
author='Simon Weber',
author_email='simon@simonmweber.com',
url='http://pypi.python.org/pypi/gmusicapi/',
packages=find_packages(),
scripts=[],
license=open('LICENSE').read(),
description='An unofficial api for Google Play Music.',
long_description=(open('README.rst').read() + '\n\n' +
open('HISTORY.rst').read()),
install_requires=[
'validictory >= 0.8.0, != 0.9.2', # error messages
'decorator >= 3.3.1', # > 3.0 likely work, but not on pypi
'mutagen >= 1.18', # EasyID3 module renaming
'protobuf >= 2.4.1', # 2.3.0 uses ez_setup?
'requests >= 1.1.0, != 1.2.0, != 2.2.1', # session.close
'python-dateutil >= 1.3, != 2.0', # 2.0 is python3-only
'proboscis >= 1.2.5.1', # runs_after
'oauth2client >= 1.1', # TokenRevokeError
'mock >= 0.7.0', # MagicMock
'appdirs >= 1.1.0', # user_log_dir
'gpsoauth == 0.0.3', # mac -> android_id
] + dynamic_requires,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Software Development :: Libraries :: Python Modules',
],
include_package_data=True,
zip_safe=False,
)
| 36.513158
| 86
| 0.561802
|
6151e1cd60f060a634af51fca1698f8c048c79b6
| 12,829
|
py
|
Python
|
sling/nlp/parser/trainer/trace.py
|
SasCezar/sling
|
809e21a9986d2522d5014b5836ba222498c099a2
|
[
"Apache-2.0"
] | null | null | null |
sling/nlp/parser/trainer/trace.py
|
SasCezar/sling
|
809e21a9986d2522d5014b5836ba222498c099a2
|
[
"Apache-2.0"
] | null | null | null |
sling/nlp/parser/trainer/trace.py
|
SasCezar/sling
|
809e21a9986d2522d5014b5836ba222498c099a2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Stores the full trace of running the SLING parser on a document.
# A trace is a list of steps, and a step is a cascade of one or more actions.
# A trace is saved as a slot in the document frame.
#
# This file can also be used to compare two recordios with traces:
#
# python sling/nlp/parser/trainer/trace.py \
# --base=<base recordio file with tracing information> \
# --expt=<expt recordio file with tracing information> \
# --commons=<path to commons>
# [--diff=/path/where/sample/diff/will/be/stored.txt]
#
# This will compare base and expt document pairs, and verify equality in the
# following order: lstm features, ff features, predicted & final actions.
# At the first disparity, it will throw a ValueError with a diagnostic message
# and print the two documents in text format to the file specified by '--diff'.
# Tracing information.
class Trace:
# Represents a (predicted, final) parser action pair.
class Action:
def __init__(self, predicted, final, score=None):
self.predicted = predicted
self.final = final
self.score = score
# Returns the actions as frames.
def as_frames(self, store):
predicted = self.predicted.as_frame(store, slot_prefix='/trace/')
predicted['/trace/_str'] = str(self.predicted)
if self.score is not None:
        predicted['/trace/score'] = self.score
final = predicted
      if self.final is not self.predicted:
final = self.final.as_frame(store, slot_prefix='/trace/')
final['/trace/_str'] = str(self.final)
return (predicted, final)
# Represents a cascade of actions for one decoder step.
class Step:
def __init__(self, state, ff_features):
self.current = state.current
self.ff_features = ff_features
self.actions = []
# Adds a (predicted, final) action pair to the step.
def add_action(self, predicted, final, score=None):
self.actions.append(Trace.Action(predicted, final, score))
def __init__(self, spec, state, lstm_features):
self.spec = spec
self.lstm_features = lstm_features
self.document = state.document
self.begin = state.begin
self.end = state.end
# List of steps.
self.steps = []
# Adds a fresh decoder step with 'ff_features' as decoder features.
def start_step(self, state, ff_features):
self.steps.append(Trace.Step(state, ff_features))
# Adds an action to the latest decoder step.
def action(self, predicted, final, score=None):
assert len(self.steps) > 0
self.steps[-1].add_action(predicted, final, score)
# Writes encoder features to 'trace'.
def write_lstm_features(self, trace):
assert len(self.lstm_features) == len(self.spec.lstm_features)
store = self.document.store
tokens = self.document.tokens
frames = []
for i in xrange(len(tokens)):
frames.append(store.frame(\
{"/trace/index": i, "/trace/token": tokens[i].word}))
for f, vals in zip(self.spec.lstm_features, self.lstm_features):
assert len(vals.indices) == len(tokens)
for token, values in enumerate(vals.indices):
if type(values) is int:
values = [values]
assert type(values) is list
frames[token]["/trace/" + f.name] = values
frames_array = store.array(len(frames))
for i, frame in enumerate(frames):
frames_array[i] = frame
trace["/trace/lstm_features"] = frames_array
# Writes step tracing information to 'trace'.
def write_steps(self, trace):
store = self.document.store
steps = store.array(len(self.steps))
trace["/trace/steps"] = steps
for i, step in enumerate(self.steps):
word = "<EOS>"
if step.current < len(self.document.tokens):
word = self.document.tokens[step.current].word
# Decoder features.
ff_features = []
for f, indices in step.ff_features:
# Convert 'None' link feature values to -1.
indices = [-1 if index is None else index for index in indices]
ff_features.append(\
store.frame({"/trace/feature": f.name, "/trace/values": indices}))
# Actions in the step.
actions = store.array(len(step.actions))
for idx, action in enumerate(step.actions):
(predicted, final) = action.as_frames(store)
actions[idx] = store.frame(\
{"/trace/predicted": predicted, "/trace/final": final})
frame = store.frame({
"/trace/index": i,
"/trace/current": step.current,
"/trace/current_word": word,
"/trace/ff_features" : ff_features,
"/trace/actions" : actions
})
steps[i] = frame
# Writes the trace to the underlying document.
def write(self):
trace = self.document.store.frame({"begin": self.begin, "end": self.end})
self.write_lstm_features(trace)
self.write_steps(trace)
self.document.frame["trace"] = trace
if __name__ == "__main__":
# Compares traces of two aligned recordio files.
import os
import sling
import sling.flags as flags
# Utility to check assertions and throw an error if the assertion is
# violated.
class Checker:
# Initializes the checker with the document index, the base and expt
# documents, and the filename where the first discrepancy is stored.
def __init__(self, index, base_doc, expt_doc, diff_file=None):
self.index = index
self.base_doc = base_doc
self.expt_doc = expt_doc
self.diff_file = diff_file
# Sanity check: the two documents should have the same tokens.
if len(base_doc.tokens) != len(expt_doc.tokens):
self.error('Differing number of tokens at document %d' % index)
for i in xrange(len(base_doc.tokens)):
self.check_eq(base_doc.tokens[i].word, expt_doc.tokens[i].word, \
'token %d word' % i)
self.check_eq(base_doc.tokens[i].brk, expt_doc.tokens[i].brk, \
"token %d brk" % i)
# Throws an error with 'message', and writes the document pair to the
# pre-specified file.
def error(self, message):
if self.diff_file is not None:
with open(self.diff_file, 'w') as f:
f.write("Document Index:" + str(self.index) + "\n")
f.write("Base document\n")
f.write(self.base_doc.frame.data(pretty=True))
f.write('\n\n')
f.write("Expt document\n")
f.write(self.expt_doc.frame.data(pretty=True))
f.write('\n\n')
f.write(message)
print "One pair of differing docs written to", self.diff_file
raise ValueError(message)
# Checks that lhs == rhs.
def check_eq(self, lhs, rhs, message):
if lhs != rhs:
# Augment the message with the document index and the two values.
message = ("Document %d " % self.index) + message + ": %s vs %s"
self.error(message % (str(lhs), str(rhs)))
# Checks that the two frames are equal (modulo slot re-ordering), and
# ignoring roles in 'ignored_slots', which should be a list of names
# of roles that should be ignored.
def frame_eq(self, lhs, rhs, message, ignored_slots=None):
if ignored_slots is None: ignored_slots = []
lhs_set = set()
for key, value in lhs:
if key.id not in ignored_slots:
lhs_set.add((key.id, value))
rhs_set = set()
for key, value in rhs:
if key.id not in ignored_slots:
rhs_set.add((key.id, value))
diff = lhs_set.symmetric_difference(rhs_set)
if len(diff) > 0:
# Augment the message and report error.
message = ("Document %d " % self.index) + message
message += ", %s vs %s" % (lhs.data(), rhs.data())
message += ", symmetric difference = " + str(diff)
self.error(message)
# Compares two recordios for equality of tracing, stopping at the first error.
def compare(arg):
base_reader = sling.RecordReader(arg.base)
expt_reader = sling.RecordReader(arg.expt)
commons = sling.Store()
commons.load(arg.commons)
schema = sling.DocumentSchema(commons)
commons.freeze()
store = sling.Store(commons)
index = -1
for (_, base_val), (_, expt_val) in zip(base_reader, expt_reader):
index += 1
base_doc = sling.Document(frame=store.parse(base_val), schema=schema)
expt_doc = sling.Document(frame=store.parse(expt_val), schema=schema)
    checker = Checker(index, base_doc, expt_doc, arg.diff)
    # Basic checks: both documents should carry a trace.
    base = base_doc.frame["trace"]
    expt = expt_doc.frame["trace"]
    if base is None and expt is not None:
      checker.error('No trace in base document at index %d' % index)
    elif base is not None and expt is None:
      checker.error('No trace in expt document at index %d' % index)
    if base is None:
      continue
    # Traces should be over the same token range.
checker.check_eq(base["begin"], expt["begin"], "Trace Begin")
checker.check_eq(base["end"], expt["end"], "Trace End")
# Check LSTM features.
base_lstm = base["/trace/lstm_features"]
expt_lstm = expt["/trace/lstm_features"]
checker.check_eq(len(base_lstm), len(expt_lstm), "LSTM Features Length")
for i in xrange(len(base_lstm)):
checker.frame_eq(base_lstm[i], expt_lstm[i], \
"LSTM features for token %d (%s)" % (i, base_doc.tokens[i].word))
# Check steps.
base_steps = base["/trace/steps"]
expt_steps = expt["/trace/steps"]
min_steps = min(len(base_steps), len(expt_steps))
for i in xrange(min_steps):
message = "Step %d's current token index" % i
checker.check_eq(base_steps[i]["/trace/current"], \
expt_steps[i]["/trace/current"], message)
# Check FF features for the step.
base_ff = base_steps[i]["/trace/ff_features"]
expt_ff = expt_steps[i]["/trace/ff_features"]
checker.check_eq(len(base_ff), len(expt_ff), \
"# of FF features for step %d" % i)
base_dict = {f["/trace/feature"] : f["/trace/values"] for f in base_ff}
expt_dict = {f["/trace/feature"] : f["/trace/values"] for f in expt_ff}
for k, v in base_dict.iteritems():
checker.check_eq(k in expt_dict, True, \
"Step %d: FF feature %s not in expt" % (i, k))
checker.check_eq(v, expt_dict[k], \
"Step %d: FF feature %s has a different value in expt" % (i, k))
for k, v in expt_dict.iteritems():
checker.check_eq(k in base_dict, True, \
"Step %d: FF feature %s not in base" % (i, k))
# Check action(s) in the step.
base_actions = base_steps[i]["/trace/actions"]
expt_actions = expt_steps[i]["/trace/actions"]
for idx in xrange(min(len(base_actions), len(expt_actions))):
checker.frame_eq(base_actions[idx]["/trace/predicted"], \
expt_actions[idx]["/trace/predicted"],
"Step %d, predicted action %d" % (i, idx),
["/trace/_str"])
checker.frame_eq(base_actions[idx]["/trace/final"], \
expt_actions[idx]["/trace/final"],
"Step %d, final action %d" % (i, idx),
["/trace/_str"])
# There should be the same number of actions in the step.
checker.check_eq(len(base_actions), len(expt_actions), \
"Step %d: # of actions" % i)
# There should be the same number of steps.
checker.check_eq(len(base_steps), len(expt_steps), "# of Steps")
base_reader.close()
expt_reader.close()
flags.define('--base',
help='Base recordio',
default="",
type=str,
metavar='FILE')
flags.define('--expt',
help='Expt recordio',
default="",
type=str,
metavar='FILE')
flags.define('--commons',
help='Commons',
default="",
type=str,
metavar='FILE')
flags.define('--diff',
help='File where sample diff (if any) will be written',
default="/tmp/diff.txt",
type=str,
metavar='FILE')
flags.parse()
assert os.path.exists(flags.arg.base)
assert os.path.exists(flags.arg.expt)
assert os.path.exists(flags.arg.commons)
compare(flags.arg)
| 37.732353
| 80
| 0.631304
|
7b3fc7b9b7b233717444e50b864512ecee1a85a6
| 1,370
|
py
|
Python
|
OpenWeatherMap.py
|
BrianDunneKK/GettingStarted-Python
|
9fc666ae73401921a73aa99c02a4d6691daed97a
|
[
"MIT"
] | null | null | null |
OpenWeatherMap.py
|
BrianDunneKK/GettingStarted-Python
|
9fc666ae73401921a73aa99c02a4d6691daed97a
|
[
"MIT"
] | null | null | null |
OpenWeatherMap.py
|
BrianDunneKK/GettingStarted-Python
|
9fc666ae73401921a73aa99c02a4d6691daed97a
|
[
"MIT"
] | null | null | null |
import pyowm
from datetime import datetime, timedelta
loc = 'Kilkenny,Ireland'
owm = pyowm.OWM('f8c5ed2d5acaf51d22e984cb02a99d7a')  # NOTE: hardcoded API key; prefer loading it from an environment variable
observation = owm.weather_at_place(loc)
w = observation.get_weather()
wi = w.get_wind() # {'speed': 4.6, 'deg': 330}
hm = w.get_humidity() # 87
tm = w.get_temperature('celsius') # {'temp_max': 10.5, 'temp': 9.7, 'temp_min': 9.0}
print(w)
print(wi)
print(hm)
print(tm)
f3 = owm.three_hours_forecast(loc)
# fd = owm.daily_forecast(loc) # Your API subscription level does not allow to perform this operation
# fh = owm.weather_history_at_place(loc) # Your API subscription level does not allow to perform this operation
print("\nForecast:")
for fcast in f3._forecast._weathers:  # NOTE: relies on pyowm private internals
ref_time = fcast.get_reference_time(timeformat='iso')
temp = fcast.get_temperature(unit='celsius')["temp"]
details = fcast.get_detailed_status()
desc = " {} {:4.1f}°C {}".format(ref_time, temp, details)
print(desc)
# COUNTRY = 'UK'
# forecast = owm.daily_forecast('london,uk')
# forecast_date = datetime.now() + timedelta(days = 1, hours = 3)
# weather = forecast.get_weather_at(forecast_date)
# description = weather.get_detailed_status()
# clouds = weather.get_clouds()
# temperature = weather.get_temperature()
# wind = weather.get_wind()
# rain = weather.get_rain()
# print(forecast)
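# A minimal summary helper (a sketch that assumes only the pyowm 2.x accessors
# already used above; not part of the original script):
def summarise(weather):
    """Condense the separate prints above into one formatted line."""
    wind = weather.get_wind()                          # e.g. {'speed': 4.6, 'deg': 330}
    temp = weather.get_temperature('celsius')["temp"]  # current temperature
    return "{:4.1f}°C, wind {} m/s, humidity {}%".format(
        temp, wind.get("speed"), weather.get_humidity())

# print(summarise(w))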
| 33.414634
| 114
| 0.69562
|
b3c051371228a07a73436dd29f8ee666e25f67d9
| 10,353
|
py
|
Python
|
gamestonk_terminal/cryptocurrency/onchain/bitquery_view.py
|
b17z/GamestonkTerminal
|
135eedb8d995fd10baff6283de525e8a4d124cae
|
[
"MIT"
] | 1
|
2022-02-18T04:02:52.000Z
|
2022-02-18T04:02:52.000Z
|
gamestonk_terminal/cryptocurrency/onchain/bitquery_view.py
|
b17z/GamestonkTerminal
|
135eedb8d995fd10baff6283de525e8a4d124cae
|
[
"MIT"
] | null | null | null |
gamestonk_terminal/cryptocurrency/onchain/bitquery_view.py
|
b17z/GamestonkTerminal
|
135eedb8d995fd10baff6283de525e8a4d124cae
|
[
"MIT"
] | null | null | null |
"""The BitQuery view"""
__docformat__ = "numpy"
import logging
import os
from gamestonk_terminal.cryptocurrency.dataframe_helpers import (
prettify_column_names,
lambda_very_long_number_formatter,
)
from gamestonk_terminal.cryptocurrency.onchain import bitquery_model
from gamestonk_terminal.decorators import log_start_end
from gamestonk_terminal.helper_funcs import export_data, print_rich_table
from gamestonk_terminal.rich_config import console
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def display_dex_trades(
trade_amount_currency: str = "USD",
kind: str = "dex",
top: int = 20,
days: int = 90,
sortby: str = "tradeAmount",
descend: bool = False,
export: str = "",
) -> None:
"""Trades on Decentralized Exchanges aggregated by DEX or Month [Source: https://graphql.bitquery.io/]
Parameters
----------
kind: str
Aggregate trades by dex or time
trade_amount_currency: str
Currency of displayed trade amount. Default: USD
top: int
Number of records to display
sortby: str
Key by which to sort data
descend: bool
Flag to sort data descending
days: int
        Last n days to query data. Maximum 365 (larger values can cause
        server-side timeouts)
export : str
Export dataframe data to csv,json,xlsx file
"""
if kind == "time":
df = bitquery_model.get_dex_trades_monthly(trade_amount_currency, days)
if not df.empty:
df = df.sort_values(by="date", ascending=descend)
else:
df = bitquery_model.get_dex_trades_by_exchange(trade_amount_currency, days)
if not df.empty:
df = df.sort_values(by=sortby, ascending=descend)
if not df.empty:
df_data = df.copy()
df[["tradeAmount", "trades"]] = df[["tradeAmount", "trades"]].applymap(
lambda x: lambda_very_long_number_formatter(x)
)
df.columns = prettify_column_names(df.columns)
print_rich_table(
df.head(top),
headers=list(df.columns),
show_index=False,
title="Trades on Decentralized Exchanges",
)
console.print("")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"lt",
df_data,
)
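# Hedged usage sketch (assumes BitQuery credentials are configured for the
# terminal; kept commented out so the module stays import-safe):
#
#   display_dex_trades(kind="time", days=30, top=10, export="csv")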
@log_start_end(log=logger)
def display_daily_volume_for_given_pair(
token: str = "WBTC",
vs: str = "USDT",
top: int = 20,
sortby: str = "date",
descend: bool = False,
export: str = "",
) -> None:
"""Display daily volume for given pair
[Source: https://graphql.bitquery.io/]
Parameters
----------
token: str
ERC20 token symbol or address
vs: str
Quote currency.
top: int
Number of records to display
sortby: str
Key by which to sort data
descend: bool
Flag to sort data descending
export : str
Export dataframe data to csv,json,xlsx file
    """
df = bitquery_model.get_daily_dex_volume_for_given_pair(
token=token,
vs=vs,
limit=top,
)
if df.empty:
return
df = df.sort_values(by=sortby, ascending=descend)
df_data = df.copy()
df[["tradeAmount", "trades"]] = df[["tradeAmount", "trades"]].applymap(
lambda x: lambda_very_long_number_formatter(x)
)
df.columns = prettify_column_names(df.columns)
print_rich_table(
df.head(top),
headers=list(df.columns),
show_index=False,
title="Daily Volume for Pair",
)
console.print("")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"dvcp",
df_data,
)
@log_start_end(log=logger)
def display_dex_volume_for_token(
token: str = "WBTC",
trade_amount_currency: str = "USD",
top: int = 10,
sortby: str = "tradeAmount",
descend: bool = False,
export: str = "",
) -> None:
"""Display token volume on different Decentralized Exchanges. [Source: https://graphql.bitquery.io/]
Parameters
----------
token: str
ERC20 token symbol or address
trade_amount_currency: str
Currency of displayed trade amount. Default: USD
top: int
Number of records to display
sortby: str
Key by which to sort data
descend: bool
Flag to sort data descending
export : str
Export dataframe data to csv,json,xlsx file
    """
df = bitquery_model.get_token_volume_on_dexes(
token=token, trade_amount_currency=trade_amount_currency
)
if not df.empty:
df = df.sort_values(by=sortby, ascending=descend)
df_data = df.copy()
df[["tradeAmount", "trades"]] = df[["tradeAmount", "trades"]].applymap(
lambda x: lambda_very_long_number_formatter(x)
)
df.columns = prettify_column_names(df.columns)
print_rich_table(
df.head(top),
headers=list(df.columns),
show_index=False,
title="Token Volume on Exchanges",
)
console.print("")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"tv",
df_data,
)
@log_start_end(log=logger)
def display_ethereum_unique_senders(
interval: str = "days",
limit: int = 10,
sortby: str = "date",
descend: bool = False,
export: str = "",
) -> None:
"""Display number of unique ethereum addresses which made a transaction in given time interval
[Source: https://graphql.bitquery.io/]
Parameters
----------
    interval: str
        Time interval over which transactions are counted: month, week or day
    limit: int
        Number of records to display. The queried time period is derived from
        the interval: a month interval covers limit * 30 days and a week
        interval covers limit * 7 days. The time period is capped at 90 days
        for responsiveness.
sortby: str
Key by which to sort data
descend: bool
Flag to sort data descending
export : str
Export dataframe data to csv,json,xlsx file
    """
df = bitquery_model.get_ethereum_unique_senders(interval, limit)
if not df.empty:
df = df.sort_values(by=sortby, ascending=descend)
df[["uniqueSenders", "transactions", "maximumGasPrice"]] = df[
["uniqueSenders", "transactions", "maximumGasPrice"]
].applymap(lambda x: lambda_very_long_number_formatter(x))
df_data = df.copy()
df.columns = prettify_column_names(df.columns)
print_rich_table(
df,
headers=list(df.columns),
show_index=False,
title="Unique Ethereum Addresses",
)
console.print("")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"ueat",
df_data,
)
@log_start_end(log=logger)
def display_most_traded_pairs(
exchange="Uniswap",
days: int = 10,
top: int = 10,
sortby: str = "tradeAmount",
descend: bool = False,
export: str = "",
) -> None:
"""Display most traded crypto pairs on given decentralized exchange in chosen time period.
[Source: https://graphql.bitquery.io/]
Parameters
----------
    exchange: str
        Decentralized exchange name
    days: int
        Number of days to include in the calculation.
sortby: str
Key by which to sort data
descend: bool
Flag to sort data descending
export : str
Export dataframe data to csv,json,xlsx file
    """
df = bitquery_model.get_most_traded_pairs(exchange=exchange, limit=days)
if not df.empty:
df = df.sort_values(by=sortby, ascending=descend)
df_data = df.copy()
df[["tradeAmount", "trades"]] = df[["tradeAmount", "trades"]].applymap(
lambda x: lambda_very_long_number_formatter(x)
)
df.columns = prettify_column_names(df.columns)
print_rich_table(
df.head(top),
headers=list(df.columns),
show_index=False,
title="Most Traded Crypto Pairs",
)
console.print("")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"ttcp",
df_data,
)
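# Hedged usage sketch (hypothetical values; assumes BitQuery access):
#
#   display_most_traded_pairs(exchange="Uniswap", days=30, top=10)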
@log_start_end(log=logger)
def display_spread_for_crypto_pair(
token="ETH",
vs="USDC",
days: int = 10,
sortby: str = "date",
descend: bool = False,
export: str = "",
) -> None:
"""Display an average bid and ask prices, average spread for given crypto pair for chosen time period.
[Source: https://graphql.bitquery.io/]
Parameters
----------
days: int
Last n days to query data
token: str
ERC20 token symbol
vs: str
Quoted currency.
sortby: str
Key by which to sort data
descend: bool
Flag to sort data descending
export : str
Export dataframe data to csv,json,xlsx file
    """
df = bitquery_model.get_spread_for_crypto_pair(token=token, vs=vs, limit=days)
if not df.empty:
df = df.sort_values(by=sortby, ascending=descend)
df.columns = prettify_column_names(df.columns)
print_rich_table(
df,
headers=list(df.columns),
show_index=False,
title="Average Spread for Given Crypto",
)
console.print("")
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"baas",
df,
)
| 27.461538
| 106
| 0.61132
|
96f16834fcc2ff5066f3916911f749b04f93a7f9
| 45,979
|
py
|
Python
|
tensorflow/python/data/experimental/ops/data_service_ops.py
|
dongxiao92/tensorflow
|
72ac61dbed0ea43dce96431b2b72aa0a7bc83a93
|
[
"Apache-2.0"
] | 2
|
2021-03-02T12:53:15.000Z
|
2021-10-20T21:28:21.000Z
|
tensorflow/python/data/experimental/ops/data_service_ops.py
|
dongxiao92/tensorflow
|
72ac61dbed0ea43dce96431b2b72aa0a7bc83a93
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/data/experimental/ops/data_service_ops.py
|
dongxiao92/tensorflow
|
72ac61dbed0ea43dce96431b2b72aa0a7bc83a93
|
[
"Apache-2.0"
] | 2
|
2021-11-02T15:46:38.000Z
|
2022-03-08T11:39:35.000Z
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python API for executing a tf.data.Dataset using a tf.data service."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import six
from tensorflow.python import tf2
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import compression_ops
from tensorflow.python.data.experimental.ops.distribute_options import AutoShardPolicy
from tensorflow.python.data.experimental.ops.distribute_options import ExternalStatePolicy
from tensorflow.python.data.experimental.service import _pywrap_server_lib
from tensorflow.python.data.experimental.service import _pywrap_utils
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import gen_experimental_dataset_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.util import lazy_loader
from tensorflow.python.util.tf_export import tf_export
COMPRESSION_AUTO = "AUTO"
COMPRESSION_NONE = None
# TODO(b/176933539): Use the regular import.
nested_structure_coder = lazy_loader.LazyLoader(
"nested_structure_coder", globals(),
"tensorflow.python.saved_model.nested_structure_coder")
class ProcessingMode(object):
"""tf.data service processing modes."""
PARALLEL_EPOCHS = "parallel_epochs"
DISTRIBUTED_EPOCH = "distributed_epoch"
@staticmethod
def validate(mode):
"""Raises a ValueError if the given object is not a valid processing mode."""
valid_modes = [
ProcessingMode.PARALLEL_EPOCHS, ProcessingMode.DISTRIBUTED_EPOCH
]
if mode not in valid_modes:
raise ValueError(
"{0} is not a valid processing mode. Valid modes: {1}".format(
mode, valid_modes))
def _check_job_name(job_name):
if job_name is None:
return
if not isinstance(job_name, six.string_types):
raise ValueError(
"job_name must be a string, but job_name was of type "
"{0}. job_name={1}".format(type(job_name), job_name))
if not job_name:
raise ValueError("job_name must not be empty")
class _DataServiceDatasetV2(dataset_ops.DatasetSource):
"""A `Dataset` that reads elements from the tf.data service."""
def __init__(self,
dataset_id,
processing_mode,
address,
element_spec,
protocol,
data_transfer_protocol,
job_name=None,
consumer_index=None,
num_consumers=None,
max_outstanding_requests=None,
task_refresh_interval_hint_ms=None,
target_workers="AUTO"):
"""Constructs a _DataServiceDatasetV2.
Args:
dataset_id: The dataset id for the dataset to read from.
processing_mode: A string specifying the policy for how data should be
processed by tf.data workers. Can be either "parallel_epochs" to have
each tf.data worker process a copy of the dataset, or
"distributed_epoch" to split a single iteration of the dataset across
all the workers.
address: The tf.data service address, e.g. "localhost:5000".
element_spec: The dataset element spec for the dataset to read from.
protocol: The protocol to use for communicating with the tf.data service,
e.g. "grpc".
data_transfer_protocol: (Optional.) The protocol to use for transferring
data with the tf.data service. By default, data is transferred using
gRPC.
job_name: (Optional.) The name of the job. If provided, it must be a
non-empty string or Tensor. This argument makes it possible
for multiple datasets to share the same job. The default behavior is
that the dataset creates anonymous, exclusively owned jobs.
consumer_index: (Optional.) The index of the consumer in the range from
`0` to `num_consumers`. Must be specified alongside `num_consumers`.
When specified, consumers will read from the job in a strict round-robin
order, instead of the default first-come-first-served order.
num_consumers: (Optional.) The number of consumers which will consume from
the job. Must be specified alongside `consumer_index`. When specified,
consumers will read from the job in a strict round-robin order, instead
of the default first-come-first-served order. When `num_consumers` is
specified, the dataset must have infinite cardinality to prevent a
producer from running out of data early and causing consumers to go out
of sync.
max_outstanding_requests: (Optional.) A limit on how many elements may be
requested at the same time. You can use this option to control the
amount of memory used, since `distribute` won't use more than
`element_size` * `max_outstanding_requests` of memory.
task_refresh_interval_hint_ms: (Optional.) A hint for how often to query
the dispatcher for task changes.
target_workers: (Optional.) Which workers to read from. If `"AUTO"`,
tf.data runtime decides which workers to read from. If `"ANY"`, reads
from any tf.data service workers. If `"LOCAL"`, only reads from local
        in-process tf.data service workers. `"AUTO"` works well for most cases,
while users can specify other targets. For example, `"LOCAL"` helps
avoid RPCs and data copy if every TF worker colocates with a tf.data
service worker. Defaults to `"AUTO"`.
"""
    if (consumer_index is None) != (num_consumers is None):
      raise ValueError(
          "Must either set both consumer_index and num_consumers, or "
          "neither. consumer_index: {}, num_consumers: {}".format(
              consumer_index, num_consumers))
if num_consumers is not None and job_name is None:
raise ValueError("job_name must be set when setting num_consumers")
if job_name is None:
job_name = ""
if max_outstanding_requests is None:
max_outstanding_requests = dataset_ops.AUTOTUNE
if task_refresh_interval_hint_ms is None:
task_refresh_interval_hint_ms = dataset_ops.AUTOTUNE
self._dataset_id = ops.convert_to_tensor(
dataset_id, dtype=dtypes.int64, name="dataset_id")
self._processing_mode = ops.convert_to_tensor(
processing_mode, dtype=dtypes.string, name="processing_mode")
self._address = ops.convert_to_tensor(
address, dtype=dtypes.string, name="address")
self._protocol = ops.convert_to_tensor(
protocol, dtype=dtypes.string, name="protocol")
self._job_name = ops.convert_to_tensor(
job_name, dtype=dtypes.string, name="job_name")
self._consumer_index = ops.convert_to_tensor(
-1 if consumer_index is None else consumer_index,
dtype=dtypes.int64,
name="consumer_index")
self._num_consumers = ops.convert_to_tensor(
-1 if num_consumers is None else num_consumers,
dtype=dtypes.int64,
name="num_consumers")
self._max_outstanding_requests = ops.convert_to_tensor(
max_outstanding_requests,
dtype=dtypes.int64,
name="max_outstanding_requests")
self._element_spec = element_spec
self._target_workers = target_workers
compat_kwargs = {}
if data_transfer_protocol is not None:
compat_kwargs["data_transfer_protocol"] = data_transfer_protocol
if compat.forward_compatible(2021, 7, 12) or target_workers != "AUTO":
compat_kwargs["target_workers"] = target_workers
variant_tensor = gen_experimental_dataset_ops.data_service_dataset_v2(
dataset_id=self._dataset_id,
processing_mode=self._processing_mode,
address=self._address,
protocol=self._protocol,
job_name=self._job_name,
consumer_index=self._consumer_index,
num_consumers=self._num_consumers,
max_outstanding_requests=self._max_outstanding_requests,
task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,
iteration_counter=gen_experimental_dataset_ops.dummy_iteration_counter(
),
**compat_kwargs,
**self._flat_structure)
super(_DataServiceDatasetV2, self).__init__(variant_tensor)
@property
def element_spec(self):
return self._element_spec
class _DataServiceDatasetV1(dataset_ops.DatasetV1Adapter):
"""A `Dataset` that executes its input through the tf.data service."""
@functools.wraps(_DataServiceDatasetV2.__init__)
def __init__(self, dataset_id, processing_mode, address, element_spec,
protocol, data_transfer_protocol, job_name, consumer_index,
num_consumers, max_outstanding_requests,
task_refresh_interval_hint_ms, target_workers):
self._wrapped = _DataServiceDatasetV2(
dataset_id=dataset_id,
processing_mode=processing_mode,
address=address,
element_spec=element_spec,
protocol=protocol,
data_transfer_protocol=data_transfer_protocol,
job_name=job_name,
consumer_index=consumer_index,
num_consumers=num_consumers,
max_outstanding_requests=max_outstanding_requests,
task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,
target_workers=target_workers)
super(_DataServiceDatasetV1, self).__init__(self._wrapped)
if tf2.enabled():
_DataServiceDataset = _DataServiceDatasetV2
else:
_DataServiceDataset = _DataServiceDatasetV1
def _parse_service(service):
"""Converts a tf.data service string into a (protocol, address) tuple.
Args:
service: A string in the format "protocol://address" or just "address". If
the string is only an address, the default protocol will be used.
Returns:
The (protocol, address) tuple
"""
if not isinstance(service, six.string_types):
raise ValueError(
"service must be a string, but service was of type {0}. service={1}"
.format(type(service), service))
if not service:
raise ValueError("service must not be empty")
parts = service.split("://")
if len(parts) == 2:
protocol, address = parts
elif len(parts) == 1:
address = parts[0]
protocol = _pywrap_utils.TF_DATA_DefaultProtocol()
else:
raise ValueError("malformed service string has multiple '://': %s" %
service)
  # TODO(aaudibert): Consider validating reachability of address here.
return (protocol, address)
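# Parsing sketch (follows the contract documented above; the default protocol
# comes from _pywrap_utils.TF_DATA_DefaultProtocol()):
#
#   _parse_service("grpc://localhost:5000")  # -> ("grpc", "localhost:5000")
#   _parse_service("localhost:5000")         # -> (<default protocol>, "localhost:5000")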
def _distribute(processing_mode,
service,
job_name=None,
consumer_index=None,
num_consumers=None,
max_outstanding_requests=None,
task_refresh_interval_hint_ms=None,
data_transfer_protocol=None,
compression="AUTO",
target_workers="AUTO"):
"""A transformation that moves dataset processing to the tf.data service.
This transformation is similar to `distribute`, but supports additional
parameters which we do not yet want to add to the public Python API.
Args:
processing_mode: A string specifying the policy for how data should be
processed by tf.data workers. Can be either "parallel_epochs" to have each
tf.data worker process a copy of the dataset, or "distributed_epoch" to
split a single iteration of the dataset across all the workers.
service: A string or a tuple indicating how to connect to the tf.data
service. If it's a string, it should be in the format
`[<protocol>://]<address>`, where `<address>` identifies the dispatcher
address and `<protocol>` can optionally be used to override the default
protocol to use. If it's a tuple, it should be (protocol, address).
job_name: (Optional.) The name of the job. If provided, it must be a
non-empty string. This argument makes it possible
for multiple datasets to share the same job. The default behavior is that
the dataset creates anonymous, exclusively owned jobs.
consumer_index: (Optional.) The index of the consumer in the range from `0`
to `num_consumers`. Must be specified alongside `num_consumers`. When
specified, consumers will read from the job in a strict round-robin order,
instead of the default first-come-first-served order.
num_consumers: (Optional.) The number of consumers which will consume from
the job. Must be specified alongside `consumer_index`. When specified,
consumers will read from the job in a strict round-robin order, instead of
the default first-come-first-served order. When `num_consumers` is
specified, the dataset must have infinite cardinality to prevent a
producer from running out of data early and causing consumers to go out of
sync.
max_outstanding_requests: (Optional.) A limit on how many elements may be
requested at the same time. You can use this option to control the amount
of memory used, since `distribute` won't use more than `element_size` *
`max_outstanding_requests` of memory.
task_refresh_interval_hint_ms: (Optional.) A hint for how often to query the
dispatcher for task changes.
data_transfer_protocol: (Optional.) The protocol to use for transferring
data with the tf.data service. By default, data is transferred using gRPC.
compression: How to compress the dataset's elements before transferring them
over the network. "AUTO" leaves the decision of how to compress up to the
tf.data service runtime. `None` indicates not to compress.
target_workers: (Optional.) Which workers to read from. If `"AUTO"`, tf.data
runtime decides which workers to read from. If `"ANY"`, reads from any
      tf.data service workers. If `"LOCAL"`, only reads from local in-process
tf.data service workers. `"AUTO"` works well for most cases, while users
can specify other targets. For example, `"LOCAL"` helps avoid RPCs and
data copy if every TF worker colocates with a tf.data service worker.
Defaults to `"AUTO"`.
Returns:
Dataset: A `Dataset` of the elements produced by the data service.
"""
ProcessingMode.validate(processing_mode)
valid_compressions = [COMPRESSION_AUTO, COMPRESSION_NONE]
if compression not in valid_compressions:
raise ValueError(
"Invalid compression argument: {}. Must be one of {}".format(
compression, valid_compressions))
if compression == COMPRESSION_AUTO and data_transfer_protocol is not None:
compression = COMPRESSION_NONE
def _apply_fn(dataset): # pylint: disable=missing-docstring
dataset_id = _register_dataset(service, dataset, compression=compression)
return _from_dataset_id(
processing_mode,
service,
dataset_id,
dataset.element_spec,
job_name=job_name,
consumer_index=consumer_index,
num_consumers=num_consumers,
max_outstanding_requests=max_outstanding_requests,
task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,
data_transfer_protocol=data_transfer_protocol,
compression=compression,
target_workers=target_workers)
return _apply_fn
@tf_export("data.experimental.service.distribute")
def distribute(processing_mode,
service,
job_name=None,
consumer_index=None,
num_consumers=None,
max_outstanding_requests=None,
data_transfer_protocol=None,
compression="AUTO",
target_workers="AUTO"):
"""A transformation that moves dataset processing to the tf.data service.
When you iterate over a dataset containing the `distribute` transformation,
the tf.data service creates a "job" which produces data for the dataset
iteration.
The tf.data service uses a cluster of workers to prepare data for training
your model.
The `processing_mode` argument to `tf.data.experimental.service.distribute`
describes how to leverage multiple workers to process the input dataset.
Currently, there are two processing modes to choose from: "distributed_epoch"
and "parallel_epochs".
"distributed_epoch" means that the dataset will be split across all tf.data
service workers.
The dispatcher produces "splits" for the dataset and sends them to workers for
further processing. For example, if a dataset begins with a list of filenames,
the dispatcher will iterate through the filenames and send the filenames to
tf.data workers, which will perform the rest of the dataset transformations on
those files. "distributed_epoch" is useful when your model needs to see each
element of the dataset exactly once, or if it needs to see the data in a
generally-sequential order. "distributed_epoch" only works for datasets with
splittable sources, such as `Dataset.from_tensor_slices`,
`Dataset.list_files`, or `Dataset.range`.
"parallel_epochs" means that the entire input dataset will be processed
independently by each of the tf.data service workers.
For this reason, it is important to shuffle data (e.g. filenames)
non-deterministically, so that each worker will process the elements of the
dataset in a different order. "parallel_epochs" can be used to distribute
datasets that aren't splittable.
With two workers, "parallel_epochs" will produce every element of the dataset
twice:
>>> dispatcher = tf.data.experimental.service.DispatchServer()
>>> dispatcher_address = dispatcher.target.split("://")[1]
>>> # Start two workers
>>> workers = [
... tf.data.experimental.service.WorkerServer(
... tf.data.experimental.service.WorkerConfig(
... dispatcher_address=dispatcher_address)) for _ in range(2)
... ]
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.apply(tf.data.experimental.service.distribute(
... processing_mode="parallel_epochs", service=dispatcher.target))
>>> print(sorted(list(dataset.as_numpy_iterator())))
[0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9]
"distributed_epoch", on the other hand, will still produce each element once:
>>> dispatcher = tf.data.experimental.service.DispatchServer()
>>> dispatcher_address = dispatcher.target.split("://")[1]
>>> workers = [
... tf.data.experimental.service.WorkerServer(
... tf.data.experimental.service.WorkerConfig(
... dispatcher_address=dispatcher_address)) for _ in range(2)
... ]
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.apply(tf.data.experimental.service.distribute(
... processing_mode="distributed_epoch", service=dispatcher.target))
>>> print(sorted(list(dataset.as_numpy_iterator())))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
When using `apply(tf.data.experimental.service.distribute(...))`, the dataset
before the `apply` transformation executes within the tf.data service, while
the operations after `apply` happen within the local process.
>>> dispatcher = tf.data.experimental.service.DispatchServer()
>>> dispatcher_address = dispatcher.target.split("://")[1]
>>> workers = [
... tf.data.experimental.service.WorkerServer(
... tf.data.experimental.service.WorkerConfig(
... dispatcher_address=dispatcher_address)) for _ in range(2)
... ]
>>> dataset = tf.data.Dataset.range(5)
>>> dataset = dataset.map(lambda x: x*x)
>>> dataset = dataset.apply(
... tf.data.experimental.service.distribute("parallel_epochs",
... dispatcher.target))
>>> dataset = dataset.map(lambda x: x+1)
>>> print(sorted(list(dataset.as_numpy_iterator())))
[1, 1, 2, 2, 5, 5, 10, 10, 17, 17]
In the above example, the dataset operations (before applying the `distribute`
function on the elements) will be executed on the tf.data workers,
and the elements are provided over RPC. The remaining transformations
(after the call to `distribute`) will be executed locally. The dispatcher
  and the workers will bind to unused free ports (which are chosen at random),
in order to communicate with each other. However, to bind them to specific
ports, the `port` parameter can be passed.
The `job_name` argument allows jobs to be shared across multiple
datasets. Instead of each dataset creating its own job, all
datasets with the same `job_name` will consume from the same job. A new job
will be created for each iteration of the dataset (with each repetition of
`Dataset.repeat` counting as a new iteration). Suppose the `DispatchServer`
is serving on `localhost:5000` and two training workers (in either a single
client or multi-client setup) iterate over the below dataset, and there is a
single tf.data worker:
```
range5_dataset = tf.data.Dataset.range(5)
dataset = range5_dataset.apply(tf.data.experimental.service.distribute(
"parallel_epochs", "localhost:5000", job_name="my_job_name"))
for iteration in range(3):
print(list(dataset))
```
The elements of each job will be split between the two processes, with
elements being consumed by the processes on a first-come first-served basis.
One possible result is that process 1 prints
```
[0, 2, 4]
[0, 1, 3]
[1]
```
and process 2 prints
```
[1, 3]
[2, 4]
[0, 2, 3, 4]
```
Job names must not be re-used across different training jobs within the
lifetime of the tf.data service. In general, the tf.data service is expected
to live for the duration of a single training job.
To use the tf.data service with multiple training jobs, make sure to use
different job names to avoid conflicts. For example, suppose a training job
calls `distribute` with `job_name="job"` and reads until end of input. If
another independent job connects to the same tf.data service and tries to read
from `job_name="job"`, it will immediately receive end of input, without
getting any data.
**Round Robin data consumption**
By default, when multiple consumers read from the same job, they receive data
on a first-come first-served basis. In some use cases, it works better to use
a strict round-robin order. For example, the tf.data service can be used to
  coordinate example sizes across a cluster during synchronous training, so that
during each step all replicas train on similar-sized elements. To achieve
this, define a dataset which generates rounds of `num_consumers` consecutive
similar-sized batches, then enable round-robin reads by setting
`consumer_index` and `num_consumers`.
Consumers read data by cycling through all workers, reading one element from
each. First, each consumer will read an element from the first worker, then
each consumer will read an element from the second worker, and so on.
NOTE: To keep consumers in sync, round robin data consumption requires that
the dataset have infinite cardinality. You can get this by adding `.repeat()`
at the end of the dataset definition.
**Keras and Distribution Strategies**
The dataset produced by the `distribute` transformation can be passed to
Keras' `Model.fit` or Distribution Strategy's
`tf.distribute.Strategy.experimental_distribute_dataset` like any other
`tf.data.Dataset`. We recommend setting a `job_name` on the call to
`distribute` so that if there are multiple workers, they read data from the
same job. Note that the autosharding normally performed by
`experimental_distribute_dataset` will be disabled when setting a `job_name`,
since sharing the job already results in splitting data across the workers.
When using a shared job, data will be dynamically balanced across workers, so
that they reach end of input about the same time. This results in better
worker utilization than with autosharding, where each worker processes an
independent set of files, and some workers may run out of data earlier than
others.
Args:
processing_mode: A string specifying the policy for how data should be
processed by tf.data workers. Can be either "parallel_epochs" to have each
tf.data worker process a copy of the dataset, or "distributed_epoch" to
split a single iteration of the dataset across all the workers.
service: A string or a tuple indicating how to connect to the tf.data
service. If it's a string, it should be in the format
`[<protocol>://]<address>`, where `<address>` identifies the dispatcher
address and `<protocol>` can optionally be used to override the default
protocol to use. If it's a tuple, it should be (protocol, address).
job_name: (Optional.) The name of the job. If provided, it must be a
non-empty string. This argument makes it possible
for multiple datasets to share the same job. The default behavior is that
the dataset creates anonymous, exclusively owned jobs.
consumer_index: (Optional.) The index of the consumer in the range from `0`
to `num_consumers`. Must be specified alongside `num_consumers`. When
specified, consumers will read from the job in a strict round-robin order,
instead of the default first-come-first-served order.
num_consumers: (Optional.) The number of consumers which will consume from
the job. Must be specified alongside `consumer_index`. When specified,
consumers will read from the job in a strict round-robin order, instead of
the default first-come-first-served order. When `num_consumers` is
specified, the dataset must have infinite cardinality to prevent a
producer from running out of data early and causing consumers to go out of
sync.
max_outstanding_requests: (Optional.) A limit on how many elements may be
requested at the same time. You can use this option to control the amount
of memory used, since `distribute` won't use more than `element_size` *
`max_outstanding_requests` of memory.
data_transfer_protocol: (Optional.) The protocol to use for transferring
data with the tf.data service. By default, data is transferred using gRPC.
compression: How to compress the dataset's elements before transferring them
over the network. "AUTO" leaves the decision of how to compress up to the
tf.data service runtime. `None` indicates not to compress.
target_workers: (Optional.) Which workers to read from. If `"AUTO"`, tf.data
runtime decides which workers to read from. If `"ANY"`, reads from any
      tf.data service workers. If `"LOCAL"`, only reads from local in-process
tf.data service workers. `"AUTO"` works well for most cases, while users
can specify other targets. For example, `"LOCAL"` helps avoid RPCs and
data copy if every TF worker colocates with a tf.data service worker.
Defaults to `"AUTO"`.
Returns:
Dataset: A `Dataset` of the elements produced by the data service.
"""
_check_job_name(job_name)
return _distribute(
processing_mode=processing_mode,
service=service,
job_name=job_name,
consumer_index=consumer_index,
num_consumers=num_consumers,
max_outstanding_requests=max_outstanding_requests,
data_transfer_protocol=data_transfer_protocol,
compression=compression,
target_workers=target_workers)
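# Round-robin sketch (hypothetical addresses; condenses the "Round Robin data
# consumption" notes in the docstring above). Each consumer process passes its
# own `consumer_index`:
#
#   dataset = dataset.repeat()  # round robin requires infinite cardinality
#   dataset = dataset.apply(tf.data.experimental.service.distribute(
#       processing_mode="parallel_epochs",
#       service="grpc://localhost:5000",
#       job_name="round_robin_job",
#       consumer_index=0,        # 1 on the second consumer process
#       num_consumers=2))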
def _register_dataset(service, dataset, compression):
"""Registers a dataset with the tf.data service.
This transformation is similar to `register_dataset`, but supports additional
parameters which we do not yet want to add to the public Python API.
Args:
service: A string or a tuple indicating how to connect to the tf.data
service. If it's a string, it should be in the format
`[<protocol>://]<address>`, where `<address>` identifies the dispatcher
address and `<protocol>` can optionally be used to override the default
protocol to use. If it's a tuple, it should be (protocol, address).
dataset: A `tf.data.Dataset` to register with the tf.data service.
compression: How to compress the dataset's elements before transferring them
over the network. "AUTO" leaves the decision of how to compress up to the
tf.data service runtime. `None` indicates not to compress.
Returns:
A scalar int64 tensor of the registered dataset's id.
"""
valid_compressions = [COMPRESSION_AUTO, COMPRESSION_NONE]
if compression not in valid_compressions:
raise ValueError(
"Invalid compression argument: {}. Must be one of {}".format(
compression, valid_compressions))
if isinstance(service, tuple):
protocol, address = service
else:
protocol, address = _parse_service(service)
external_state_policy = dataset.options().experimental_external_state_policy
if external_state_policy is None:
external_state_policy = ExternalStatePolicy.WARN
encoded_spec = ""
if context.executing_eagerly():
coder = nested_structure_coder.StructureCoder()
encoded_spec = coder.encode_structure(
dataset.element_spec).SerializeToString()
if compression == COMPRESSION_AUTO:
dataset = dataset.map(
lambda *x: compression_ops.compress(x),
num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = dataset.prefetch(dataset_ops.AUTOTUNE)
dataset = dataset._apply_debug_options() # pylint: disable=protected-access
dataset_id = gen_experimental_dataset_ops.register_dataset(
dataset._variant_tensor, # pylint: disable=protected-access
address=address,
protocol=protocol,
external_state_policy=external_state_policy.value,
element_spec=encoded_spec)
return dataset_id
@tf_export("data.experimental.service.register_dataset")
def register_dataset(service, dataset):
"""Registers a dataset with the tf.data service.
`register_dataset` registers a dataset with the tf.data service so that
datasets can be created later with
  `tf.data.experimental.service.from_dataset_id`. This is useful when the dataset
  is registered by one process, then used in another process. When the same
process is both registering and reading from the dataset, it is simpler to use
`tf.data.experimental.service.distribute` instead.
If the dataset is already registered with the tf.data service,
`register_dataset` returns the already-registered dataset's id.
>>> dispatcher = tf.data.experimental.service.DispatchServer()
>>> dispatcher_address = dispatcher.target.split("://")[1]
>>> worker = tf.data.experimental.service.WorkerServer(
... tf.data.experimental.service.WorkerConfig(
... dispatcher_address=dispatcher_address))
>>> dataset = tf.data.Dataset.range(10)
>>> dataset_id = tf.data.experimental.service.register_dataset(
... dispatcher.target, dataset)
>>> dataset = tf.data.experimental.service.from_dataset_id(
... processing_mode="parallel_epochs",
... service=dispatcher.target,
... dataset_id=dataset_id,
... element_spec=dataset.element_spec)
>>> print(list(dataset.as_numpy_iterator()))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Args:
service: A string or a tuple indicating how to connect to the tf.data
service. If it's a string, it should be in the format
`[<protocol>://]<address>`, where `<address>` identifies the dispatcher
address and `<protocol>` can optionally be used to override the default
protocol to use. If it's a tuple, it should be (protocol, address).
dataset: A `tf.data.Dataset` to register with the tf.data service.
Returns:
A scalar int64 tensor of the registered dataset's id.
"""
return _register_dataset(service, dataset, compression="AUTO")
def _from_dataset_id(processing_mode,
service,
dataset_id,
element_spec,
job_name=None,
consumer_index=None,
num_consumers=None,
max_outstanding_requests=None,
task_refresh_interval_hint_ms=None,
data_transfer_protocol=None,
compression="AUTO",
target_workers="AUTO"):
"""Creates a dataset which reads data from the tf.data service.
This transformation is similar to `from_dataset_id`, but supports additional
parameters which we do not yet want to add to the public Python API.
Args:
processing_mode: A string specifying the policy for how data should be
processed by tf.data workers. Can be either "parallel_epochs" to have each
tf.data worker process a copy of the dataset, or "distributed_epoch" to
split a single iteration of the dataset across all the workers.
service: A string or a tuple indicating how to connect to the tf.data
service. If it's a string, it should be in the format
`[<protocol>://]<address>`, where `<address>` identifies the dispatcher
address and `<protocol>` can optionally be used to override the default
protocol to use. If it's a tuple, it should be (protocol, address).
dataset_id: The id of the dataset to read from. This id is returned by
`register_dataset` when the dataset is registered with the tf.data
service.
element_spec: A nested structure of `tf.TypeSpec`s representing the type of
elements produced by the dataset. This argument is only required inside a
tf.function. Use `tf.data.Dataset.element_spec` to get the element spec
for a given dataset.
job_name: (Optional.) The name of the job. If provided, it must be a
non-empty string or tensor. This argument makes it possible
for multiple datasets to share the same job. The default behavior is that
the dataset creates anonymous, exclusively owned jobs.
consumer_index: (Optional.) The index of the consumer in the range from `0`
to `num_consumers`. Must be specified alongside `num_consumers`. When
specified, consumers will read from the job in a strict round-robin order,
instead of the default first-come-first-served order.
num_consumers: (Optional.) The number of consumers which will consume from
the job. Must be specified alongside `consumer_index`. When specified,
consumers will read from the job in a strict round-robin order, instead of
the default first-come-first-served order. When `num_consumers` is
specified, the dataset must have infinite cardinality to prevent a
producer from running out of data early and causing consumers to go out of
sync.
max_outstanding_requests: (Optional.) A limit on how many elements may be
requested at the same time. You can use this option to control the amount
of memory used, since `distribute` won't use more than `element_size` *
`max_outstanding_requests` of memory.
task_refresh_interval_hint_ms: (Optional.) A hint for how often to query the
dispatcher for task changes.
data_transfer_protocol: (Optional.) The protocol to use for transferring
data with the tf.data service. By default, data is transferred using gRPC.
compression: An indication of how the dataset's elements were compressed, so
that `from_dataset_id` can uncompress them if necessary.
target_workers: (Optional.) Which workers to read from. If `"AUTO"`, tf.data
runtime decides which workers to read from. If `"ANY"`, reads from any
      tf.data service workers. If `"LOCAL"`, only reads from local in-process
tf.data service workers. `"AUTO"` works well for most cases, while users
can specify other targets. For example, `"LOCAL"` helps avoid RPCs and
data copy if every TF worker colocates with a tf.data service worker.
Defaults to `"AUTO"`.
Returns:
A `tf.data.Dataset` which reads from the tf.data service.
"""
ProcessingMode.validate(processing_mode)
valid_compressions = [COMPRESSION_AUTO, COMPRESSION_NONE]
if isinstance(service, tuple):
protocol, address = service
else:
protocol, address = _parse_service(service)
if compression not in valid_compressions:
raise ValueError(
"Invalid compression argument: {}. Must be one of {}".format(
compression, valid_compressions))
if job_name is not None:
if not isinstance(job_name, six.string_types) and not isinstance(
job_name, ops.Tensor):
raise ValueError(
"job_name must be a string or Tensor, but job_name was of type "
"{0}. job_name={1}".format(type(job_name), job_name))
if element_spec is None:
if not context.executing_eagerly():
raise ValueError("In graph mode element_spec must be provided manually.")
dataset_id_val = tensor_util.constant_value(dataset_id)
try:
encoded_spec = _pywrap_server_lib.TF_DATA_GetElementSpec(
dataset_id_val, address, protocol)
except NotImplementedError as err:
raise ValueError("The tf.data service is running an earlier version of "
"TensorFlow that requires specifying `element_spec` as "
"an argument to `from_dataset_id`. Please either supply "
"an element spec or update the tf.data service to the "
"latest version.") from err
except RuntimeError as err:
raise ValueError("Failed to fetch element spec for dataset id " +
str(dataset_id_val) + " from tf.data service. If the "
"dataset was registered in graph mode or inside a "
"tf.function, the `element_spec` must be specified as "
"an argument to `from_dataset_id`.") from err
struct_pb = nested_structure_coder.struct_pb2.StructuredValue()
struct_pb.ParseFromString(encoded_spec)
coder = nested_structure_coder.StructureCoder()
element_spec = coder.decode_proto(struct_pb)
# If we compress, the data service side dataset will produce scalar variants.
data_service_element_spec = (
tensor_spec.TensorSpec(shape=(), dtype=dtypes.variant)
if compression == COMPRESSION_AUTO else element_spec)
dataset = _DataServiceDataset(
dataset_id=dataset_id,
processing_mode=processing_mode,
address=address,
element_spec=data_service_element_spec,
protocol=protocol,
data_transfer_protocol=data_transfer_protocol,
job_name=job_name,
consumer_index=consumer_index,
num_consumers=num_consumers,
max_outstanding_requests=max_outstanding_requests,
task_refresh_interval_hint_ms=task_refresh_interval_hint_ms,
target_workers=target_workers)
if compression == COMPRESSION_AUTO:
dataset = dataset.map(
lambda x: compression_ops.uncompress(x, output_spec=element_spec),
num_parallel_calls=dataset_ops.AUTOTUNE)
# Disable autosharding for shared jobs.
if job_name is not None:
options = dataset_ops.Options()
options.experimental_distribute.auto_shard_policy = AutoShardPolicy.OFF
dataset = dataset.with_options(options)
return dataset
@tf_export("data.experimental.service.from_dataset_id")
def from_dataset_id(processing_mode,
service,
dataset_id,
element_spec=None,
job_name=None,
consumer_index=None,
num_consumers=None,
max_outstanding_requests=None,
data_transfer_protocol=None,
target_workers="AUTO"):
"""Creates a dataset which reads data from the tf.data service.
This is useful when the dataset is registered by one process, then used in
another process. When the same process is both registering and reading from
the dataset, it is simpler to use `tf.data.experimental.service.distribute`
instead.
Before using `from_dataset_id`, the dataset must have been registered with the
tf.data service using `tf.data.experimental.service.register_dataset`.
`register_dataset` returns a dataset id for the registered dataset. That is
the `dataset_id` which should be passed to `from_dataset_id`.
The `element_spec` argument indicates the `tf.TypeSpec`s for the elements
produced by the dataset. Currently `element_spec` must be explicitly
specified, and match the dataset registered under `dataset_id`. `element_spec`
defaults to `None` so that in the future we can support automatically
discovering the `element_spec` by querying the tf.data service.
`tf.data.experimental.service.distribute` is a convenience method which
combines `register_dataset` and `from_dataset_id` into a dataset
transformation.
See the documentation for `tf.data.experimental.service.distribute` for more
detail about how `from_dataset_id` works.
>>> dispatcher = tf.data.experimental.service.DispatchServer()
>>> dispatcher_address = dispatcher.target.split("://")[1]
>>> worker = tf.data.experimental.service.WorkerServer(
... tf.data.experimental.service.WorkerConfig(
... dispatcher_address=dispatcher_address))
>>> dataset = tf.data.Dataset.range(10)
>>> dataset_id = tf.data.experimental.service.register_dataset(
... dispatcher.target, dataset)
>>> dataset = tf.data.experimental.service.from_dataset_id(
... processing_mode="parallel_epochs",
... service=dispatcher.target,
... dataset_id=dataset_id,
... element_spec=dataset.element_spec)
>>> print(list(dataset.as_numpy_iterator()))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Args:
processing_mode: A string specifying the policy for how data should be
processed by tf.data workers. Can be either "parallel_epochs" to have each
tf.data worker process a copy of the dataset, or "distributed_epoch" to
split a single iteration of the dataset across all the workers.
service: A string or a tuple indicating how to connect to the tf.data
service. If it's a string, it should be in the format
`[<protocol>://]<address>`, where `<address>` identifies the dispatcher
address and `<protocol>` can optionally be used to override the default
protocol to use. If it's a tuple, it should be (protocol, address).
dataset_id: The id of the dataset to read from. This id is returned by
`register_dataset` when the dataset is registered with the tf.data
service.
element_spec: A nested structure of `tf.TypeSpec`s representing the type of
elements produced by the dataset. This argument is only required inside a
tf.function. Use `tf.data.Dataset.element_spec` to get the element spec
for a given dataset.
job_name: (Optional.) The name of the job. If provided, it must be a
non-empty string. This argument makes it possible
for multiple datasets to share the same job. The default behavior is that
the dataset creates anonymous, exclusively owned jobs.
consumer_index: (Optional.) The index of the consumer in the range from `0`
to `num_consumers`. Must be specified alongside `num_consumers`. When
specified, consumers will read from the job in a strict round-robin order,
instead of the default first-come-first-served order.
num_consumers: (Optional.) The number of consumers which will consume from
the job. Must be specified alongside `consumer_index`. When specified,
consumers will read from the job in a strict round-robin order, instead of
the default first-come-first-served order. When `num_consumers` is
specified, the dataset must have infinite cardinality to prevent a
producer from running out of data early and causing consumers to go out of
sync.
max_outstanding_requests: (Optional.) A limit on how many elements may be
requested at the same time. You can use this option to control the amount
of memory used, since `distribute` won't use more than `element_size` *
`max_outstanding_requests` of memory.
data_transfer_protocol: (Optional.) The protocol to use for transferring
data with the tf.data service. By default, data is transferred using gRPC.
target_workers: (Optional.) Which workers to read from. If `"AUTO"`, tf.data
runtime decides which workers to read from. If `"ANY"`, reads from any
      tf.data service workers. If `"LOCAL"`, only reads from local in-process
tf.data service workers. `"AUTO"` works well for most cases, while users
can specify other targets. For example, `"LOCAL"` helps avoid RPCs and
data copy if every TF worker colocates with a tf.data service worker.
Defaults to `"AUTO"`.
Returns:
A `tf.data.Dataset` which reads from the tf.data service.
"""
_check_job_name(job_name)
if job_name is not None:
job_name = string_ops.string_join(
["dataset_id=", string_ops.as_string(dataset_id), job_name], "/")
return _from_dataset_id(
processing_mode=processing_mode,
service=service,
dataset_id=dataset_id,
element_spec=element_spec,
job_name=job_name,
consumer_index=consumer_index,
num_consumers=num_consumers,
max_outstanding_requests=max_outstanding_requests,
data_transfer_protocol=data_transfer_protocol,
target_workers=target_workers)
| 47.894792
| 90
| 0.717197
|
2a1aa82c4adbb0cbf1994953ce9a237ea564fa19
| 1,879
|
py
|
Python
|
discoseg/model/vocab.py
|
faithannong/DPLP-faith
|
7768a9e84d6fa3a0cffa50c4abd818076d45fc40
|
[
"MIT"
] | 113
|
2015-09-22T16:02:46.000Z
|
2022-02-21T14:57:57.000Z
|
discoseg/model/vocab.py
|
faithannong/DPLP-faith
|
7768a9e84d6fa3a0cffa50c4abd818076d45fc40
|
[
"MIT"
] | 8
|
2015-02-24T22:40:59.000Z
|
2020-10-05T03:34:33.000Z
|
discoseg/model/vocab.py
|
faithannong/DPLP-faith
|
7768a9e84d6fa3a0cffa50c4abd818076d45fc40
|
[
"MIT"
] | 44
|
2015-03-06T06:47:26.000Z
|
2022-03-13T17:33:08.000Z
|
## vocab.py
## Author: Yangfeng Ji
## Date: 05-02-2015
## Time-stamp: <yangfeng 05/03/2015 03:45:40>
""" Build vocab
1, read each document for creating the feature list
2, collecting all features with their frequency
3, creating vocab with feature selection
"""
from collections import defaultdict
from feature import FeatureGenerator
from cPickle import dump
import gzip
class VocabGenerator(object):
def __init__(self, thresh=20, topn=100):
""" Initialization
"""
self.vocab = {}
self.features = defaultdict(float)
self.thresh = thresh
self.topn = topn
self.fg = FeatureGenerator()
def build(self, doc):
""" Extract features for a given doc
:type doc: Doc instance
:param doc:
"""
featdict = self.fg.extract(doc)
for (idx, featlist) in featdict.iteritems():
for feat in featlist:
self.features[feat] += 1.0
def select(self):
""" Select top-n features according to frequency
"""
pass
def filter(self):
""" Filter out low-frequency features with thresh
"""
index = 0
for (feat, freq) in self.features.iteritems():
if freq >= self.thresh:
self.vocab[feat] = index
index += 1
def getvocab(self):
""" Return vocab
"""
if len(self.vocab) == 0:
raise ValueError("Empty vocab")
return self.vocab
def savevocab(self, fvocab):
""" Dump vocab into a pickle file
"""
        if not fvocab.endswith('.pickle.gz'):
            fvocab += '.pickle.gz'  # note the dot: "vocab" -> "vocab.pickle.gz"
fout = gzip.open(fvocab, 'w')
if len(self.vocab) == 0:
raise ValueError("Empty vocab")
dump(self.vocab, fout)
print "Save vocab into file: {}".format(fvocab)
| 26.097222
| 57
| 0.56892
|
d9a479f29ebcf582943bb14b7b5b7340acdda6de
| 4,186
|
py
|
Python
|
game/yaml/serializer.py
|
Sage-of-Mirrors/CosmoRen
|
6f74b5c3b5eda2de5756a38d39d9f5f4ab922116
|
[
"MIT"
] | null | null | null |
game/yaml/serializer.py
|
Sage-of-Mirrors/CosmoRen
|
6f74b5c3b5eda2de5756a38d39d9f5f4ab922116
|
[
"MIT"
] | null | null | null |
game/yaml/serializer.py
|
Sage-of-Mirrors/CosmoRen
|
6f74b5c3b5eda2de5756a38d39d9f5f4ab922116
|
[
"MIT"
] | null | null | null |
__all__ = ['Serializer', 'SerializerError']
from yaml.error import YAMLError
from yaml.events import *
from yaml.nodes import *
class SerializerError(YAMLError):
pass
class Serializer(object):
ANCHOR_TEMPLATE = u'id%03d'
def __init__(self, encoding=None,
explicit_start=None, explicit_end=None, version=None, tags=None):
self.use_encoding = encoding
self.use_explicit_start = explicit_start
self.use_explicit_end = explicit_end
self.use_version = version
self.use_tags = tags
self.serialized_nodes = {}
self.anchors = {}
self.last_anchor_id = 0
self.closed = None
def open(self):
if self.closed is None:
self.emit(StreamStartEvent(encoding=self.use_encoding))
self.closed = False
elif self.closed:
raise SerializerError("serializer is closed")
else:
raise SerializerError("serializer is already opened")
def close(self):
if self.closed is None:
raise SerializerError("serializer is not opened")
elif not self.closed:
self.emit(StreamEndEvent())
self.closed = True
#def __del__(self):
# self.close()
def serialize(self, node):
if self.closed is None:
raise SerializerError("serializer is not opened")
elif self.closed:
raise SerializerError("serializer is closed")
self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
version=self.use_version, tags=self.use_tags))
self.anchor_node(node)
self.serialize_node(node, None, None)
self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
self.serialized_nodes = {}
self.anchors = {}
self.last_anchor_id = 0
def anchor_node(self, node):
if node in self.anchors:
if self.anchors[node] is None:
self.anchors[node] = self.generate_anchor(node)
else:
self.anchors[node] = None
if isinstance(node, SequenceNode):
for item in node.value:
self.anchor_node(item)
elif isinstance(node, MappingNode):
for key, value in node.value:
self.anchor_node(key)
self.anchor_node(value)
def generate_anchor(self, node):
self.last_anchor_id += 1
return self.ANCHOR_TEMPLATE % self.last_anchor_id
def serialize_node(self, node, parent, index):
alias = self.anchors[node]
if node in self.serialized_nodes:
self.emit(AliasEvent(alias))
else:
self.serialized_nodes[node] = True
self.descend_resolver(parent, index)
if isinstance(node, ScalarNode):
detected_tag = self.resolve(ScalarNode, node.value, (True, False))
default_tag = self.resolve(ScalarNode, node.value, (False, True))
implicit = (node.tag == detected_tag), (node.tag == default_tag)
self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
style=node.style))
elif isinstance(node, SequenceNode):
implicit = (node.tag
== self.resolve(SequenceNode, node.value, True))
self.emit(SequenceStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
index = 0
for item in node.value:
self.serialize_node(item, node, index)
index += 1
self.emit(SequenceEndEvent())
elif isinstance(node, MappingNode):
implicit = (node.tag
== self.resolve(MappingNode, node.value, True))
self.emit(MappingStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
for key, value in node.value:
self.serialize_node(key, node, None)
self.serialize_node(value, node, key)
self.emit(MappingEndEvent())
self.ascend_resolver()
| 37.375
| 82
| 0.583373
|
572a9f63da146ba25cad0d8e6f08f777a3155f65
| 8,142
|
py
|
Python
|
model.py
|
MG2033/Action-Conditional-Video-Prediction
|
02b7fb123729c95f1ebff2c26e2236f7d7913c17
|
[
"Apache-2.0"
] | 2
|
2020-08-28T06:34:05.000Z
|
2020-10-01T11:41:33.000Z
|
model.py
|
MG2033/Action-Conditional-Video-Prediction
|
02b7fb123729c95f1ebff2c26e2236f7d7913c17
|
[
"Apache-2.0"
] | null | null | null |
model.py
|
MG2033/Action-Conditional-Video-Prediction
|
02b7fb123729c95f1ebff2c26e2236f7d7913c17
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
from layers import conv2d, conv2d_transpose, pad, flatten, dense, action_transform, get_deconv_filter
class ACVP:
"""ACVP is implemented here!"""
def __init__(self, args, prediction_steps, reuse=False):
self.args = args
self.X = None
self.y = None
self.network_template = None
self.logits = None
self.is_training = None
self.loss = None
self.train_op = None
self.summaries_merged = None
self.batch_size = self.args.batch_size if self.args.train_or_test == "train" else 1
self.reuse = reuse
# Learning rate changes according to the phase of training
# with momentum of 0.9, (squared) gradient momentum of 0.95, and min squared gradient of 0.01
self.RMSProp_params = (0.9, 0.95, 0.01)
# Max prediction step objective
self.prediction_steps = prediction_steps
self.__build()
def __init_input(self):
with tf.variable_scope('input'):
self.X = tf.placeholder(tf.float32, [self.batch_size, self.args.img_height, self.args.img_width,
self.args.num_stack * self.args.num_channels])
self.y = tf.placeholder(tf.float32,
[self.batch_size, self.prediction_steps, self.args.img_height, self.args.img_width,
self.args.num_channels])
self.learning_rate = tf.placeholder(tf.float32)
self.actions = tf.placeholder(tf.float32,
[self.batch_size, self.prediction_steps, self.args.num_actions])
self.is_training = tf.placeholder(tf.bool)
def __init_template(self, x, actions):
with tf.variable_scope('template'):
conv1 = conv2d('conv1', x, num_filters=64, kernel_size=(8, 8), padding='SAME', stride=(2, 2),
activation=tf.nn.relu, batchnorm_enabled=self.args.batchnorm_enabled,
l2_strength=self.args.l2_strength, bias=self.args.bias,
is_training=self.is_training)
conv2 = conv2d('conv2', conv1, num_filters=128, kernel_size=(6, 6), padding='SAME', stride=(2, 2),
activation=tf.nn.relu, batchnorm_enabled=self.args.batchnorm_enabled,
l2_strength=self.args.l2_strength, bias=self.args.bias,
is_training=self.is_training)
conv3 = conv2d('conv3', conv2, num_filters=128, kernel_size=(6, 6), padding='SAME', stride=(2, 2),
activation=tf.nn.relu, batchnorm_enabled=self.args.batchnorm_enabled,
l2_strength=self.args.l2_strength, bias=self.args.bias,
is_training=self.is_training)
conv4 = conv2d('conv4', conv3, num_filters=128, kernel_size=(4, 4), padding='SAME', stride=(2, 2),
activation=tf.nn.relu, batchnorm_enabled=self.args.batchnorm_enabled,
l2_strength=self.args.l2_strength, bias=self.args.bias,
is_training=self.is_training)
flattened = flatten(conv4)
dense1 = dense('dense1', flattened, output_dim=2048, activation=tf.nn.relu,
batchnorm_enabled=self.args.batchnorm_enabled, l2_strength=self.args.l2_strength,
bias=self.args.bias, is_training=self.is_training)
h, w = conv4.get_shape()[1].value, conv4.get_shape()[2].value
features_with_actions = action_transform(dense1, actions, factors=2048,
output_dim=h * w * 128,
final_activation=tf.nn.relu,
batchnorm_enabled=self.args.batchnorm_enabled,
l2_strength=self.args.l2_strength, bias=self.args.bias,
is_training=self.is_training)
f_a_reshaped = tf.reshape(features_with_actions, [-1, h, w, 128], 'f_a_reshaped')
h2, w2 = conv3.get_shape()[1].value, conv3.get_shape()[2].value
deconv1 = conv2d_transpose('deconv1', f_a_reshaped, output_shape=[self.batch_size, h2, w2, 128],
kernel_size=(4, 4),
padding='SAME', stride=(2, 2),
l2_strength=self.args.l2_strength, bias=self.args.bias, activation=tf.nn.relu,
batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training)
h3, w3 = conv2.get_shape()[1].value, conv2.get_shape()[2].value
deconv2 = conv2d_transpose('deconv2', deconv1, output_shape=[self.batch_size, h3, w3, 128],
kernel_size=(6, 6),
padding='SAME', stride=(2, 2),
l2_strength=self.args.l2_strength, bias=self.args.bias, activation=tf.nn.relu,
batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training)
h4, w4 = conv1.get_shape()[1].value, conv1.get_shape()[2].value
deconv3 = conv2d_transpose('deconv3', deconv2, output_shape=[self.batch_size, h4, w4, 128],
kernel_size=(6, 6),
padding='SAME', stride=(2, 2),
l2_strength=self.args.l2_strength, bias=self.args.bias, activation=tf.nn.relu,
batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training)
deconv4 = conv2d_transpose('deconv4', deconv3,
output_shape=[self.batch_size, self.args.img_height, self.args.img_width,
self.args.num_channels],
kernel_size=(8, 8),
padding='SAME', stride=(2, 2),
l2_strength=self.args.l2_strength, bias=self.args.bias, activation=None,
batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training)
return deconv4
def __init_network(self):
with tf.variable_scope('full_network', reuse=self.reuse):
self.network_template = tf.make_template('network_template', self.__init_template)
all_obs = [None for _ in range(self.prediction_steps)]
current_input = self.X
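            # Multi-step rollout: each predicted frame is concatenated back
            # into the input stack and one frame's worth of channels is
            # dropped (the hard-coded -3 below assumes num_channels == 3).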
for i in range(-1, self.prediction_steps - 1):
if i == -1:
all_obs[i + 1] = self.network_template(current_input, self.actions[:, i + 1, :])
else:
current_input = tf.concat([all_obs[i], current_input[:, :, :, :-3]], axis=-1)
all_obs[i + 1] = self.network_template(current_input, self.actions[:, i + 1, :])
self.obs_stacked = tf.stack(all_obs[:self.prediction_steps], axis=1)
self.loss = tf.reduce_mean((1 / self.prediction_steps) * tf.nn.l2_loss(self.obs_stacked - self.y))
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate,
momentum=self.RMSProp_params[0], decay=0.95)
params = tf.trainable_variables()
grads = tf.gradients(self.loss, params)
# Minimum squared gradient is 0.01
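                # i.e. each gradient element keeps its sign while its
                # magnitude is floored at sqrt(0.01) = 0.1.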
grads = [tf.sign(grads[i]) * tf.sqrt(tf.maximum(tf.square(grads[i]), 0.01)) for i in range(len(grads))]
grads = list(zip(grads, params))
self.train_op = optimizer.apply_gradients(grads)
def __build(self):
self.__init_input()
self.__init_network()
| 58.157143
| 119
| 0.550602
|
24d2351d67055fc56179a539351e248df0aa6128
| 9,055
|
py
|
Python
|
examples/pytorch/seal/sampler.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 9,516
|
2018-12-08T22:11:31.000Z
|
2022-03-31T13:04:33.000Z
|
examples/pytorch/seal/sampler.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 2,494
|
2018-12-08T22:43:00.000Z
|
2022-03-31T21:16:27.000Z
|
examples/pytorch/seal/sampler.py
|
ketyi/dgl
|
a1b859c29b63a673c148d13231a49504740e0e01
|
[
"Apache-2.0"
] | 2,529
|
2018-12-08T22:56:14.000Z
|
2022-03-31T13:07:41.000Z
|
import os.path as osp
from tqdm import tqdm
from copy import deepcopy
import torch
import dgl
from torch.utils.data import DataLoader, Dataset
from dgl import DGLGraph, NID
from dgl.dataloading.negative_sampler import Uniform
from dgl import add_self_loop
from utils import drnl_node_labeling
class GraphDataSet(Dataset):
"""
    GraphDataSet for torch DataLoader
"""
def __init__(self, graph_list, tensor):
self.graph_list = graph_list
self.tensor = tensor
def __len__(self):
return len(self.graph_list)
def __getitem__(self, index):
return (self.graph_list[index], self.tensor[index])
class PosNegEdgesGenerator(object):
"""
Generate positive and negative samples
Attributes:
g(dgl.DGLGraph): graph
split_edge(dict): split edge
neg_samples(int): num of negative samples per positive sample
subsample_ratio(float): ratio of subsample
shuffle(bool): if shuffle generated graph list
"""
def __init__(self, g, split_edge, neg_samples=1, subsample_ratio=0.1, shuffle=True):
self.neg_sampler = Uniform(neg_samples)
self.subsample_ratio = subsample_ratio
self.split_edge = split_edge
self.g = g
self.shuffle = shuffle
def __call__(self, split_type):
if split_type == 'train':
subsample_ratio = self.subsample_ratio
else:
subsample_ratio = 1
pos_edges = self.split_edge[split_type]['edge']
if split_type == 'train':
# Adding self loop in train avoids sampling the source node itself.
g = add_self_loop(self.g)
eids = g.edge_ids(pos_edges[:, 0], pos_edges[:, 1])
neg_edges = torch.stack(self.neg_sampler(g, eids), dim=1)
else:
neg_edges = self.split_edge[split_type]['edge_neg']
pos_edges = self.subsample(pos_edges, subsample_ratio).long()
neg_edges = self.subsample(neg_edges, subsample_ratio).long()
edges = torch.cat([pos_edges, neg_edges])
labels = torch.cat([torch.ones(pos_edges.size(0), 1), torch.zeros(neg_edges.size(0), 1)])
if self.shuffle:
perm = torch.randperm(edges.size(0))
edges = edges[perm]
labels = labels[perm]
return edges, labels
def subsample(self, edges, subsample_ratio):
"""
Subsample generated edges.
Args:
edges(Tensor): edges to subsample
subsample_ratio(float): ratio of subsample
Returns:
edges(Tensor): edges
"""
num_edges = edges.size(0)
perm = torch.randperm(num_edges)
perm = perm[:int(subsample_ratio * num_edges)]
edges = edges[perm]
return edges
class EdgeDataSet(Dataset):
"""
Assistant Dataset for speeding up the SEALSampler
"""
def __init__(self, edges, labels, transform):
self.edges = edges
self.transform = transform
self.labels = labels
def __len__(self):
return len(self.edges)
def __getitem__(self, index):
subgraph = self.transform(self.edges[index])
return (subgraph, self.labels[index])
class SEALSampler(object):
"""
Sampler for SEAL in paper(no-block version)
The strategy is to sample all the k-hop neighbors around the two target nodes.
Attributes:
graph(DGLGraph): The graph
hop(int): num of hop
num_workers(int): num of workers
"""
def __init__(self, graph, hop=1, num_workers=32, print_fn=print):
self.graph = graph
self.hop = hop
self.print_fn = print_fn
self.num_workers = num_workers
def sample_subgraph(self, target_nodes):
"""
Args:
target_nodes(Tensor): Tensor of two target nodes
Returns:
subgraph(DGLGraph): subgraph
"""
sample_nodes = [target_nodes]
frontiers = target_nodes
for i in range(self.hop):
frontiers = self.graph.out_edges(frontiers)[1]
frontiers = torch.unique(frontiers)
sample_nodes.append(frontiers)
sample_nodes = torch.cat(sample_nodes)
sample_nodes = torch.unique(sample_nodes)
subgraph = dgl.node_subgraph(self.graph, sample_nodes)
        # Each node has a unique node id in the new subgraph
u_id = int(torch.nonzero(subgraph.ndata[NID] == int(target_nodes[0]), as_tuple=False))
v_id = int(torch.nonzero(subgraph.ndata[NID] == int(target_nodes[1]), as_tuple=False))
# remove link between target nodes in positive subgraphs.
if subgraph.has_edges_between(u_id, v_id):
link_id = subgraph.edge_ids(u_id, v_id, return_uv=True)[2]
subgraph.remove_edges(link_id)
if subgraph.has_edges_between(v_id, u_id):
link_id = subgraph.edge_ids(v_id, u_id, return_uv=True)[2]
subgraph.remove_edges(link_id)
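        # Double-Radius Node Labeling (DRNL) from the SEAL paper: each node is
        # labeled by its distances to the two target nodes.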
z = drnl_node_labeling(subgraph, u_id, v_id)
subgraph.ndata['z'] = z
return subgraph
def _collate(self, batch):
batch_graphs, batch_labels = map(list, zip(*batch))
batch_graphs = dgl.batch(batch_graphs)
batch_labels = torch.stack(batch_labels)
return batch_graphs, batch_labels
def __call__(self, edges, labels):
subgraph_list = []
labels_list = []
edge_dataset = EdgeDataSet(edges, labels, transform=self.sample_subgraph)
self.print_fn('Using {} workers in sampling job.'.format(self.num_workers))
sampler = DataLoader(edge_dataset, batch_size=32, num_workers=self.num_workers,
shuffle=False, collate_fn=self._collate)
for subgraph, label in tqdm(sampler, ncols=100):
label_copy = deepcopy(label)
subgraph = dgl.unbatch(subgraph)
del label
subgraph_list += subgraph
labels_list.append(label_copy)
return subgraph_list, torch.cat(labels_list)
class SEALData(object):
"""
1. Generate positive and negative samples
2. Subgraph sampling
Attributes:
g(dgl.DGLGraph): graph
split_edge(dict): split edge
hop(int): num of hop
neg_samples(int): num of negative samples per positive sample
subsample_ratio(float): ratio of subsample
        use_coalesce(bool): True to coalesce the graph. Graphs with multi-edges need to be coalesced
"""
def __init__(self, g, split_edge, hop=1, neg_samples=1, subsample_ratio=1, prefix=None, save_dir=None,
num_workers=32, shuffle=True, use_coalesce=True, print_fn=print):
self.g = g
self.hop = hop
self.subsample_ratio = subsample_ratio
self.prefix = prefix
self.save_dir = save_dir
self.print_fn = print_fn
self.generator = PosNegEdgesGenerator(g=self.g,
split_edge=split_edge,
neg_samples=neg_samples,
subsample_ratio=subsample_ratio,
shuffle=shuffle)
if use_coalesce:
for k, v in g.edata.items():
                g.edata[k] = v.float()  # dgl.to_simple() requires edge data to be float
self.g = dgl.to_simple(g, copy_ndata=True, copy_edata=True, aggregator='sum')
self.ndata = {k: v for k, v in self.g.ndata.items()}
self.edata = {k: v for k, v in self.g.edata.items()}
self.g.ndata.clear()
self.g.edata.clear()
self.print_fn("Save ndata and edata in class.")
self.print_fn("Clear ndata and edata in graph.")
self.sampler = SEALSampler(graph=self.g,
hop=hop,
num_workers=num_workers,
print_fn=print_fn)
def __call__(self, split_type):
if split_type == 'train':
subsample_ratio = self.subsample_ratio
else:
subsample_ratio = 1
path = osp.join(self.save_dir or '', '{}_{}_{}-hop_{}-subsample.bin'.format(self.prefix, split_type,
self.hop, subsample_ratio))
if osp.exists(path):
self.print_fn("Load existing processed {} files".format(split_type))
graph_list, data = dgl.load_graphs(path)
dataset = GraphDataSet(graph_list, data['labels'])
else:
self.print_fn("Processed {} files not exist.".format(split_type))
edges, labels = self.generator(split_type)
self.print_fn("Generate {} edges totally.".format(edges.size(0)))
graph_list, labels = self.sampler(edges, labels)
dataset = GraphDataSet(graph_list, labels)
dgl.save_graphs(path, graph_list, {'labels': labels})
self.print_fn("Save preprocessed subgraph to {}".format(path))
return dataset
| 34.826923
| 111
| 0.608393
|
e4714ac7e9213e5f0d9aa3942a914a7bb81965ee
| 1,288
|
py
|
Python
|
aliyun-python-sdk-sas/aliyunsdksas/request/v20181203/DescribeExposedStatisticsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-sas/aliyunsdksas/request/v20181203/DescribeExposedStatisticsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-sas/aliyunsdksas/request/v20181203/DescribeExposedStatisticsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class DescribeExposedStatisticsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sas', '2018-12-03', 'DescribeExposedStatistics','sas')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
| 40.25
| 84
| 0.773292
|
5a88d2e913264d15e3bb3e26112c9dfd3c09d833
| 8,138
|
py
|
Python
|
lib/js.py
|
walchko/rambler
|
dbd4357b7aaab582280155e6b722868896b1b8dd
|
[
"MIT"
] | null | null | null |
lib/js.py
|
walchko/rambler
|
dbd4357b7aaab582280155e6b722868896b1b8dd
|
[
"MIT"
] | null | null | null |
lib/js.py
|
walchko/rambler
|
dbd4357b7aaab582280155e6b722868896b1b8dd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# by Kevin J. Walchko 26 Aug 2014
#
# PS4 has 6 axes, 14 buttons, 1 hat
# This program doesn't grab all buttons, just the most useful :)
# Warning: this code needs to be updated to support the new py2/py3 library. This
# may crash under py3 and hasn't been tested with the current pygecko library.
# https://github.com/chrippa/ds4drv
from __future__ import division
from __future__ import print_function
from math import sqrt
try:
# brew install sdl2
# pip install PySDL2
import sdl2
except ImportError:
print('You must install SLD2 library')
print('pip install PySDL2')
exit()
class Sensors(object):
x = 0
y = 0
z = 0
def __init__(self, x, y, z):
self.x = x / 32768
self.y = y / 32768
self.z = z / 32768
def __getitem__(self, i):
        if i < 0 or i > 2:
raise Exception('Sensors out of range')
if i == 0:
return self.x
elif i == 1:
return self.y
else:
return self.z
class Trigger(object):
x = 0
y = 0
def __init__(self, x, y):
"""
triggers go from -32k to +32k
I changed them to go to: 0 to 2.0
"""
self.x = x / 32768 + 1.0
self.y = y / 32768 + 1.0
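        # Mapping (illustrative): raw -32768 -> 0.0 (unpressed),
        # raw 0 -> 1.0 (half), raw +32767 -> ~2.0 (fully pressed).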
def __getitem__(self, i):
        if i < 0 or i > 1:
raise Exception('Axis out of range')
if i == 0:
return self.x
else:
return self.y
class Axis(object):
x = 0
y = 0
def __init__(self, x, y):
"""
x and y are independent and range from -1 to 1
Changed orientation frame to:
^ X
|
+-----> Y
"""
self.x = -y / 32768
self.y = x / 32768
# self.x, self.y = self.normalize(-y, x)
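        # e.g. pushing the stick fully forward (raw y = -32768) gives
        # self.x = +1.0 in the rotated frame sketched above.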
def __getitem__(self, i):
        if i < 0 or i > 1:
raise Exception('Axis out of range')
if i == 0:
return self.x
else:
return self.y
def normalize(self, x, y):
norm = x**2 + y**2
if norm > 0:
inorm = 1/sqrt(norm)
x *= inorm
y *= inorm
return (x, y)
else:
return (0, 0)
joystick_format = """
====================================================
Sticks:
Left: {:>5.2f} {:>5.2f} Right: {:>5.2f} {:>5.2f}
L1: {} R1: {}
L2: {:>4.2f} R2: {:>4.2f}
L3: {} R3: {}
Buttons:
Hat: {}
Square: {} Triangle: {} Circle: {} X: {}
Press [Share] to exit
"""
class PS4(object):
leftStick = None
rightStick = None
accels = None
gyros = None
buttons = None
share = None
options = None
stick = None
def __init__(self, ls, rs, triggers, hat, shoulder, stick, btns, options, share):
self.leftStick = ls
self.rightStick = rs
self.buttons = btns
# self.accels = accels
# self.gyros = gyros
self.hat = hat
self.triggers = triggers
self.shoulder = shoulder
self.stick = stick
self.share = share
self.options = options
def __str__(self):
s = joystick_format.format(
self.leftStick.x, self.leftStick.y,
self.rightStick.x, self.rightStick.y,
self.shoulder[0], self.shoulder[1],
self.triggers.x, self.triggers.y,
self.stick[0], self.stick[1],
# self.accels.x, self.accels.y, self.accels.z,
# self.gyros.x, self.gyros.y, self.gyros.z,
self.hat,
self.buttons[0], self.buttons[1], self.buttons[2], self.buttons[3]
)
return s
class Joystick(object):
"""
    Joystick class setup to handle a Playstation PS4 Controller. If no joystick
    is found, self.valid is False. If it is not valid, any returned
    dictionaries will contain all 0 values.
Buttons
Square = joystick button 0
X = joystick button 1
Circle = joystick button 2
Triangle= joystick button 3
L1 = joystick button 4
R1 = joystick button 5
L2 = joystick button 6
R2 = joystick button 7
Share = joystick button 8
Options = joystick button 9
L3 = joystick button 10
R3 = joystick button 11
PS = joystick button 12
PadPress= joystick button 13
Axes:
LeftStickX = 0
LeftStickY = 1
RightStickX = 2
RightStickY = 5
L2 = 3 (-1.0f to 1.0f range, unpressed is -1.0f)
R2 = 4 (-1.0f to 1.0f range, unpressed is -1.0f)
WARNING: doesn't work as a process ... something SDL is doing
"""
def __init__(self):
# init SDL2 and grab joystick
sdl2.SDL_Init(sdl2.SDL_INIT_JOYSTICK)
self.js = sdl2.SDL_JoystickOpen(0)
# grab info for display
a = sdl2.SDL_JoystickNumAxes(self.js)
b = sdl2.SDL_JoystickNumButtons(self.js)
h = sdl2.SDL_JoystickNumHats(self.js)
if a == -1:
print('*** No Joystick found ***')
self.valid = False
else:
print('==========================================')
print(' PS4 Joystick ')
print(' axes:', a)
print(' buttons:', b)
print(' hats:', h)
print('==========================================')
self.valid = True
# exit(0)
def __del__(self):
# clean-up
sdl2.SDL_JoystickClose(self.js)
print('Bye ...')
def get(self):
if not self.valid:
return None
js = self.js
sdl2.SDL_JoystickUpdate()
share = sdl2.SDL_JoystickGetButton(js, 8)
if share:
exit(0)
ls = Axis(
sdl2.SDL_JoystickGetAxis(js, 0),
sdl2.SDL_JoystickGetAxis(js, 1)
)
rs = Axis(
sdl2.SDL_JoystickGetAxis(js, 2),
sdl2.SDL_JoystickGetAxis(js, 5)
)
triggers = Trigger(
sdl2.SDL_JoystickGetAxis(js, 3),
sdl2.SDL_JoystickGetAxis(js, 4)
)
shoulder = [
sdl2.SDL_JoystickGetButton(js, 4),
sdl2.SDL_JoystickGetButton(js, 5)
]
stick = [
sdl2.SDL_JoystickGetButton(js, 10),
sdl2.SDL_JoystickGetButton(js, 11)
]
# I seem to have lost sensors
# a = Sensors(
# sdl2.SDL_JoystickGetAxis(js, 6),
# sdl2.SDL_JoystickGetAxis(js, 7),
# sdl2.SDL_JoystickGetAxis(js, 8)
# )
#
# g = Sensors(
# sdl2.SDL_JoystickGetAxis(js, 9),
# sdl2.SDL_JoystickGetAxis(js, 10),
# sdl2.SDL_JoystickGetAxis(js, 11)
# )
b = [
sdl2.SDL_JoystickGetButton(js, 0), # square
sdl2.SDL_JoystickGetButton(js, 3), # triangle
sdl2.SDL_JoystickGetButton(js, 2), # circle
sdl2.SDL_JoystickGetButton(js, 1) # x
]
hat = sdl2.SDL_JoystickGetHat(js, 0)
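        # Note: SDL reports the hat as a bitmask ([up right down left] = [1 2 4 8]),
        # so diagonals combine, e.g. up-right == 1 | 2 == 3.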
share = sdl2.SDL_JoystickGetButton(js, 8)
options = sdl2.SDL_JoystickGetButton(js, 9)
ps4 = PS4(ls, rs, triggers, hat, shoulder, stick, b, options, share)
# left axis
# x = sdl2.SDL_JoystickGetAxis(js, 0) / 32768
# y = sdl2.SDL_JoystickGetAxis(js, 1) / 32768
# ps4['leftStick'] = [x, y]
#
# # right axis
# x = sdl2.SDL_JoystickGetAxis(js, 2) / 32768
# y = sdl2.SDL_JoystickGetAxis(js, 5) / 32768
# ps4['rightStick'] = [x, y]
# # other axes
# ps4.axes.L2 = sdl2.SDL_JoystickGetAxis(js, 3) / 32768
# ps4.axes.R2 = sdl2.SDL_JoystickGetAxis(js, 4) / 32768
#
# # accels
# x = sdl2.SDL_JoystickGetAxis(js, 6) / 32768
# y = sdl2.SDL_JoystickGetAxis(js, 7) / 32768
# z = sdl2.SDL_JoystickGetAxis(js, 8) / 32768
# ps4.axes.accels = [x, y, z]
#
# # gyros
# x = sdl2.SDL_JoystickGetAxis(js, 9) / 32768
# y = sdl2.SDL_JoystickGetAxis(js, 10) / 32768
# z = sdl2.SDL_JoystickGetAxis(js, 11) / 32768
# ps4.axes.gyros = [x, y, z]
#
# # get buttons
# ps4.buttons.s = sdl2.SDL_JoystickGetButton(js, 0)
# ps4.buttons.x = sdl2.SDL_JoystickGetButton(js, 1)
# ps4.buttons.o = sdl2.SDL_JoystickGetButton(js, 2)
# ps4.buttons.t = sdl2.SDL_JoystickGetButton(js, 3)
# ps4.buttons.L1 = sdl2.SDL_JoystickGetButton(js, 4)
# ps4.buttons.R1 = sdl2.SDL_JoystickGetButton(js, 5)
# ps4.buttons.L2 = sdl2.SDL_JoystickGetButton(js, 6)
# ps4.buttons.R2 = sdl2.SDL_JoystickGetButton(js, 7)
# ps4.buttons.share = sdl2.SDL_JoystickGetButton(js, 8)
# ps4.buttons.options = sdl2.SDL_JoystickGetButton(js, 9)
# ps4.buttons.L3 = sdl2.SDL_JoystickGetButton(js, 10)
# ps4.buttons.R3 = sdl2.SDL_JoystickGetButton(js, 11)
# ps4.buttons.ps = sdl2.SDL_JoystickGetButton(js, 12)
# ps4.buttons.pad = sdl2.SDL_JoystickGetButton(js, 13)
#
# # get hat
# # [up right down left] = [1 2 4 8]
# ps4.buttons.hat = sdl2.SDL_JoystickGetHat(js, 0)
# print('b 12', sdl2.SDL_JoystickGetButton(js, 12))
# print('b 13', sdl2.SDL_JoystickGetButton(js, 13))
return ps4
def main():
import time
js = Joystick()
while True:
try:
ps4 = js.get()
print(ps4)
time.sleep(0.1)
except KeyboardInterrupt:
print('js exiting ...')
return
if __name__ == "__main__":
main()
| 23.119318
| 82
| 0.624846
|
edb0d233c62a5d3da7ffdc1195008f602cea7e7b
| 1,827
|
py
|
Python
|
examples/series_getitem/series_getitem_scalar_multiple_result.py
|
samir-nasibli/sdc
|
b9144c8799d6454dec3e5c550e305963b24c1570
|
[
"BSD-2-Clause"
] | null | null | null |
examples/series_getitem/series_getitem_scalar_multiple_result.py
|
samir-nasibli/sdc
|
b9144c8799d6454dec3e5c550e305963b24c1570
|
[
"BSD-2-Clause"
] | null | null | null |
examples/series_getitem/series_getitem_scalar_multiple_result.py
|
samir-nasibli/sdc
|
b9144c8799d6454dec3e5c550e305963b24c1570
|
[
"BSD-2-Clause"
] | null | null | null |
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numpy as np
import pandas as pd
from numba import njit
@njit
def series_getitem_scalar_many_idx():
series = pd.Series([5, 4, 3, 2, 1], index=[0, 2, 0, 6, 0])
return series[0]
# Expected Series
# 0 5
# 0 3
# 0 1
# dtype: int64
print(series_getitem_scalar_many_idx())
| 41.522727
| 79
| 0.684729
|
d588e9b5a61fed8a452f7c1ec1bf58c32d5843fe
| 65,466
|
py
|
Python
|
google/cloud/bigquery/job/query.py
|
quentin-sommer/python-bigquery
|
1312093855b1b6bd81c5f6e9e358151cfbd366b8
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/bigquery/job/query.py
|
quentin-sommer/python-bigquery
|
1312093855b1b6bd81c5f6e9e358151cfbd366b8
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/bigquery/job/query.py
|
quentin-sommer/python-bigquery
|
1312093855b1b6bd81c5f6e9e358151cfbd366b8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for query jobs."""
import concurrent.futures
import copy
import re
import typing
from typing import Any, Dict, Optional, Union
from google.api_core import exceptions
from google.api_core.future import polling as polling_future
import requests
from google.cloud.bigquery.dataset import Dataset
from google.cloud.bigquery.dataset import DatasetListItem
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.encryption_configuration import EncryptionConfiguration
from google.cloud.bigquery.enums import KeyResultStatementKind
from google.cloud.bigquery.external_config import ExternalConfig
from google.cloud.bigquery import _helpers
from google.cloud.bigquery.query import _query_param_from_api_repr
from google.cloud.bigquery.query import ArrayQueryParameter
from google.cloud.bigquery.query import ScalarQueryParameter
from google.cloud.bigquery.query import StructQueryParameter
from google.cloud.bigquery.query import UDFResource
from google.cloud.bigquery.retry import DEFAULT_RETRY
from google.cloud.bigquery.routine import RoutineReference
from google.cloud.bigquery.table import _EmptyRowIterator
from google.cloud.bigquery.table import RangePartitioning
from google.cloud.bigquery.table import _table_arg_to_table_ref
from google.cloud.bigquery.table import TableReference
from google.cloud.bigquery.table import TimePartitioning
from google.cloud.bigquery._tqdm_helpers import wait_for_query
from google.cloud.bigquery.job.base import _AsyncJob
from google.cloud.bigquery.job.base import _JobConfig
from google.cloud.bigquery.job.base import _JobReference
if typing.TYPE_CHECKING: # pragma: NO COVER
# Assumption: type checks are only used by library developers and CI environments
# that have all optional dependencies installed, thus no conditional imports.
import pandas
import pyarrow
from google.api_core import retry as retries
from google.cloud import bigquery_storage
from google.cloud.bigquery.table import RowIterator
_CONTAINS_ORDER_BY = re.compile(r"ORDER\s+BY", re.IGNORECASE)
_TIMEOUT_BUFFER_SECS = 0.1
def _contains_order_by(query):
"""Do we need to preserve the order of the query results?
This function has known false positives, such as with ordered window
functions:
.. code-block:: sql
SELECT SUM(x) OVER (
window_name
PARTITION BY...
ORDER BY...
window_frame_clause)
FROM ...
This false positive failure case means the behavior will be correct, but
downloading results with the BigQuery Storage API may be slower than it
otherwise would. This is preferable to the false negative case, where
results are expected to be in order but are not (due to parallel reads).
"""
return query and _CONTAINS_ORDER_BY.search(query)
def _from_api_repr_query_parameters(resource):
return [_query_param_from_api_repr(mapping) for mapping in resource]
def _to_api_repr_query_parameters(value):
return [query_parameter.to_api_repr() for query_parameter in value]
def _from_api_repr_udf_resources(resource):
udf_resources = []
for udf_mapping in resource:
for udf_type, udf_value in udf_mapping.items():
udf_resources.append(UDFResource(udf_type, udf_value))
return udf_resources
def _to_api_repr_udf_resources(value):
return [{udf_resource.udf_type: udf_resource.value} for udf_resource in value]
def _from_api_repr_table_defs(resource):
return {k: ExternalConfig.from_api_repr(v) for k, v in resource.items()}
def _to_api_repr_table_defs(value):
return {k: ExternalConfig.to_api_repr(v) for k, v in value.items()}
class DmlStats(typing.NamedTuple):
"""Detailed statistics for DML statements.
https://cloud.google.com/bigquery/docs/reference/rest/v2/DmlStats
"""
inserted_row_count: int = 0
"""Number of inserted rows. Populated by DML INSERT and MERGE statements."""
deleted_row_count: int = 0
"""Number of deleted rows. populated by DML DELETE, MERGE and TRUNCATE statements.
"""
updated_row_count: int = 0
"""Number of updated rows. Populated by DML UPDATE and MERGE statements."""
@classmethod
def from_api_repr(cls, stats: Dict[str, str]) -> "DmlStats":
# NOTE: The field order here must match the order of fields set at the
# class level.
api_fields = ("insertedRowCount", "deletedRowCount", "updatedRowCount")
args = (
int(stats.get(api_field, default_val))
for api_field, default_val in zip(api_fields, cls.__new__.__defaults__)
)
return cls(*args)
class ScriptOptions:
"""Options controlling the execution of scripts.
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#ScriptOptions
"""
def __init__(
self,
statement_timeout_ms: Optional[int] = None,
statement_byte_budget: Optional[int] = None,
key_result_statement: Optional[KeyResultStatementKind] = None,
):
self._properties = {}
self.statement_timeout_ms = statement_timeout_ms
self.statement_byte_budget = statement_byte_budget
self.key_result_statement = key_result_statement
@classmethod
def from_api_repr(cls, resource: Dict[str, Any]) -> "ScriptOptions":
"""Factory: construct instance from the JSON repr.
Args:
            resource(Dict[str, Any]):
ScriptOptions representation returned from API.
Returns:
google.cloud.bigquery.ScriptOptions:
                ScriptOptions parsed from ``resource``.
"""
entry = cls()
entry._properties = copy.deepcopy(resource)
return entry
def to_api_repr(self) -> Dict[str, Any]:
"""Construct the API resource representation."""
return copy.deepcopy(self._properties)
@property
def statement_timeout_ms(self) -> Union[int, None]:
"""Timeout period for each statement in a script."""
return _helpers._int_or_none(self._properties.get("statementTimeoutMs"))
@statement_timeout_ms.setter
def statement_timeout_ms(self, value: Union[int, None]):
if value is not None:
value = str(value)
self._properties["statementTimeoutMs"] = value
@property
def statement_byte_budget(self) -> Union[int, None]:
"""Limit on the number of bytes billed per statement.
Exceeding this budget results in an error.
"""
return _helpers._int_or_none(self._properties.get("statementByteBudget"))
@statement_byte_budget.setter
def statement_byte_budget(self, value: Union[int, None]):
if value is not None:
value = str(value)
self._properties["statementByteBudget"] = value
@property
def key_result_statement(self) -> Union[KeyResultStatementKind, None]:
"""Determines which statement in the script represents the "key result".
This is used to populate the schema and query results of the script job.
Default is ``KeyResultStatementKind.LAST``.
"""
return self._properties.get("keyResultStatement")
@key_result_statement.setter
def key_result_statement(self, value: Union[KeyResultStatementKind, None]):
self._properties["keyResultStatement"] = value
class QueryJobConfig(_JobConfig):
"""Configuration options for query jobs.
All properties in this class are optional. Values which are :data:`None` ->
server defaults. Set properties on the constructed configuration by using
the property name as the name of a keyword argument.
"""
def __init__(self, **kwargs):
super(QueryJobConfig, self).__init__("query", **kwargs)
@property
def destination_encryption_configuration(self):
"""google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom
encryption configuration for the destination table.
Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
if using default encryption.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.destination_encryption_configuration
"""
prop = self._get_sub_prop("destinationEncryptionConfiguration")
if prop is not None:
prop = EncryptionConfiguration.from_api_repr(prop)
return prop
@destination_encryption_configuration.setter
def destination_encryption_configuration(self, value):
api_repr = value
if value is not None:
api_repr = value.to_api_repr()
self._set_sub_prop("destinationEncryptionConfiguration", api_repr)
@property
def allow_large_results(self):
"""bool: Allow large query results tables (legacy SQL, only)
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.allow_large_results
"""
return self._get_sub_prop("allowLargeResults")
@allow_large_results.setter
def allow_large_results(self, value):
self._set_sub_prop("allowLargeResults", value)
@property
def create_disposition(self):
"""google.cloud.bigquery.job.CreateDisposition: Specifies behavior
for creating tables.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.create_disposition
"""
return self._get_sub_prop("createDisposition")
@create_disposition.setter
def create_disposition(self, value):
self._set_sub_prop("createDisposition", value)
@property
def default_dataset(self):
"""google.cloud.bigquery.dataset.DatasetReference: the default dataset
to use for unqualified table names in the query or :data:`None` if not
set.
The ``default_dataset`` setter accepts:
- a :class:`~google.cloud.bigquery.dataset.Dataset`, or
- a :class:`~google.cloud.bigquery.dataset.DatasetReference`, or
- a :class:`str` of the fully-qualified dataset ID in standard SQL
          format. The value must include a project ID and dataset ID
separated by ``.``. For example: ``your-project.your_dataset``.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.default_dataset
"""
prop = self._get_sub_prop("defaultDataset")
if prop is not None:
prop = DatasetReference.from_api_repr(prop)
return prop
@default_dataset.setter
def default_dataset(self, value):
if value is None:
self._set_sub_prop("defaultDataset", None)
return
if isinstance(value, str):
value = DatasetReference.from_string(value)
if isinstance(value, (Dataset, DatasetListItem)):
value = value.reference
resource = value.to_api_repr()
self._set_sub_prop("defaultDataset", resource)
@property
def destination(self):
"""google.cloud.bigquery.table.TableReference: table where results are
written or :data:`None` if not set.
The ``destination`` setter accepts:
- a :class:`~google.cloud.bigquery.table.Table`, or
- a :class:`~google.cloud.bigquery.table.TableReference`, or
- a :class:`str` of the fully-qualified table ID in standard SQL
          format. The value must include a project ID, dataset ID, and table
ID, each separated by ``.``. For example:
``your-project.your_dataset.your_table``.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.destination_table
"""
prop = self._get_sub_prop("destinationTable")
if prop is not None:
prop = TableReference.from_api_repr(prop)
return prop
@destination.setter
def destination(self, value):
if value is None:
self._set_sub_prop("destinationTable", None)
return
value = _table_arg_to_table_ref(value)
resource = value.to_api_repr()
self._set_sub_prop("destinationTable", resource)
@property
def dry_run(self):
"""bool: :data:`True` if this query should be a dry run to estimate
costs.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfiguration.FIELDS.dry_run
"""
return self._properties.get("dryRun")
@dry_run.setter
def dry_run(self, value):
self._properties["dryRun"] = value
@property
def flatten_results(self):
"""bool: Flatten nested/repeated fields in results. (Legacy SQL only)
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.flatten_results
"""
return self._get_sub_prop("flattenResults")
@flatten_results.setter
def flatten_results(self, value):
self._set_sub_prop("flattenResults", value)
@property
def maximum_billing_tier(self):
"""int: Deprecated. Changes the billing tier to allow high-compute
queries.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.maximum_billing_tier
"""
return self._get_sub_prop("maximumBillingTier")
@maximum_billing_tier.setter
def maximum_billing_tier(self, value):
self._set_sub_prop("maximumBillingTier", value)
@property
def maximum_bytes_billed(self):
"""int: Maximum bytes to be billed for this job or :data:`None` if not set.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.maximum_bytes_billed
"""
return _helpers._int_or_none(self._get_sub_prop("maximumBytesBilled"))
@maximum_bytes_billed.setter
def maximum_bytes_billed(self, value):
self._set_sub_prop("maximumBytesBilled", str(value))
@property
def priority(self):
"""google.cloud.bigquery.job.QueryPriority: Priority of the query.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.priority
"""
return self._get_sub_prop("priority")
@priority.setter
def priority(self, value):
self._set_sub_prop("priority", value)
@property
def query_parameters(self):
"""List[Union[google.cloud.bigquery.query.ArrayQueryParameter, \
google.cloud.bigquery.query.ScalarQueryParameter, \
google.cloud.bigquery.query.StructQueryParameter]]: list of parameters
for parameterized query (empty by default)
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.query_parameters
"""
prop = self._get_sub_prop("queryParameters", default=[])
return _from_api_repr_query_parameters(prop)
@query_parameters.setter
def query_parameters(self, values):
self._set_sub_prop("queryParameters", _to_api_repr_query_parameters(values))
@property
def range_partitioning(self):
"""Optional[google.cloud.bigquery.table.RangePartitioning]:
Configures range-based partitioning for destination table.
.. note::
**Beta**. The integer range partitioning feature is in a
pre-release state and might change or have limited support.
Only specify at most one of
:attr:`~google.cloud.bigquery.job.LoadJobConfig.time_partitioning` or
:attr:`~google.cloud.bigquery.job.LoadJobConfig.range_partitioning`.
Raises:
ValueError:
If the value is not
:class:`~google.cloud.bigquery.table.RangePartitioning` or
:data:`None`.
"""
resource = self._get_sub_prop("rangePartitioning")
if resource is not None:
return RangePartitioning(_properties=resource)
@range_partitioning.setter
def range_partitioning(self, value):
resource = value
if isinstance(value, RangePartitioning):
resource = value._properties
elif value is not None:
raise ValueError(
"Expected value to be RangePartitioning or None, got {}.".format(value)
)
self._set_sub_prop("rangePartitioning", resource)
@property
def udf_resources(self):
"""List[google.cloud.bigquery.query.UDFResource]: user
defined function resources (empty by default)
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.user_defined_function_resources
"""
prop = self._get_sub_prop("userDefinedFunctionResources", default=[])
return _from_api_repr_udf_resources(prop)
@udf_resources.setter
def udf_resources(self, values):
self._set_sub_prop(
"userDefinedFunctionResources", _to_api_repr_udf_resources(values)
)
@property
def use_legacy_sql(self):
"""bool: Use legacy SQL syntax.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.use_legacy_sql
"""
return self._get_sub_prop("useLegacySql")
@use_legacy_sql.setter
def use_legacy_sql(self, value):
self._set_sub_prop("useLegacySql", value)
@property
def use_query_cache(self):
"""bool: Look for the query result in the cache.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.use_query_cache
"""
return self._get_sub_prop("useQueryCache")
@use_query_cache.setter
def use_query_cache(self, value):
self._set_sub_prop("useQueryCache", value)
@property
def write_disposition(self):
"""google.cloud.bigquery.job.WriteDisposition: Action that occurs if
the destination table already exists.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.write_disposition
"""
return self._get_sub_prop("writeDisposition")
@write_disposition.setter
def write_disposition(self, value):
self._set_sub_prop("writeDisposition", value)
@property
def table_definitions(self):
"""Dict[str, google.cloud.bigquery.external_config.ExternalConfig]:
Definitions for external tables or :data:`None` if not set.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.external_table_definitions
"""
prop = self._get_sub_prop("tableDefinitions")
if prop is not None:
prop = _from_api_repr_table_defs(prop)
return prop
@table_definitions.setter
def table_definitions(self, values):
self._set_sub_prop("tableDefinitions", _to_api_repr_table_defs(values))
@property
def time_partitioning(self):
"""Optional[google.cloud.bigquery.table.TimePartitioning]: Specifies
time-based partitioning for the destination table.
Only specify at most one of
:attr:`~google.cloud.bigquery.job.LoadJobConfig.time_partitioning` or
:attr:`~google.cloud.bigquery.job.LoadJobConfig.range_partitioning`.
Raises:
ValueError:
If the value is not
:class:`~google.cloud.bigquery.table.TimePartitioning` or
:data:`None`.
"""
prop = self._get_sub_prop("timePartitioning")
if prop is not None:
prop = TimePartitioning.from_api_repr(prop)
return prop
@time_partitioning.setter
def time_partitioning(self, value):
api_repr = value
if value is not None:
api_repr = value.to_api_repr()
self._set_sub_prop("timePartitioning", api_repr)
@property
def clustering_fields(self):
"""Optional[List[str]]: Fields defining clustering for the table
(Defaults to :data:`None`).
Clustering fields are immutable after table creation.
.. note::
BigQuery supports clustering for both partitioned and
non-partitioned tables.
"""
prop = self._get_sub_prop("clustering")
if prop is not None:
return list(prop.get("fields", ()))
@clustering_fields.setter
def clustering_fields(self, value):
"""Optional[List[str]]: Fields defining clustering for the table
(Defaults to :data:`None`).
"""
if value is not None:
self._set_sub_prop("clustering", {"fields": value})
else:
self._del_sub_prop("clustering")
@property
def schema_update_options(self):
"""List[google.cloud.bigquery.job.SchemaUpdateOption]: Specifies
updates to the destination table schema to allow as a side effect of
the query job.
"""
return self._get_sub_prop("schemaUpdateOptions")
@schema_update_options.setter
def schema_update_options(self, values):
self._set_sub_prop("schemaUpdateOptions", values)
@property
def script_options(self) -> ScriptOptions:
"""Connection properties which can modify the query behavior.
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#scriptoptions
"""
prop = self._get_sub_prop("scriptOptions")
if prop is not None:
prop = ScriptOptions.from_api_repr(prop)
return prop
@script_options.setter
def script_options(self, value: Union[ScriptOptions, None]):
if value is not None:
value = value.to_api_repr()
self._set_sub_prop("scriptOptions", value)
def to_api_repr(self) -> dict:
"""Build an API representation of the query job config.
Returns:
Dict: A dictionary in the format used by the BigQuery API.
"""
resource = copy.deepcopy(self._properties)
# Query parameters have an addition property associated with them
# to indicate if the query is using named or positional parameters.
query_parameters = resource["query"].get("queryParameters")
if query_parameters:
if query_parameters[0].get("name") is None:
resource["query"]["parameterMode"] = "POSITIONAL"
else:
resource["query"]["parameterMode"] = "NAMED"
return resource
class QueryJob(_AsyncJob):
"""Asynchronous job: query tables.
Args:
job_id (str): the job's ID, within the project belonging to ``client``.
query (str): SQL query string.
client (google.cloud.bigquery.client.Client):
A client which holds credentials and project configuration
for the dataset (which requires a project).
job_config (Optional[google.cloud.bigquery.job.QueryJobConfig]):
Extra configuration options for the query job.
"""
_JOB_TYPE = "query"
_UDF_KEY = "userDefinedFunctionResources"
def __init__(self, job_id, query, client, job_config=None):
super(QueryJob, self).__init__(job_id, client)
if job_config is None:
job_config = QueryJobConfig()
if job_config.use_legacy_sql is None:
job_config.use_legacy_sql = False
self._properties["configuration"] = job_config._properties
self._configuration = job_config
if query:
_helpers._set_sub_prop(
self._properties, ["configuration", "query", "query"], query
)
self._query_results = None
self._done_timeout = None
self._transport_timeout = None
@property
def allow_large_results(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.allow_large_results`.
"""
return self._configuration.allow_large_results
@property
def create_disposition(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.create_disposition`.
"""
return self._configuration.create_disposition
@property
def default_dataset(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.default_dataset`.
"""
return self._configuration.default_dataset
@property
def destination(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.destination`.
"""
return self._configuration.destination
@property
def destination_encryption_configuration(self):
"""google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom
encryption configuration for the destination table.
Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
if using default encryption.
See
:attr:`google.cloud.bigquery.job.QueryJobConfig.destination_encryption_configuration`.
"""
return self._configuration.destination_encryption_configuration
@property
def dry_run(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.dry_run`.
"""
return self._configuration.dry_run
@property
def flatten_results(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.flatten_results`.
"""
return self._configuration.flatten_results
@property
def priority(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.priority`.
"""
return self._configuration.priority
@property
def query(self):
"""str: The query text used in this query job.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.query
"""
return _helpers._get_sub_prop(
self._properties, ["configuration", "query", "query"]
)
@property
def query_parameters(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.query_parameters`.
"""
return self._configuration.query_parameters
@property
def udf_resources(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.udf_resources`.
"""
return self._configuration.udf_resources
@property
def use_legacy_sql(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.use_legacy_sql`.
"""
return self._configuration.use_legacy_sql
@property
def use_query_cache(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.use_query_cache`.
"""
return self._configuration.use_query_cache
@property
def write_disposition(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.write_disposition`.
"""
return self._configuration.write_disposition
@property
def maximum_billing_tier(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.maximum_billing_tier`.
"""
return self._configuration.maximum_billing_tier
@property
def maximum_bytes_billed(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.maximum_bytes_billed`.
"""
return self._configuration.maximum_bytes_billed
@property
def range_partitioning(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.range_partitioning`.
"""
return self._configuration.range_partitioning
@property
def table_definitions(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.table_definitions`.
"""
return self._configuration.table_definitions
@property
def time_partitioning(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.time_partitioning`.
"""
return self._configuration.time_partitioning
@property
def clustering_fields(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.clustering_fields`.
"""
return self._configuration.clustering_fields
@property
def schema_update_options(self):
"""See
:attr:`google.cloud.bigquery.job.QueryJobConfig.schema_update_options`.
"""
return self._configuration.schema_update_options
def to_api_repr(self):
"""Generate a resource for :meth:`_begin`."""
# Use to_api_repr to allow for some configuration properties to be set
# automatically.
configuration = self._configuration.to_api_repr()
return {
"jobReference": self._properties["jobReference"],
"configuration": configuration,
}
@classmethod
def from_api_repr(cls, resource: dict, client) -> "QueryJob":
"""Factory: construct a job given its API representation
Args:
resource (Dict): dataset job representation returned from the API
client (google.cloud.bigquery.client.Client):
Client which holds credentials and project
configuration for the dataset.
Returns:
google.cloud.bigquery.job.QueryJob: Job parsed from ``resource``.
"""
cls._check_resource_config(resource)
job_ref = _JobReference._from_api_repr(resource["jobReference"])
job = cls(job_ref, None, client=client)
job._set_properties(resource)
return job
@property
def query_plan(self):
"""Return query plan from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.query_plan
Returns:
List[google.cloud.bigquery.job.QueryPlanEntry]:
mappings describing the query plan, or an empty list
if the query has not yet completed.
"""
plan_entries = self._job_statistics().get("queryPlan", ())
return [QueryPlanEntry.from_api_repr(entry) for entry in plan_entries]
@property
def timeline(self):
"""List(TimelineEntry): Return the query execution timeline
from job statistics.
"""
raw = self._job_statistics().get("timeline", ())
return [TimelineEntry.from_api_repr(entry) for entry in raw]
@property
def total_bytes_processed(self):
"""Return total bytes processed from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.total_bytes_processed
Returns:
Optional[int]:
Total bytes processed by the job, or None if job is not
yet complete.
"""
result = self._job_statistics().get("totalBytesProcessed")
if result is not None:
result = int(result)
return result
@property
def total_bytes_billed(self):
"""Return total bytes billed from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.total_bytes_billed
Returns:
Optional[int]:
                Total bytes billed for the job, or None if job is not
yet complete.
"""
result = self._job_statistics().get("totalBytesBilled")
if result is not None:
result = int(result)
return result
@property
def billing_tier(self):
"""Return billing tier from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.billing_tier
Returns:
Optional[int]:
Billing tier used by the job, or None if job is not
yet complete.
"""
return self._job_statistics().get("billingTier")
@property
def cache_hit(self):
"""Return whether or not query results were served from cache.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.cache_hit
Returns:
Optional[bool]:
whether the query results were returned from cache, or None
if job is not yet complete.
"""
return self._job_statistics().get("cacheHit")
@property
def ddl_operation_performed(self):
"""Optional[str]: Return the DDL operation performed.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.ddl_operation_performed
"""
return self._job_statistics().get("ddlOperationPerformed")
@property
def ddl_target_routine(self):
"""Optional[google.cloud.bigquery.routine.RoutineReference]: Return the DDL target routine, present
for CREATE/DROP FUNCTION/PROCEDURE queries.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.ddl_target_routine
"""
prop = self._job_statistics().get("ddlTargetRoutine")
if prop is not None:
prop = RoutineReference.from_api_repr(prop)
return prop
@property
def ddl_target_table(self):
"""Optional[google.cloud.bigquery.table.TableReference]: Return the DDL target table, present
for CREATE/DROP TABLE/VIEW queries.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.ddl_target_table
"""
prop = self._job_statistics().get("ddlTargetTable")
if prop is not None:
prop = TableReference.from_api_repr(prop)
return prop
@property
def num_dml_affected_rows(self):
"""Return the number of DML rows affected by the job.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.num_dml_affected_rows
Returns:
Optional[int]:
number of DML rows affected by the job, or None if job is not
yet complete.
"""
result = self._job_statistics().get("numDmlAffectedRows")
if result is not None:
result = int(result)
return result
@property
def slot_millis(self):
"""Union[int, None]: Slot-milliseconds used by this query job."""
return _helpers._int_or_none(self._job_statistics().get("totalSlotMs"))
@property
def statement_type(self):
"""Return statement type from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.statement_type
Returns:
Optional[str]:
type of statement used by the job, or None if job is not
yet complete.
"""
return self._job_statistics().get("statementType")
@property
def referenced_tables(self):
"""Return referenced tables from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.referenced_tables
Returns:
            List[google.cloud.bigquery.table.TableReference]:
                References to tables consumed by the query, or an empty list
                if the query has not yet completed.
"""
tables = []
datasets_by_project_name = {}
for table in self._job_statistics().get("referencedTables", ()):
t_project = table["projectId"]
ds_id = table["datasetId"]
t_dataset = datasets_by_project_name.get((t_project, ds_id))
if t_dataset is None:
t_dataset = DatasetReference(t_project, ds_id)
datasets_by_project_name[(t_project, ds_id)] = t_dataset
t_name = table["tableId"]
tables.append(t_dataset.table(t_name))
return tables
@property
def undeclared_query_parameters(self):
"""Return undeclared query parameters from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.undeclared_query_parameters
Returns:
List[Union[ \
google.cloud.bigquery.query.ArrayQueryParameter, \
google.cloud.bigquery.query.ScalarQueryParameter, \
google.cloud.bigquery.query.StructQueryParameter \
]]:
Undeclared parameters, or an empty list if the query has
not yet completed.
"""
parameters = []
undeclared = self._job_statistics().get("undeclaredQueryParameters", ())
for parameter in undeclared:
p_type = parameter["parameterType"]
if "arrayType" in p_type:
klass = ArrayQueryParameter
elif "structTypes" in p_type:
klass = StructQueryParameter
else:
klass = ScalarQueryParameter
parameters.append(klass.from_api_repr(parameter))
return parameters
@property
def estimated_bytes_processed(self):
"""Return the estimated number of bytes processed by the query.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobStatistics2.FIELDS.estimated_bytes_processed
Returns:
Optional[int]:
                Estimated number of bytes the query will process, or None
                if the job is not yet complete.
"""
result = self._job_statistics().get("estimatedBytesProcessed")
if result is not None:
result = int(result)
return result
@property
    def dml_stats(self) -> Optional[DmlStats]:
        """Optional[DmlStats]: Statistics for DML statements, if present."""
        stats = self._job_statistics().get("dmlStats")
if stats is None:
return None
else:
return DmlStats.from_api_repr(stats)
def _blocking_poll(self, timeout=None, **kwargs):
self._done_timeout = timeout
self._transport_timeout = timeout
super(QueryJob, self)._blocking_poll(timeout=timeout, **kwargs)
@staticmethod
def _format_for_exception(query, job_id):
"""Format a query for the output in exception message.
Args:
query (str): The SQL query to format.
job_id (str): The ID of the job that ran the query.
Returns:
str: A formatted query text.
"""
template = "\n\n(job ID: {job_id})\n\n{header}\n\n{ruler}\n{body}\n{ruler}"
lines = query.splitlines()
max_line_len = max(len(line) for line in lines)
header = "-----Query Job SQL Follows-----"
header = "{:^{total_width}}".format(header, total_width=max_line_len + 5)
# Print out a "ruler" above and below the SQL so we can judge columns.
# Left pad for the line numbers (4 digits plus ":").
ruler = " |" + " . |" * (max_line_len // 10)
# Put line numbers next to the SQL.
body = "\n".join(
"{:4}:{}".format(n, line) for n, line in enumerate(lines, start=1)
)
return template.format(job_id=job_id, header=header, ruler=ruler, body=body)
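    # Illustrative output of the template above (not from the original
    # source): for query "SELECT x FROM t" and job ID "abc", the appended
    # text looks like:
    #
    #   (job ID: abc)
    #
    #   -----Query Job SQL Follows-----
    #
    #       |    .    |
    #      1:SELECT x FROM t
    #       |    .    |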
def _begin(self, client=None, retry=DEFAULT_RETRY, timeout=None):
"""API call: begin the job via a POST request
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert
Args:
client (Optional[google.cloud.bigquery.client.Client]):
The client to use. If not passed, falls back to the ``client``
                associated with the job object or ``NoneType``.
retry (Optional[google.api_core.retry.Retry]):
How to retry the RPC.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
Raises:
ValueError: If the job has already begun.
"""
try:
super(QueryJob, self)._begin(client=client, retry=retry, timeout=timeout)
except exceptions.GoogleAPICallError as exc:
exc.message += self._format_for_exception(self.query, self.job_id)
exc.query_job = self
raise
def _reload_query_results(
self, retry: "retries.Retry" = DEFAULT_RETRY, timeout: float = None
):
"""Refresh the cached query results.
Args:
retry (Optional[google.api_core.retry.Retry]):
How to retry the call that retrieves query results.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
"""
if self._query_results and self._query_results.complete:
return
# Since the API to getQueryResults can hang up to the timeout value
# (default of 10 seconds), set the timeout parameter to ensure that
# the timeout from the futures API is respected. See:
# https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4135
timeout_ms = None
if self._done_timeout is not None:
# Subtract a buffer for context switching, network latency, etc.
api_timeout = self._done_timeout - _TIMEOUT_BUFFER_SECS
api_timeout = max(min(api_timeout, 10), 0)
self._done_timeout -= api_timeout
self._done_timeout = max(0, self._done_timeout)
timeout_ms = int(api_timeout * 1000)
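            # Worked example (illustrative; the buffer constant is defined
            # elsewhere in this module): with _done_timeout = 12.5s and a
            # 0.1s buffer, api_timeout = min(12.4, 10) = 10s, so timeout_ms
            # becomes 10000 and 2.5s of polling budget remains.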
# If an explicit timeout is not given, fall back to the transport timeout
# stored in _blocking_poll() in the process of polling for job completion.
transport_timeout = timeout if timeout is not None else self._transport_timeout
self._query_results = self._client._get_query_results(
self.job_id,
retry,
project=self.project,
timeout_ms=timeout_ms,
location=self.location,
timeout=transport_timeout,
)
def _done_or_raise(self, retry=DEFAULT_RETRY, timeout=None):
"""Check if the query has finished running and raise if it's not.
If the query has finished, also reload the job itself.
"""
# If an explicit timeout is not given, fall back to the transport timeout
# stored in _blocking_poll() in the process of polling for job completion.
transport_timeout = timeout if timeout is not None else self._transport_timeout
try:
self._reload_query_results(retry=retry, timeout=transport_timeout)
except exceptions.GoogleAPIError as exc:
# Reloading also updates error details on self, thus no need for an
# explicit self.set_exception() call if reloading succeeds.
try:
self.reload(retry=retry, timeout=transport_timeout)
except exceptions.GoogleAPIError:
# Use the query results reload exception, as it generally contains
# much more useful error information.
self.set_exception(exc)
finally:
return
# Only reload the job once we know the query is complete.
# This will ensure that fields such as the destination table are
# correctly populated.
if not self._query_results.complete:
raise polling_future._OperationNotComplete()
else:
try:
self.reload(retry=retry, timeout=transport_timeout)
except exceptions.GoogleAPIError as exc:
self.set_exception(exc)
def result(
self,
page_size: int = None,
max_results: int = None,
retry: "retries.Retry" = DEFAULT_RETRY,
timeout: float = None,
start_index: int = None,
) -> Union["RowIterator", _EmptyRowIterator]:
"""Start the job and wait for it to complete and get the result.
Args:
page_size (Optional[int]):
The maximum number of rows in each page of results from this
request. Non-positive values are ignored.
max_results (Optional[int]):
The maximum total number of rows from this request.
retry (Optional[google.api_core.retry.Retry]):
How to retry the call that retrieves rows. If the job state is
``DONE``, retrying is aborted early even if the results are not
available, as this will not change anymore.
timeout (Optional[float]):
The number of seconds to wait for the underlying HTTP transport
before using ``retry``.
If multiple requests are made under the hood, ``timeout``
applies to each individual request.
start_index (Optional[int]):
The zero-based index of the starting row to read.
Returns:
google.cloud.bigquery.table.RowIterator:
Iterator of row data
:class:`~google.cloud.bigquery.table.Row`-s. During each
page, the iterator will have the ``total_rows`` attribute
set, which counts the total number of rows **in the result
set** (this is distinct from the total number of rows in the
current page: ``iterator.page.num_items``).
If the query is a special query that produces no results, e.g.
a DDL query, an ``_EmptyRowIterator`` instance is returned.
Raises:
google.cloud.exceptions.GoogleAPICallError:
If the job failed.
concurrent.futures.TimeoutError:
If the job did not complete in the given timeout.
"""
try:
super(QueryJob, self).result(retry=retry, timeout=timeout)
# Since the job could already be "done" (e.g. got a finished job
# via client.get_job), the superclass call to done() might not
# set the self._query_results cache.
self._reload_query_results(retry=retry, timeout=timeout)
except exceptions.GoogleAPICallError as exc:
exc.message += self._format_for_exception(self.query, self.job_id)
exc.query_job = self
raise
except requests.exceptions.Timeout as exc:
raise concurrent.futures.TimeoutError from exc
# If the query job is complete but there are no query results, this was
        # a special job, such as a DDL query. Return an empty result set to
# indicate success and avoid calling tabledata.list on a table which
# can't be read (such as a view table).
if self._query_results.total_rows is None:
return _EmptyRowIterator()
rows = self._client._list_rows_from_query_results(
self.job_id,
self.location,
self.project,
self._query_results.schema,
total_rows=self._query_results.total_rows,
destination=self.destination,
page_size=page_size,
max_results=max_results,
start_index=start_index,
retry=retry,
timeout=timeout,
)
rows._preserve_order = _contains_order_by(self.query)
return rows
# If changing the signature of this method, make sure to apply the same
# changes to table.RowIterator.to_arrow(), except for the max_results parameter
# that should only exist here in the QueryJob method.
def to_arrow(
self,
progress_bar_type: str = None,
bqstorage_client: "bigquery_storage.BigQueryReadClient" = None,
create_bqstorage_client: bool = True,
max_results: Optional[int] = None,
) -> "pyarrow.Table":
"""[Beta] Create a class:`pyarrow.Table` by loading all pages of a
table or query.
Args:
progress_bar_type (Optional[str]):
If set, use the `tqdm <https://tqdm.github.io/>`_ library to
display a progress bar while the data downloads. Install the
``tqdm`` package to use this feature.
Possible values of ``progress_bar_type`` include:
``None``
No progress bar.
``'tqdm'``
Use the :func:`tqdm.tqdm` function to print a progress bar
to :data:`sys.stderr`.
``'tqdm_notebook'``
Use the :func:`tqdm.tqdm_notebook` function to display a
progress bar as a Jupyter notebook widget.
``'tqdm_gui'``
Use the :func:`tqdm.tqdm_gui` function to display a
progress bar as a graphical dialog box.
bqstorage_client (Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient]):
A BigQuery Storage API client. If supplied, use the faster
BigQuery Storage API to fetch rows from BigQuery. This API
is a billable API.
This method requires the ``pyarrow`` and
``google-cloud-bigquery-storage`` libraries.
Reading from a specific partition or snapshot is not
currently supported by this method.
create_bqstorage_client (Optional[bool]):
If ``True`` (default), create a BigQuery Storage API client
using the default API settings. The BigQuery Storage API
is a faster way to fetch rows from BigQuery. See the
``bqstorage_client`` parameter for more information.
This argument does nothing if ``bqstorage_client`` is supplied.
                .. versionadded:: 1.24.0
max_results (Optional[int]):
Maximum number of rows to include in the result. No limit by default.
                .. versionadded:: 2.21.0
Returns:
pyarrow.Table
A :class:`pyarrow.Table` populated with row data and column
headers from the query results. The column headers are derived
from the destination table's schema.
Raises:
ValueError:
If the :mod:`pyarrow` library cannot be imported.
        .. versionadded:: 1.17.0
"""
query_result = wait_for_query(self, progress_bar_type, max_results=max_results)
return query_result.to_arrow(
progress_bar_type=progress_bar_type,
bqstorage_client=bqstorage_client,
create_bqstorage_client=create_bqstorage_client,
)
# If changing the signature of this method, make sure to apply the same
# changes to table.RowIterator.to_dataframe(), except for the max_results parameter
# that should only exist here in the QueryJob method.
def to_dataframe(
self,
bqstorage_client: "bigquery_storage.BigQueryReadClient" = None,
dtypes: Dict[str, Any] = None,
progress_bar_type: str = None,
create_bqstorage_client: bool = True,
date_as_object: bool = True,
max_results: Optional[int] = None,
) -> "pandas.DataFrame":
"""Return a pandas DataFrame from a QueryJob
Args:
bqstorage_client (Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient]):
A BigQuery Storage API client. If supplied, use the faster
BigQuery Storage API to fetch rows from BigQuery. This
API is a billable API.
This method requires the ``fastavro`` and
``google-cloud-bigquery-storage`` libraries.
Reading from a specific partition or snapshot is not
currently supported by this method.
dtypes (Optional[Map[str, Union[str, pandas.Series.dtype]]]):
                A dictionary of column names to pandas ``dtype``s. The provided
``dtype`` is used when constructing the series for the column
specified. Otherwise, the default pandas behavior is used.
progress_bar_type (Optional[str]):
If set, use the `tqdm <https://tqdm.github.io/>`_ library to
display a progress bar while the data downloads. Install the
``tqdm`` package to use this feature.
See
:func:`~google.cloud.bigquery.table.RowIterator.to_dataframe`
for details.
                .. versionadded:: 1.11.0
create_bqstorage_client (Optional[bool]):
If ``True`` (default), create a BigQuery Storage API client
using the default API settings. The BigQuery Storage API
is a faster way to fetch rows from BigQuery. See the
``bqstorage_client`` parameter for more information.
This argument does nothing if ``bqstorage_client`` is supplied.
                .. versionadded:: 1.24.0
date_as_object (Optional[bool]):
If ``True`` (default), cast dates to objects. If ``False``, convert
to datetime64[ns] dtype.
                .. versionadded:: 1.26.0
max_results (Optional[int]):
Maximum number of rows to include in the result. No limit by default.
                .. versionadded:: 2.21.0
Returns:
A :class:`~pandas.DataFrame` populated with row data and column
headers from the query results. The column headers are derived
from the destination table's schema.
Raises:
ValueError: If the `pandas` library cannot be imported.
"""
query_result = wait_for_query(self, progress_bar_type, max_results=max_results)
return query_result.to_dataframe(
bqstorage_client=bqstorage_client,
dtypes=dtypes,
progress_bar_type=progress_bar_type,
create_bqstorage_client=create_bqstorage_client,
date_as_object=date_as_object,
)
def __iter__(self):
return iter(self.result())
class QueryPlanEntryStep(object):
"""Map a single step in a query plan entry.
Args:
kind (str): step type.
substeps (List): names of substeps.
"""
def __init__(self, kind, substeps):
self.kind = kind
self.substeps = list(substeps)
@classmethod
def from_api_repr(cls, resource: dict) -> "QueryPlanEntryStep":
"""Factory: construct instance from the JSON repr.
Args:
resource (Dict): JSON representation of the entry.
Returns:
google.cloud.bigquery.job.QueryPlanEntryStep:
New instance built from the resource.
"""
return cls(kind=resource.get("kind"), substeps=resource.get("substeps", ()))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.kind == other.kind and self.substeps == other.substeps
class QueryPlanEntry(object):
"""QueryPlanEntry represents a single stage of a query execution plan.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#ExplainQueryStage
for the underlying API representation within query statistics.
"""
def __init__(self):
self._properties = {}
@classmethod
def from_api_repr(cls, resource: dict) -> "QueryPlanEntry":
"""Factory: construct instance from the JSON repr.
Args:
            resource (Dict[str, object]):
ExplainQueryStage representation returned from API.
Returns:
google.cloud.bigquery.job.QueryPlanEntry:
Query plan entry parsed from ``resource``.
"""
entry = cls()
entry._properties = resource
return entry
@property
def name(self):
"""Optional[str]: Human-readable name of the stage."""
return self._properties.get("name")
@property
def entry_id(self):
"""Optional[str]: Unique ID for the stage within the plan."""
return self._properties.get("id")
@property
def start(self):
"""Optional[Datetime]: Datetime when the stage started."""
if self._properties.get("startMs") is None:
return None
return _helpers._datetime_from_microseconds(
int(self._properties.get("startMs")) * 1000.0
)
@property
def end(self):
"""Optional[Datetime]: Datetime when the stage ended."""
if self._properties.get("endMs") is None:
return None
return _helpers._datetime_from_microseconds(
int(self._properties.get("endMs")) * 1000.0
)
@property
def input_stages(self):
"""List(int): Entry IDs for stages that were inputs for this stage."""
if self._properties.get("inputStages") is None:
return []
return [
_helpers._int_or_none(entry)
for entry in self._properties.get("inputStages")
]
@property
def parallel_inputs(self):
"""Optional[int]: Number of parallel input segments within
the stage.
"""
return _helpers._int_or_none(self._properties.get("parallelInputs"))
@property
def completed_parallel_inputs(self):
"""Optional[int]: Number of parallel input segments completed."""
return _helpers._int_or_none(self._properties.get("completedParallelInputs"))
@property
def wait_ms_avg(self):
"""Optional[int]: Milliseconds the average worker spent waiting to
be scheduled.
"""
return _helpers._int_or_none(self._properties.get("waitMsAvg"))
@property
def wait_ms_max(self):
"""Optional[int]: Milliseconds the slowest worker spent waiting to
be scheduled.
"""
return _helpers._int_or_none(self._properties.get("waitMsMax"))
@property
def wait_ratio_avg(self):
"""Optional[float]: Ratio of time the average worker spent waiting
to be scheduled, relative to the longest time spent by any worker in
any stage of the overall plan.
"""
return self._properties.get("waitRatioAvg")
@property
def wait_ratio_max(self):
"""Optional[float]: Ratio of time the slowest worker spent waiting
to be scheduled, relative to the longest time spent by any worker in
any stage of the overall plan.
"""
return self._properties.get("waitRatioMax")
@property
def read_ms_avg(self):
"""Optional[int]: Milliseconds the average worker spent reading
input.
"""
return _helpers._int_or_none(self._properties.get("readMsAvg"))
@property
def read_ms_max(self):
"""Optional[int]: Milliseconds the slowest worker spent reading
input.
"""
return _helpers._int_or_none(self._properties.get("readMsMax"))
@property
def read_ratio_avg(self):
"""Optional[float]: Ratio of time the average worker spent reading
input, relative to the longest time spent by any worker in any stage
of the overall plan.
"""
return self._properties.get("readRatioAvg")
@property
def read_ratio_max(self):
"""Optional[float]: Ratio of time the slowest worker spent reading
to be scheduled, relative to the longest time spent by any worker in
any stage of the overall plan.
"""
return self._properties.get("readRatioMax")
@property
def compute_ms_avg(self):
"""Optional[int]: Milliseconds the average worker spent on CPU-bound
processing.
"""
return _helpers._int_or_none(self._properties.get("computeMsAvg"))
@property
def compute_ms_max(self):
"""Optional[int]: Milliseconds the slowest worker spent on CPU-bound
processing.
"""
return _helpers._int_or_none(self._properties.get("computeMsMax"))
@property
def compute_ratio_avg(self):
"""Optional[float]: Ratio of time the average worker spent on
CPU-bound processing, relative to the longest time spent by any
worker in any stage of the overall plan.
"""
return self._properties.get("computeRatioAvg")
@property
def compute_ratio_max(self):
"""Optional[float]: Ratio of time the slowest worker spent on
CPU-bound processing, relative to the longest time spent by any
worker in any stage of the overall plan.
"""
return self._properties.get("computeRatioMax")
@property
def write_ms_avg(self):
"""Optional[int]: Milliseconds the average worker spent writing
output data.
"""
return _helpers._int_or_none(self._properties.get("writeMsAvg"))
@property
def write_ms_max(self):
"""Optional[int]: Milliseconds the slowest worker spent writing
output data.
"""
return _helpers._int_or_none(self._properties.get("writeMsMax"))
@property
def write_ratio_avg(self):
"""Optional[float]: Ratio of time the average worker spent writing
output data, relative to the longest time spent by any worker in any
stage of the overall plan.
"""
return self._properties.get("writeRatioAvg")
@property
def write_ratio_max(self):
"""Optional[float]: Ratio of time the slowest worker spent writing
output data, relative to the longest time spent by any worker in any
stage of the overall plan.
"""
return self._properties.get("writeRatioMax")
@property
def records_read(self):
"""Optional[int]: Number of records read by this stage."""
return _helpers._int_or_none(self._properties.get("recordsRead"))
@property
def records_written(self):
"""Optional[int]: Number of records written by this stage."""
return _helpers._int_or_none(self._properties.get("recordsWritten"))
@property
def status(self):
"""Optional[str]: status of this stage."""
return self._properties.get("status")
@property
def shuffle_output_bytes(self):
"""Optional[int]: Number of bytes written by this stage to
intermediate shuffle.
"""
return _helpers._int_or_none(self._properties.get("shuffleOutputBytes"))
@property
def shuffle_output_bytes_spilled(self):
"""Optional[int]: Number of bytes written by this stage to
intermediate shuffle and spilled to disk.
"""
return _helpers._int_or_none(self._properties.get("shuffleOutputBytesSpilled"))
@property
def steps(self):
"""List(QueryPlanEntryStep): List of step operations performed by
each worker in the stage.
"""
return [
QueryPlanEntryStep.from_api_repr(step)
for step in self._properties.get("steps", [])
]
class TimelineEntry(object):
"""TimelineEntry represents progress of a query job at a particular
point in time.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#querytimelinesample
for the underlying API representation within query statistics.
"""
def __init__(self):
self._properties = {}
@classmethod
def from_api_repr(cls, resource):
"""Factory: construct instance from the JSON repr.
Args:
            resource (Dict[str, object]):
QueryTimelineSample representation returned from API.
Returns:
google.cloud.bigquery.TimelineEntry:
Timeline sample parsed from ``resource``.
"""
entry = cls()
entry._properties = resource
return entry
@property
def elapsed_ms(self):
"""Optional[int]: Milliseconds elapsed since start of query
execution."""
return _helpers._int_or_none(self._properties.get("elapsedMs"))
@property
def active_units(self):
"""Optional[int]: Current number of input units being processed
by workers, reported as largest value since the last sample."""
return _helpers._int_or_none(self._properties.get("activeUnits"))
@property
def pending_units(self):
"""Optional[int]: Current number of input units remaining for
query stages active at this sample time."""
return _helpers._int_or_none(self._properties.get("pendingUnits"))
@property
def completed_units(self):
"""Optional[int]: Current number of input units completed by
this query."""
return _helpers._int_or_none(self._properties.get("completedUnits"))
@property
def slot_millis(self):
"""Optional[int]: Cumulative slot-milliseconds consumed by
this query."""
return _helpers._int_or_none(self._properties.get("totalSlotMs"))
| 35.911135
| 134
| 0.645831
|
709c33b3cc28e41f2b28d27ba50d95eb4f24ecc8
| 835
|
py
|
Python
|
app/views/callback.py
|
Md-Kais/spotify-playing-readme
|
3e78b3bc7ab9e90160eaae8b09884a125a387fd8
|
[
"Apache-2.0"
] | 1
|
2021-08-13T04:31:06.000Z
|
2021-08-13T04:31:06.000Z
|
app/views/callback.py
|
Md-Kais/spotify-playing-readme
|
3e78b3bc7ab9e90160eaae8b09884a125a387fd8
|
[
"Apache-2.0"
] | null | null | null |
app/views/callback.py
|
Md-Kais/spotify-playing-readme
|
3e78b3bc7ab9e90160eaae8b09884a125a387fd8
|
[
"Apache-2.0"
] | null | null | null |
from flask import Blueprint, Response, request, render_template
from app import database
from app.config import BASE_URL, GITHUB_URL
from app.utils import generate_token, get_user_info
callback = Blueprint("/callback", __name__, template_folder="templates")
@callback.route("/callback")
def callback_():
code = request.args.get("code")
if not code:
return Response("No code found!")
try:
token = generate_token(code)
access_token = token["access_token"]
user_id = get_user_info(access_token)["id"]
except KeyError:
return Response("Invalid Auth workflow! Please login correctly.")
database.child("users").child(user_id).set(token)
return render_template(
"callback.html",
id=user_id,
base_url=BASE_URL,
github_url=GITHUB_URL
)
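# Hedged usage sketch (not part of the original file): how this blueprint
# would typically be attached to an application; the standalone Flask app
# below exists only for illustration.
def _example_app():  # pragma: no cover
    from flask import Flask

    demo = Flask(__name__)
    demo.register_blueprint(callback)  # exposes GET /callback
    return demo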
| 26.09375
| 73
| 0.688623
|
b73bbb6d771590b792104a0096a4db69711cbd14
| 736
|
py
|
Python
|
ehr_functions/models/types/nn.py
|
fdabek1/EHR-Functions
|
e6bd0b6fa213930358c4a19be31c459ac7430ca9
|
[
"MIT"
] | null | null | null |
ehr_functions/models/types/nn.py
|
fdabek1/EHR-Functions
|
e6bd0b6fa213930358c4a19be31c459ac7430ca9
|
[
"MIT"
] | null | null | null |
ehr_functions/models/types/nn.py
|
fdabek1/EHR-Functions
|
e6bd0b6fa213930358c4a19be31c459ac7430ca9
|
[
"MIT"
] | null | null | null |
# from livelossplot.keras import PlotLossesCallback
from ehr_functions.models.types._base import Model
import numpy as np
class NeuralNetwork(Model):
def __init__(self, model, epochs=10, batch_size=32, round_output=False, **kwargs):
super().__init__(**kwargs)
self.model = model
self.epochs = epochs
self.batch_size = batch_size
self.round_output = round_output
def train(self, x, y):
self.model.fit(x, y, epochs=self.epochs, batch_size=self.batch_size, validation_split=0.2)
# callbacks=[PlotLossesCallback()])
def predict(self, x):
output = self.model.predict(x)
if self.round_output:
output = np.round(output)
return output
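# Hedged usage sketch (not part of the original file). It assumes a
# TensorFlow/Keras environment and that the Model base class needs no extra
# constructor arguments; the training data below is synthetic.
def _example_usage():  # pragma: no cover
    from tensorflow import keras

    model = keras.Sequential([
        keras.layers.Dense(16, activation="relu", input_shape=(4,)),
        keras.layers.Dense(1),
    ])
    model.compile(optimizer="adam", loss="mse")

    x = np.random.rand(128, 4).astype("float32")
    y = np.random.rand(128, 1).astype("float32")

    net = NeuralNetwork(model, epochs=2, batch_size=16, round_output=True)
    net.train(x, y)            # fit with a 20% validation split (see above)
    print(net.predict(x[:5]))  # rounded predictions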
| 30.666667
| 98
| 0.66712
|
e5f67f8f1babb8dcaa0899a7af4fb2f546e42b6e
| 3,088
|
py
|
Python
|
gen_potential_employees.py
|
ab-go/enron_graph
|
35ead456e27736bbb4fae20fe179ebe788dcbdc9
|
[
"MIT"
] | null | null | null |
gen_potential_employees.py
|
ab-go/enron_graph
|
35ead456e27736bbb4fae20fe179ebe788dcbdc9
|
[
"MIT"
] | null | null | null |
gen_potential_employees.py
|
ab-go/enron_graph
|
35ead456e27736bbb4fae20fe179ebe788dcbdc9
|
[
"MIT"
] | null | null | null |
import os
import sys
import csv
from names_dataset import NameDataset
import re
import argparse
def generate_potential_employees(f_path):
"""
open the email_employee.csv and parse it to create more employees
"""
f_name = os.path.join(f_path, 'email_employee.csv')
nd = NameDataset()
    p = re.compile(r'([a-z]*)\.([a-z]\.)?([a-z]*)@enron\.com')
out_f_name = os.path.join(f_path, 'potential_email_employees.csv')
num_rows_written = 0
with open(out_f_name, 'w', newline='') as out_f:
field_names = ['address', 'firstName', 'lastName']
        writer = csv.DictWriter(out_f, fieldnames=field_names)
writer.writeheader()
with open(f_name, 'r') as f:
reader = csv.DictReader(f, quotechar='"')
for row in reader:
eid = row['eid']
email_id = row['address']
try:
v = int(eid)
except:
v = 0
if v != 0:
print("found eid: {} for email_id: {}, continuing".format(v, email_id))
continue
## match this email id
m = p.match(email_id)
if m is None:
continue
## get the groups from this regex
g = m.groups()
## get the first and last names
fn = g[0]
ln = g[-1]
if not fn or not ln:
print("invalid name: firstName: {}, lastName: {}".format(fn, ln))
continue
d = {'address': email_id}
## search for the first name in the db
fn_in_nd = nd.search_first_name(fn)
if not fn_in_nd and nd.search_first_name(ln):
## swap the two
d['firstName'] = ln
d['lastName'] = fn
else:
## continue as default
d['firstName'] = fn
d['lastName'] = ln
## write this to the output file as a row
writer.writerow(d)
num_rows_written += 1
print("wrote {} rows of data to the file: {}".format(num_rows_written, out_f_name))
if __name__ == '__main__':
desc = """Generate potential employees based on email signatures.
A lot of email addresses in the db don't have corresponding Employee ids.
However, their email addresses (@enron.com) suggest that they might be
employees. In addition, their email addresses follow the pattern
<firstName>.<lastName>@enron.com.
For these email addresses, nodes labelled PotentialEmployee are generated
in the table so that the search experience is richer. The names_dataset
module is used to check the order of first names and last names in their
email addresses. When it seems more likely that their address was
<secondName>.<firstName>, these names are swapped around.
"""
parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--path', dest='path', type=str, help='path to the directory containing email_employee.csv', default='.')
args = parser.parse_args()
generate_potential_employees(args.path)
| 33.565217
| 110
| 0.590674
|
d78db1a6d84b50bb668ab7ac0dd317f5035153c3
| 1,137
|
py
|
Python
|
test/modules/http2/test_501_proxy_serverheader.py
|
ICLXLxqj0g/chenrichard10y
|
df2619419646f6f8b2514680445955782a2a3c0a
|
[
"Apache-2.0"
] | 2,529
|
2015-01-02T11:52:53.000Z
|
2022-03-30T19:54:27.000Z
|
test/modules/http2/test_501_proxy_serverheader.py
|
ICLXLxqj0g/chenrichard10y
|
df2619419646f6f8b2514680445955782a2a3c0a
|
[
"Apache-2.0"
] | 133
|
2015-04-21T05:50:45.000Z
|
2022-03-30T14:23:40.000Z
|
test/modules/http2/test_501_proxy_serverheader.py
|
ICLXLxqj0g/chenrichard10y
|
df2619419646f6f8b2514680445955782a2a3c0a
|
[
"Apache-2.0"
] | 1,113
|
2015-01-01T14:47:02.000Z
|
2022-03-29T16:47:18.000Z
|
import pytest
from .env import H2Conf
class TestProxyServerHeader:
@pytest.fixture(autouse=True, scope='class')
def _class_scope(self, env):
conf = H2Conf(env, extras={
f'cgi.{env.http_tld}': [
"Header unset Server",
"Header always set Server cgi",
]
})
conf.add_vhost_cgi(proxy_self=True, h2proxy_self=False)
conf.install()
assert env.apache_restart() == 0
def setup_method(self, method):
print("setup_method: %s" % method.__name__)
def teardown_method(self, method):
print("teardown_method: %s" % method.__name__)
def test_h2_501_01(self, env):
url = env.mkurl("https", "cgi", "/proxy/hello.py")
r = env.curl_get(url, 5)
assert r.response["status"] == 200
assert "HTTP/1.1" == r.response["json"]["protocol"]
assert "" == r.response["json"]["https"]
assert "" == r.response["json"]["ssl_protocol"]
assert "" == r.response["json"]["h2"]
assert "" == r.response["json"]["h2push"]
assert "cgi" == r.response["header"]["server"]
| 31.583333
| 63
| 0.57168
|
e8d3b893668e7b789a00e6cab0f8b80a42bc0e68
| 1,232
|
py
|
Python
|
sympy/strategies/traverse.py
|
msgoff/sympy
|
1e7daef7514902f5e89718fa957b7b36c6669a10
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/strategies/traverse.py
|
msgoff/sympy
|
1e7daef7514902f5e89718fa957b7b36c6669a10
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/strategies/traverse.py
|
msgoff/sympy
|
1e7daef7514902f5e89718fa957b7b36c6669a10
|
[
"BSD-3-Clause"
] | null | null | null |
"""Strategies to Traverse a Tree."""
from __future__ import print_function, division
from sympy.strategies.util import basic_fns
from sympy.strategies.core import chain, do_one
def top_down(rule, fns=basic_fns):
"""Apply a rule down a tree running it on the top nodes first."""
return chain(rule, lambda expr: sall(top_down(rule, fns), fns)(expr))
def bottom_up(rule, fns=basic_fns):
"""Apply a rule down a tree running it on the bottom nodes first."""
return chain(lambda expr: sall(bottom_up(rule, fns), fns)(expr), rule)
def top_down_once(rule, fns=basic_fns):
"""Apply a rule down a tree - stop on success."""
return do_one(rule, lambda expr: sall(top_down(rule, fns), fns)(expr))
def bottom_up_once(rule, fns=basic_fns):
"""Apply a rule up a tree - stop on success."""
return do_one(lambda expr: sall(bottom_up(rule, fns), fns)(expr), rule)
def sall(rule, fns=basic_fns):
"""Strategic all - apply rule to args."""
op, new, children, leaf = map(fns.get, ("op", "new", "children", "leaf"))
def all_rl(expr):
if leaf(expr):
return expr
else:
args = map(rule, children(expr))
return new(op(expr), *args)
return all_rl
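# Hedged example (not part of the original module): a tiny rewrite rule
# applied with top_down. Note the rebuilt tree may be unevaluated, since
# basic_fns reconstructs nodes without SymPy's automatic simplification.
def _example():  # pragma: no cover
    from sympy import Integer, Symbol

    def double_integers(expr):
        # Replace every Integer with twice its value; leave other nodes alone.
        return Integer(2 * int(expr)) if isinstance(expr, Integer) else expr

    x = Symbol('x')
    print(top_down(double_integers)(x + 3))  # expected: x + 6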
| 30.8
| 77
| 0.660714
|
e24536f42ec572ab9573485d6f8da0568be0ecb6
| 9,682
|
py
|
Python
|
distributed/cifar10-100/multi_device_multi_process_classification.py
|
JonathanLehner/nnabla-examples
|
2971b987484945e12fb171594181908789485a0f
|
[
"Apache-2.0"
] | null | null | null |
distributed/cifar10-100/multi_device_multi_process_classification.py
|
JonathanLehner/nnabla-examples
|
2971b987484945e12fb171594181908789485a0f
|
[
"Apache-2.0"
] | null | null | null |
distributed/cifar10-100/multi_device_multi_process_classification.py
|
JonathanLehner/nnabla-examples
|
2971b987484945e12fb171594181908789485a0f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import re
import time
from args import get_args
from cifar10_data import data_iterator_cifar10
from cifar100_data import data_iterator_cifar100
from nnabla.utils.data_iterator import data_iterator
import nnabla as nn
import nnabla.communicators as C
from nnabla.ext_utils import get_extension_context
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solvers as S
import numpy as np
import functools
from models import (resnet23_prediction, categorical_error, loss_function)
from checkpoint import save_checkpoint, load_checkpoint
def backward_and_all_reduce(loss, comm, with_all_reduce_callback=False):
params = [x.grad for x in nn.get_parameters().values()]
if with_all_reduce_callback:
# All-reduce gradients every 2MiB parameters during backward computation
loss.backward(clear_buffer=True,
communicator_callbacks=comm.all_reduce_callback(params, 1024 * 1024 * 2))
else:
loss.backward(clear_buffer=True)
comm.all_reduce(params, division=False, inplace=False)
def train():
"""
Naive Multi-Device Training
NOTE: the communicator exposes low-level interfaces
* Parse command line arguments.
* Instantiate a communicator and set parameter variables.
* Specify contexts for computation.
* Initialize DataIterator.
* Construct a computation graph for training and one for validation.
* Initialize solver and set parameter variables to that.
* Create monitor instances for saving and displaying training stats.
* Training loop
      * Compute error rate for validation data (periodically)
* Get a next minibatch.
* Execute forwardprop
* Set parameter gradients zero
* Execute backprop.
* AllReduce for gradients
* Solver updates parameters by using gradients computed by backprop and all reduce.
* Compute training error
"""
# Parse args
args = get_args()
n_train_samples = 50000
n_valid_samples = 10000
bs_valid = args.batch_size
# Create Communicator and Context
extension_module = "cudnn"
ctx = get_extension_context(extension_module, type_config=args.type_config)
comm = C.MultiProcessDataParalellCommunicator(ctx)
comm.init()
n_devices = comm.size
mpi_rank = comm.rank
mpi_local_rank = comm.local_rank
device_id = mpi_local_rank
ctx.device_id = str(device_id)
nn.set_default_context(ctx)
# Model
rng = np.random.RandomState(313)
comm_syncbn = comm if args.sync_bn else None
if args.net == "cifar10_resnet23":
prediction = functools.partial(
resnet23_prediction, rng=rng, ncls=10, nmaps=32, act=F.relu, comm=comm_syncbn)
data_iterator = data_iterator_cifar10
if args.net == "cifar100_resnet23":
prediction = functools.partial(
resnet23_prediction, rng=rng, ncls=100, nmaps=384, act=F.elu, comm=comm_syncbn)
data_iterator = data_iterator_cifar100
# Create training graphs
image_train = nn.Variable((args.batch_size, 3, 32, 32))
label_train = nn.Variable((args.batch_size, 1))
pred_train = prediction(image_train, test=False)
pred_train.persistent = True
loss_train = (loss_function(pred_train, label_train) /
n_devices).apply(persistent=True)
error_train = F.mean(F.top_n_error(
pred_train, label_train, axis=1)).apply(persistent=True)
loss_error_train = F.sink(loss_train, error_train)
input_image_train = {"image": image_train, "label": label_train}
# Create validation graph
image_valid = nn.Variable((bs_valid, 3, 32, 32))
    label_valid = nn.Variable((bs_valid, 1))
pred_valid = prediction(image_valid, test=True)
error_valid = F.mean(F.top_n_error(pred_valid, label_valid, axis=1))
input_image_valid = {"image": image_valid, "label": label_valid}
# Solvers
solver = S.Adam()
solver.set_parameters(nn.get_parameters())
base_lr = args.learning_rate
warmup_iter = int(1. * n_train_samples /
args.batch_size / n_devices) * args.warmup_epoch
warmup_slope = base_lr * (n_devices - 1) / warmup_iter
solver.set_learning_rate(base_lr)
# load checkpoint if file exist.
start_point = 0
if args.use_latest_checkpoint:
files = glob.glob(f'{args.model_save_path}/checkpoint_*.json')
if len(files) != 0:
            index = max(
                [int(n) for n in [re.sub(r'.*checkpoint_(\d+)\.json', r'\1', f) for f in files]])
# load weights and solver state info from specified checkpoint file.
start_point = load_checkpoint(
f'{args.model_save_path}/checkpoint_{index}.json', solver)
# Create monitor
from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
monitor = Monitor(args.monitor_path)
monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
monitor_err = MonitorSeries("Training error", monitor, interval=10)
monitor_time = MonitorTimeElapsed("Training time", monitor, interval=10)
monitor_verr = MonitorSeries("Validation error", monitor, interval=1)
monitor_vtime = MonitorTimeElapsed("Validation time", monitor, interval=1)
# Data Iterator
# If the data does not exist, it will try to download it from the server
# and prepare it. When executing multiple processes on the same host, it is
# necessary to execute initial data preparation by the representative
    # process (rank 0) on the host.
    # Prepare data only when rank is 0
if mpi_rank == 0:
rng = np.random.RandomState(device_id)
_, tdata = data_iterator(args.batch_size, True, rng)
vsource, vdata = data_iterator(args.batch_size, False)
# Wait for data to be prepared without watchdog
comm.barrier()
    # Prepare data when rank is not 0
if mpi_rank != 0:
rng = np.random.RandomState(device_id)
_, tdata = data_iterator(args.batch_size, True, rng)
vsource, vdata = data_iterator(args.batch_size, False)
# loss_error_train.forward()
# Training-loop
ve = nn.Variable()
model_save_interval = 0
for i in range(start_point, int(args.max_iter / n_devices)):
# Validation
if i % int(n_train_samples / args.batch_size / n_devices) == 0:
ve_local = 0.
k = 0
idx = np.random.permutation(n_valid_samples)
val_images = vsource.images[idx]
val_labels = vsource.labels[idx]
for j in range(int(n_valid_samples / n_devices * mpi_rank),
int(n_valid_samples / n_devices * (mpi_rank + 1)),
bs_valid):
image = val_images[j:j + bs_valid]
label = val_labels[j:j + bs_valid]
if len(image) != bs_valid: # note that smaller batch is ignored
continue
input_image_valid["image"].d = image
input_image_valid["label"].d = label
error_valid.forward(clear_buffer=True)
ve_local += error_valid.d.copy()
k += 1
ve_local /= k
ve.d = ve_local
comm.all_reduce(ve.data, division=True, inplace=True)
# Save model
if mpi_rank == 0:
monitor_verr.add(i * n_devices, ve.d.copy())
monitor_vtime.add(i * n_devices)
if model_save_interval <= 0:
nn.save_parameters(os.path.join(
args.model_save_path, 'params_%06d.h5' % i))
save_checkpoint(args.model_save_path, i, solver)
model_save_interval += int(
args.model_save_interval / n_devices)
model_save_interval -= 1
# Forward/Zerograd
image, label = tdata.next()
input_image_train["image"].d = image
input_image_train["label"].d = label
loss_error_train.forward(clear_no_need_grad=True)
solver.zero_grad()
# Backward/AllReduce
backward_and_all_reduce(
loss_error_train, comm, with_all_reduce_callback=args.with_all_reduce_callback)
# Solvers update
solver.update()
# Linear Warmup
if i <= warmup_iter:
lr = base_lr + warmup_slope * i
solver.set_learning_rate(lr)
if mpi_rank == 0: # loss and error locally, and elapsed time
monitor_loss.add(i * n_devices, loss_train.d.copy())
monitor_err.add(i * n_devices, error_train.d.copy())
monitor_time.add(i * n_devices)
# exit(0)
if mpi_rank == 0:
nn.save_parameters(os.path.join(
args.model_save_path,
'params_%06d.h5' % (args.max_iter / n_devices)))
comm.barrier()
if __name__ == '__main__':
"""
Call this script with `mpirun` or `mpiexec`
    $ mpirun -n 4 python multi_device_multi_process_classification.py --context "cudnn" -bs 64
"""
train()
| 38.268775
| 96
| 0.664532
|
a39d23037248b808541df07f89166fbe78dd4d0f
| 790
|
py
|
Python
|
src/util/decorate_all.py
|
pgecsenyi/fst
|
1d4f579fb3cccd022fe1ab0e61aa00693e7234c1
|
[
"MIT"
] | 1
|
2019-12-04T20:35:34.000Z
|
2019-12-04T20:35:34.000Z
|
src/util/decorate_all.py
|
pgecsenyi/router-fs
|
1d4f579fb3cccd022fe1ab0e61aa00693e7234c1
|
[
"MIT"
] | null | null | null |
src/util/decorate_all.py
|
pgecsenyi/router-fs
|
1d4f579fb3cccd022fe1ab0e61aa00693e7234c1
|
[
"MIT"
] | null | null | null |
from types import FunctionType
def decorate_all(decorator):
class DecorateAll(type):
def __new__(cls, name, bases, dct):
for attr, value in dct.items():
if do_decorate(attr, value):
dct[attr] = decorator(value)
return super(DecorateAll, cls).__new__(cls, name, bases, dct)
def __setattr__(cls, attr, value):
if do_decorate(attr, value):
value = decorator(value)
super(DecorateAll, cls).__setattr__(attr, value)
return DecorateAll
def do_decorate(attr, value):
return (
'__' not in attr and
isinstance(value, FunctionType) and
getattr(value, 'decorate', True))
def dont_decorate(func):
func.decorate = False
return func
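# Hedged usage sketch (not part of the original module): tracing every public
# method of a class by combining decorate_all with a simple logging decorator.
def _example():  # pragma: no cover
    def traced(func):
        def wrapper(*args, **kwargs):
            print('calling', func.__name__)
            return func(*args, **kwargs)
        return wrapper

    class Service(metaclass=decorate_all(traced)):
        def ping(self):
            return 'pong'

        @dont_decorate
        def quiet(self):
            return 'untraced'

    svc = Service()
    svc.ping()   # prints "calling ping"
    svc.quiet()  # prints nothing extra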
| 23.939394
| 73
| 0.601266
|
7d5b0a3f77f58e673c6e7a5c6d5912711653bd8c
| 4,335
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/databoxedge/v20200501preview/get_bandwidth_schedule.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/databoxedge/v20200501preview/get_bandwidth_schedule.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/databoxedge/v20200501preview/get_bandwidth_schedule.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetBandwidthScheduleResult',
'AwaitableGetBandwidthScheduleResult',
'get_bandwidth_schedule',
]
@pulumi.output_type
class GetBandwidthScheduleResult:
"""
The bandwidth schedule details.
"""
def __init__(__self__, days=None, name=None, rate_in_mbps=None, start=None, stop=None, type=None):
if days and not isinstance(days, list):
raise TypeError("Expected argument 'days' to be a list")
pulumi.set(__self__, "days", days)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if rate_in_mbps and not isinstance(rate_in_mbps, int):
raise TypeError("Expected argument 'rate_in_mbps' to be a int")
pulumi.set(__self__, "rate_in_mbps", rate_in_mbps)
if start and not isinstance(start, str):
raise TypeError("Expected argument 'start' to be a str")
pulumi.set(__self__, "start", start)
if stop and not isinstance(stop, str):
raise TypeError("Expected argument 'stop' to be a str")
pulumi.set(__self__, "stop", stop)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def days(self) -> Sequence[str]:
"""
The days of the week when this schedule is applicable.
"""
return pulumi.get(self, "days")
@property
@pulumi.getter
def name(self) -> str:
"""
The object name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="rateInMbps")
def rate_in_mbps(self) -> int:
"""
The bandwidth rate in Mbps.
"""
return pulumi.get(self, "rate_in_mbps")
@property
@pulumi.getter
def start(self) -> str:
"""
The start time of the schedule in UTC.
"""
return pulumi.get(self, "start")
@property
@pulumi.getter
def stop(self) -> str:
"""
The stop time of the schedule in UTC.
"""
return pulumi.get(self, "stop")
@property
@pulumi.getter
def type(self) -> str:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
class AwaitableGetBandwidthScheduleResult(GetBandwidthScheduleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBandwidthScheduleResult(
days=self.days,
name=self.name,
rate_in_mbps=self.rate_in_mbps,
start=self.start,
stop=self.stop,
type=self.type)
def get_bandwidth_schedule(device_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBandwidthScheduleResult:
"""
Use this data source to access information about an existing resource.
:param str device_name: The device name.
:param str name: The bandwidth schedule name.
:param str resource_group_name: The resource group name.
"""
__args__ = dict()
__args__['deviceName'] = device_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:databoxedge/v20200501preview:getBandwidthSchedule', __args__, opts=opts, typ=GetBandwidthScheduleResult).value
return AwaitableGetBandwidthScheduleResult(
days=__ret__.days,
name=__ret__.name,
rate_in_mbps=__ret__.rate_in_mbps,
start=__ret__.start,
stop=__ret__.stop,
type=__ret__.type)
| 32.593985
| 161
| 0.626759
|
ecad6c8921aa07d60c8d34e3b3f75a91bb4cf221
| 2,650
|
py
|
Python
|
posthog/models/filters/mixins/stickiness.py
|
alx-a/posthog
|
a76959bb2a7640ca8cf367a4d3a0e4ca67f65a5e
|
[
"MIT"
] | null | null | null |
posthog/models/filters/mixins/stickiness.py
|
alx-a/posthog
|
a76959bb2a7640ca8cf367a4d3a0e4ca67f65a5e
|
[
"MIT"
] | null | null | null |
posthog/models/filters/mixins/stickiness.py
|
alx-a/posthog
|
a76959bb2a7640ca8cf367a4d3a0e4ca67f65a5e
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from typing import Callable, Optional, Union
from rest_framework.exceptions import ValidationError
from posthog.constants import DATE_FROM, DATE_TO, STICKINESS_DAYS
from posthog.models.filters.mixins.common import BaseParamMixin, DateMixin, IntervalMixin
from posthog.models.filters.mixins.utils import cached_property, include_dict
from posthog.models.team import Team
from posthog.utils import relative_date_parse
class SelectedIntervalMixin(BaseParamMixin):
@cached_property
def selected_interval(self) -> int:
return int(self._data.get(STICKINESS_DAYS, "0")) or int(self._data.get("selected_interval", "0"))
@include_dict
def selected_interval_to_dict(self):
return {"selected_interval": self.selected_interval} if self.selected_interval else {}
class StickinessDateMixin(DateMixin):
get_earliest_timestamp: Callable
team: Team
@cached_property
def _date_from(self) -> Optional[Union[str, datetime]]:
if not self.team or not self.get_earliest_timestamp:
raise AttributeError("StickinessDateMixin requires team and get_earliest_timestamp to be provided")
_date_from = self._data.get(DATE_FROM, None)
if _date_from == "all":
return self.get_earliest_timestamp(team_id=self.team.pk)
elif _date_from:
return _date_from
else:
return relative_date_parse("-7d")
@cached_property
def _date_to(self) -> Optional[Union[str, datetime]]:
return self._data.get(DATE_TO)
class TotalIntervalsDerivedMixin(IntervalMixin, StickinessDateMixin):
"""
Properties
-----------
- total_intervals
- date_from (inherited)
- date_to (inherited)
- interval (inherited)
"""
@cached_property
def total_intervals(self) -> int:
_num_intervals = 0
_total_seconds = (self.date_to - self.date_from).total_seconds()
if self.interval == "minute":
_num_intervals = int(divmod(_total_seconds, 60)[0])
elif self.interval == "hour":
_num_intervals = int(divmod(_total_seconds, 3600)[0])
elif self.interval == "day":
_num_intervals = int(divmod(_total_seconds, 86400)[0])
elif self.interval == "week":
_num_intervals = (self.date_to - self.date_from).days // 7
elif self.interval == "month":
            _num_intervals = (self.date_to.year - self.date_from.year) * 12 + (self.date_to.month - self.date_from.month)
else:
raise ValidationError(f"{self.interval} not supported")
_num_intervals += 2
return _num_intervals
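# Worked example (illustrative, not part of the original module): a 14-day
# window with interval "week" gives 14 // 7 = 2 buckets, and the +2 padding
# above makes total_intervals == 4. The same arithmetic, stand-alone:
def _interval_example():  # pragma: no cover
    from datetime import datetime
    date_from = datetime(2021, 1, 1)
    date_to = datetime(2021, 1, 15)
    print((date_to - date_from).days // 7 + 2)  # -> 4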
| 36.30137
| 116
| 0.690566
|
201c4e761c104ba2860fd932d4670576d8fd6ba2
| 409
|
py
|
Python
|
venv/Scripts/pip-script.py
|
Shubhraaaj/Blogging
|
b57d3bceed2c9ac661f0a3eafc8c2b948a5ee3ce
|
[
"Apache-2.0"
] | null | null | null |
venv/Scripts/pip-script.py
|
Shubhraaaj/Blogging
|
b57d3bceed2c9ac661f0a3eafc8c2b948a5ee3ce
|
[
"Apache-2.0"
] | null | null | null |
venv/Scripts/pip-script.py
|
Shubhraaaj/Blogging
|
b57d3bceed2c9ac661f0a3eafc8c2b948a5ee3ce
|
[
"Apache-2.0"
] | null | null | null |
#!C:\Users\shubh\PycharmProjects\Python_1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.3','console_scripts','pip'
__requires__ = 'pip==9.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.3', 'console_scripts', 'pip')()
)
| 31.461538
| 69
| 0.667482
|
926ab9082f5cea5eb161c160242dbca8da3eaa72
| 442
|
py
|
Python
|
src/deprecated_src_files/cropper.py
|
IW276/IW276WS21-P20
|
06680a879b34166dc9104004686b195803b3bb56
|
[
"MIT"
] | null | null | null |
src/deprecated_src_files/cropper.py
|
IW276/IW276WS21-P20
|
06680a879b34166dc9104004686b195803b3bb56
|
[
"MIT"
] | null | null | null |
src/deprecated_src_files/cropper.py
|
IW276/IW276WS21-P20
|
06680a879b34166dc9104004686b195803b3bb56
|
[
"MIT"
] | null | null | null |
from cv2 import imread, IMREAD_COLOR
def create_crops(image_path, crop_coordinates):
resulting_crops = []
image = imread(image_path, IMREAD_COLOR)
for row in crop_coordinates.itertuples():
top = row[3]
left = row[4]
height = row[5]
width = row[6]
resulting_crops.append(
            # NumPy slices as [rows, cols]: rows are selected by top/height,
            # columns by left/width.
            image[top:top + height,
                  left:left + width].copy()
)
return resulting_crops
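# Hedged usage sketch (not part of the original file). The column order below
# is an assumption chosen to line up with the positional row[3..6] accesses
# above (itertuples puts the index at position 0); the image path is
# hypothetical.
def _example():  # pragma: no cover
    import pandas as pd

    coords = pd.DataFrame(
        [(0, 'cam1', 10, 20, 64, 48)],
        columns=['frame', 'source', 'top', 'left', 'height', 'width'],
    )
    crops = create_crops('frame_0001.jpg', coords)
    print(len(crops), crops[0].shape if crops else None)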
| 26
| 47
| 0.597285
|
8182bb40615f6d27e1dc8e9bbc613289874c1d39
| 1,230
|
py
|
Python
|
bin/combine_pdf.py
|
aerorahul/dotfiles
|
4b0031d303b7db778229c6e88a8926e1886133e0
|
[
"MIT"
] | null | null | null |
bin/combine_pdf.py
|
aerorahul/dotfiles
|
4b0031d303b7db778229c6e88a8926e1886133e0
|
[
"MIT"
] | null | null | null |
bin/combine_pdf.py
|
aerorahul/dotfiles
|
4b0031d303b7db778229c6e88a8926e1886133e0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
###############################################################
# combine_pdf.py - script used to combine multiple pdf's into one
###############################################################
import os
import sys
import subprocess
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description='Combine multiple pdf files into one',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input', help='input files',
nargs='+', required=True)
parser.add_argument('-o', '--output', help='output file',
type=str, default='test.pdf', required=False)
args = parser.parse_args()
input_files = ' '.join(args.input)
output_file = args.output
if os.path.exists(output_file):
print('%s exists ...' % output_file)
    answer = input('overwrite %s? yes or no: ' % output_file)
    if answer.lower() not in ['y', 'yes']:
sys.exit(0)
cmd = f'gs -dBATCH -dNOPAUSE -q -sDEVICE=pdfwrite -sOutputFile={output_file} {input_files}'
try:
subprocess.check_call(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
print(e.output)
sys.exit(0)
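# Example invocation (illustrative):
#   $ python combine_pdf.py -i chapter1.pdf chapter2.pdf -o book.pdf
# Ghostscript (`gs`) must be on PATH, since the merge shells out to it.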
| 33.243243
| 91
| 0.614634
|
fd4cd54f45ed30c86f86d9c3b07c14e881463d5b
| 42,000
|
py
|
Python
|
discord/webhook.py
|
niborus/discord.py
|
23aaa75802e46ea13f49375c6abdc48e7b8c40dc
|
[
"MIT"
] | 1
|
2021-05-01T13:52:04.000Z
|
2021-05-01T13:52:04.000Z
|
discord/webhook.py
|
niborus/discord.py
|
23aaa75802e46ea13f49375c6abdc48e7b8c40dc
|
[
"MIT"
] | null | null | null |
discord/webhook.py
|
niborus/discord.py
|
23aaa75802e46ea13f49375c6abdc48e7b8c40dc
|
[
"MIT"
] | 1
|
2021-09-13T12:23:38.000Z
|
2021-09-13T12:23:38.000Z
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import logging
import asyncio
import json
import time
import re
from urllib.parse import quote as _uriquote
import aiohttp
from . import utils
from .errors import InvalidArgument, HTTPException, Forbidden, NotFound, DiscordServerError
from .message import Message
from .enums import try_enum, WebhookType
from .user import BaseUser, User
from .asset import Asset
from .mixins import Hashable
__all__ = (
'WebhookAdapter',
'AsyncWebhookAdapter',
'RequestsWebhookAdapter',
'Webhook',
'WebhookMessage',
'PartialWebhookChannel',
'PartialWebhookGuild'
)
log = logging.getLogger(__name__)
class PartialWebhookChannel(Hashable):
"""Represents a partial channel for webhooks.
These are typically given for channel follower webhooks.
.. versionadded:: 2.0
Attributes
-----------
id: :class:`int`
The partial channel's ID.
name: :class:`str`
The partial channel's name.
"""
__slots__ = ('id', 'name')
def __init__(self, *, data):
self.id = int(data['id'])
self.name = data['name']
def __repr__(self):
return f'<PartialWebhookChannel name={self.name!r} id={self.id}>'
class PartialWebhookGuild(Hashable):
"""Represents a partial guild for webhooks.
These are typically given for channel follower webhooks.
.. versionadded:: 2.0
Attributes
-----------
id: :class:`int`
The partial guild's ID.
name: :class:`str`
The partial guild's name.
icon: :class:`str`
The partial guild's icon
"""
__slots__ = ('id', 'name', 'icon', '_state')
def __init__(self, *, data, state):
self._state = state
self.id = int(data['id'])
self.name = data['name']
self.icon = data['icon']
def __repr__(self):
return f'<PartialWebhookGuild name={self.name!r} id={self.id}>'
@property
def icon_url(self):
""":class:`Asset`: Returns the guild's icon asset."""
return self.icon_url_as()
def is_icon_animated(self):
""":class:`bool`: Returns True if the guild has an animated icon."""
return bool(self.icon and self.icon.startswith('a_'))
def icon_url_as(self, *, format=None, static_format='webp', size=1024):
"""Returns an :class:`Asset` for the guild's icon.
The format must be one of 'webp', 'jpeg', 'jpg', 'png' or 'gif', and
        'gif' is only valid for animated icons. The size must be a power of 2
between 16 and 4096.
Parameters
-----------
format: Optional[:class:`str`]
The format to attempt to convert the icon to.
If the format is ``None``, then it is automatically
detected into either 'gif' or static_format depending on the
icon being animated or not.
static_format: Optional[:class:`str`]
Format to attempt to convert only non-animated icons to.
size: :class:`int`
The size of the image to display.
Raises
------
InvalidArgument
Bad image format passed to ``format`` or invalid ``size``.
Returns
--------
:class:`Asset`
The resulting CDN asset.
"""
return Asset._from_guild_icon(self._state, self, format=format, static_format=static_format, size=size)
class WebhookAdapter:
"""Base class for all webhook adapters.
Attributes
------------
webhook: :class:`Webhook`
The webhook that owns this adapter.
"""
BASE = 'https://discord.com/api/v8'
def _prepare(self, webhook):
self._webhook_id = webhook.id
self._webhook_token = webhook.token
self._request_url = f'{self.BASE}/webhooks/{webhook.id}/{webhook.token}'
self.webhook = webhook
def is_async(self):
return False
def request(self, verb, url, payload=None, multipart=None):
"""Actually does the request.
Subclasses must implement this.
Parameters
-----------
verb: :class:`str`
The HTTP verb to use for the request.
url: :class:`str`
The URL to send the request to. This will have
the query parameters already added to it, if any.
multipart: Optional[:class:`dict`]
A dict containing multipart form data to send with
the request. If a filename is being uploaded, then it will
be under a ``file`` key which will have a 3-element :class:`tuple`
denoting ``(filename, file, content_type)``.
payload: Optional[:class:`dict`]
The JSON to send with the request, if any.
"""
raise NotImplementedError()
def delete_webhook(self, *, reason=None):
return self.request('DELETE', self._request_url, reason=reason)
def edit_webhook(self, *, reason=None, **payload):
return self.request('PATCH', self._request_url, payload=payload, reason=reason)
def edit_webhook_message(self, message_id, payload):
return self.request('PATCH', f'{self._request_url}/messages/{message_id}', payload=payload)
def delete_webhook_message(self, message_id):
return self.request('DELETE', f'{self._request_url}/messages/{message_id}')
def handle_execution_response(self, data, *, wait):
"""Transforms the webhook execution response into something
more meaningful.
This is mainly used to convert the data into a :class:`Message`
if necessary.
Subclasses must implement this.
Parameters
------------
data
The data that was returned from the request.
wait: :class:`bool`
Whether the webhook execution was asked to wait or not.
"""
raise NotImplementedError()
async def _wrap_coroutine_and_cleanup(self, coro, cleanup):
try:
return await coro
finally:
cleanup()
def execute_webhook(self, *, payload, wait=False, file=None, files=None):
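        # Build either a plain JSON payload or a multipart form depending on
        # whether a single file or a list of files is attached; ``cleanup``
        # closes any open file objects once the request has gone out.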
cleanup = None
if file is not None:
multipart = {
'file': (file.filename, file.fp, 'application/octet-stream'),
'payload_json': utils.to_json(payload)
}
data = None
cleanup = file.close
files_to_pass = [file]
elif files is not None:
multipart = {
'payload_json': utils.to_json(payload)
}
for i, file in enumerate(files):
multipart['file%i' % i] = (file.filename, file.fp, 'application/octet-stream')
data = None
def _anon():
for f in files:
f.close()
cleanup = _anon
files_to_pass = files
else:
data = payload
multipart = None
files_to_pass = None
url = f'{self._request_url}?wait={int(wait)}'
maybe_coro = None
try:
maybe_coro = self.request('POST', url, multipart=multipart, payload=data, files=files_to_pass)
finally:
if maybe_coro is not None and cleanup is not None:
if not asyncio.iscoroutine(maybe_coro):
cleanup()
else:
maybe_coro = self._wrap_coroutine_and_cleanup(maybe_coro, cleanup)
# if request raises up there then this should never be `None`
return self.handle_execution_response(maybe_coro, wait=wait)
class AsyncWebhookAdapter(WebhookAdapter):
"""A webhook adapter suited for use with aiohttp.
.. note::
You are responsible for cleaning up the client session.
Parameters
-----------
session: :class:`aiohttp.ClientSession`
The session to use to send requests.
"""
def __init__(self, session):
self.session = session
self.loop = asyncio.get_event_loop()
def is_async(self):
return True
async def request(self, verb, url, payload=None, multipart=None, *, files=None, reason=None):
headers = {}
data = None
files = files or []
if payload:
headers['Content-Type'] = 'application/json'
data = utils.to_json(payload)
if reason:
headers['X-Audit-Log-Reason'] = _uriquote(reason, safe='/ ')
base_url = url.replace(self._request_url, '/') or '/'
_id = self._webhook_id
for tries in range(5):
for file in files:
file.reset(seek=tries)
if multipart:
data = aiohttp.FormData()
for key, value in multipart.items():
if key.startswith('file'):
data.add_field(key, value[1], filename=value[0], content_type=value[2])
else:
data.add_field(key, value)
async with self.session.request(verb, url, headers=headers, data=data) as r:
log.debug('Webhook ID %s with %s %s has returned status code %s', _id, verb, base_url, r.status)
# Coerce empty strings to return None for hygiene purposes
response = (await r.text(encoding='utf-8')) or None
if r.headers['Content-Type'] == 'application/json':
response = json.loads(response)
# check if we have rate limit header information
remaining = r.headers.get('X-Ratelimit-Remaining')
if remaining == '0' and r.status != 429:
delta = utils._parse_ratelimit_header(r)
log.debug('Webhook ID %s has been pre-emptively rate limited, waiting %.2f seconds', _id, delta)
await asyncio.sleep(delta)
if 300 > r.status >= 200:
return response
# we are being rate limited
if r.status == 429:
if not r.headers.get('Via'):
# Banned by Cloudflare more than likely.
raise HTTPException(r, data)
retry_after = response['retry_after'] / 1000.0
log.warning('Webhook ID %s is rate limited. Retrying in %.2f seconds', _id, retry_after)
await asyncio.sleep(retry_after)
continue
if r.status in (500, 502):
await asyncio.sleep(1 + tries * 2)
continue
if r.status == 403:
raise Forbidden(r, response)
elif r.status == 404:
raise NotFound(r, response)
else:
raise HTTPException(r, response)
# no more retries
if r.status >= 500:
raise DiscordServerError(r, response)
raise HTTPException(r, response)
async def handle_execution_response(self, response, *, wait):
data = await response
if not wait:
return data
# transform into Message object
# Make sure to coerce the state to the partial one to allow message edits/delete
state = _PartialWebhookState(self, self.webhook, parent=self.webhook._state)
return WebhookMessage(data=data, state=state, channel=self.webhook.channel)
class RequestsWebhookAdapter(WebhookAdapter):
"""A webhook adapter suited for use with ``requests``.
Only versions of :doc:`req:index` higher than 2.13.0 are supported.
Parameters
-----------
session: Optional[`requests.Session <http://docs.python-requests.org/en/latest/api/#requests.Session>`_]
The requests session to use for sending requests. If not given then
each request will create a new session. Note if a session is given,
the webhook adapter **will not** clean it up for you. You must close
the session yourself.
sleep: :class:`bool`
Whether to sleep the thread when encountering a 429 or pre-emptive
rate limit or a 5xx status code. Defaults to ``True``. If set to
``False`` then this will raise an :exc:`HTTPException` instead.
"""
def __init__(self, session=None, *, sleep=True):
import requests
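        # With no session given, fall back to the top-level ``requests``
        # module, which exposes the same per-call request() interface.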
self.session = session or requests
self.sleep = sleep
def request(self, verb, url, payload=None, multipart=None, *, files=None, reason=None):
headers = {}
data = None
files = files or []
if payload:
headers['Content-Type'] = 'application/json'
data = utils.to_json(payload)
if reason:
headers['X-Audit-Log-Reason'] = _uriquote(reason, safe='/ ')
if multipart is not None:
data = {'payload_json': multipart.pop('payload_json')}
base_url = url.replace(self._request_url, '/') or '/'
_id = self._webhook_id
for tries in range(5):
for file in files:
file.reset(seek=tries)
r = self.session.request(verb, url, headers=headers, data=data, files=multipart)
r.encoding = 'utf-8'
# Coerce empty responses to return None for hygiene purposes
response = r.text or None
# compatibility with aiohttp
r.status = r.status_code
log.debug('Webhook ID %s with %s %s has returned status code %s', _id, verb, base_url, r.status)
if r.headers['Content-Type'] == 'application/json':
response = json.loads(response)
# check if we have rate limit header information
remaining = r.headers.get('X-Ratelimit-Remaining')
if remaining == '0' and r.status != 429 and self.sleep:
delta = utils._parse_ratelimit_header(r)
log.debug('Webhook ID %s has been pre-emptively rate limited, waiting %.2f seconds', _id, delta)
time.sleep(delta)
if 300 > r.status >= 200:
return response
# we are being rate limited
if r.status == 429:
if self.sleep:
if not r.headers.get('Via'):
# Banned by Cloudflare more than likely.
raise HTTPException(r, data)
retry_after = response['retry_after'] / 1000.0
log.warning('Webhook ID %s is rate limited. Retrying in %.2f seconds', _id, retry_after)
time.sleep(retry_after)
continue
else:
raise HTTPException(r, response)
if self.sleep and r.status in (500, 502):
time.sleep(1 + tries * 2)
continue
if r.status == 403:
raise Forbidden(r, response)
elif r.status == 404:
raise NotFound(r, response)
else:
raise HTTPException(r, response)
# no more retries
if r.status >= 500:
raise DiscordServerError(r, response)
raise HTTPException(r, response)
def handle_execution_response(self, response, *, wait):
if not wait:
return response
# transform into Message object
# Make sure to coerce the state to the partial one to allow message edits/delete
state = _PartialWebhookState(self, self.webhook, parent=self.webhook._state)
return WebhookMessage(data=response, state=state, channel=self.webhook.channel)
class _FriendlyHttpAttributeErrorHelper:
__slots__ = ()
def __getattr__(self, attr):
raise AttributeError('PartialWebhookState does not support http methods.')
class _PartialWebhookState:
__slots__ = ('loop', 'parent', '_webhook')
def __init__(self, adapter, webhook, parent):
self._webhook = webhook
if isinstance(parent, self.__class__):
self.parent = None
else:
self.parent = parent
# Fetch the loop from the adapter if it's there
try:
self.loop = adapter.loop
except AttributeError:
self.loop = None
def _get_guild(self, guild_id):
return None
def store_user(self, data):
return BaseUser(state=self, data=data)
@property
def http(self):
if self.parent is not None:
return self.parent.http
# Some data classes assign state.http and that should be kosher
# however, using it should result in a late-binding error.
return _FriendlyHttpAttributeErrorHelper()
def __getattr__(self, attr):
if self.parent is not None:
return getattr(self.parent, attr)
raise AttributeError(f'PartialWebhookState does not support {attr!r}.')
class WebhookMessage(Message):
"""Represents a message sent from your webhook.
This allows you to edit or delete a message sent by your
webhook.
This inherits from :class:`discord.Message` with changes to
:meth:`edit` and :meth:`delete` to work.
.. versionadded:: 1.6
"""
def edit(self, **fields):
"""|maybecoro|
Edits the message.
The content must be able to be transformed into a string via ``str(content)``.
.. versionadded:: 1.6
Parameters
------------
content: Optional[:class:`str`]
The content to edit the message with or ``None`` to clear it.
embeds: List[:class:`Embed`]
A list of embeds to edit the message with.
embed: Optional[:class:`Embed`]
The embed to edit the message with. ``None`` suppresses the embeds.
This should not be mixed with the ``embeds`` parameter.
allowed_mentions: :class:`AllowedMentions`
Controls the mentions being processed in this message.
See :meth:`.abc.Messageable.send` for more information.
Raises
-------
HTTPException
Editing the message failed.
Forbidden
Edited a message that is not yours.
InvalidArgument
You specified both ``embed`` and ``embeds`` or the length of
``embeds`` was invalid or there was no token associated with
this webhook.
"""
return self._state._webhook.edit_message(self.id, **fields)
def _delete_delay_sync(self, delay):
time.sleep(delay)
return self._state._webhook.delete_message(self.id)
async def _delete_delay_async(self, delay):
async def inner_call():
await asyncio.sleep(delay)
try:
await self._state._webhook.delete_message(self.id)
except HTTPException:
pass
asyncio.create_task(inner_call())
return await asyncio.sleep(0)
def delete(self, *, delay=None):
"""|coro|
Deletes the message.
Parameters
-----------
delay: Optional[:class:`float`]
If provided, the number of seconds to wait before deleting the message.
If this is a coroutine, the waiting is done in the background and deletion failures
are ignored. If this is not a coroutine then the delay blocks the thread.
Raises
------
Forbidden
You do not have proper permissions to delete the message.
NotFound
The message was deleted already.
HTTPException
Deleting the message failed.
"""
if delay is not None:
if self._state.parent._adapter.is_async():
return self._delete_delay_async(delay)
else:
return self._delete_delay_sync(delay)
return self._state._webhook.delete_message(self.id)
class Webhook(Hashable):
"""Represents a Discord webhook.
    Webhooks are a way to send messages to channels in Discord without a
bot user or authentication.
There are two main ways to use Webhooks. The first is through the ones
received by the library such as :meth:`.Guild.webhooks` and
:meth:`.TextChannel.webhooks`. The ones received by the library will
automatically have an adapter bound using the library's HTTP session.
Those webhooks will have :meth:`~.Webhook.send`, :meth:`~.Webhook.delete` and
:meth:`~.Webhook.edit` as coroutines.
The second form involves creating a webhook object manually without having
it bound to a websocket connection using the :meth:`~.Webhook.from_url` or
:meth:`~.Webhook.partial` classmethods. This form allows finer grained control
over how requests are done, allowing you to mix async and sync code using either
:doc:`aiohttp <aio:index>` or :doc:`req:index`.
For example, creating a webhook from a URL and using :doc:`aiohttp <aio:index>`:
.. code-block:: python3
from discord import Webhook, AsyncWebhookAdapter
import aiohttp
async def foo():
async with aiohttp.ClientSession() as session:
webhook = Webhook.from_url('url-here', adapter=AsyncWebhookAdapter(session))
await webhook.send('Hello World', username='Foo')
Or creating a webhook from an ID and token and using :doc:`req:index`:
.. code-block:: python3
import requests
from discord import Webhook, RequestsWebhookAdapter
webhook = Webhook.partial(123456, 'abcdefg', adapter=RequestsWebhookAdapter())
webhook.send('Hello World', username='Foo')
.. container:: operations
.. describe:: x == y
Checks if two webhooks are equal.
.. describe:: x != y
Checks if two webhooks are not equal.
.. describe:: hash(x)
Returns the webhooks's hash.
.. versionchanged:: 1.4
Webhooks are now comparable and hashable.
Attributes
------------
id: :class:`int`
The webhook's ID
type: :class:`WebhookType`
The type of the webhook.
.. versionadded:: 1.3
token: Optional[:class:`str`]
The authentication token of the webhook. If this is ``None``
then the webhook cannot be used to make requests.
guild_id: Optional[:class:`int`]
The guild ID this webhook is for.
channel_id: Optional[:class:`int`]
The channel ID this webhook is for.
user: Optional[:class:`abc.User`]
The user this webhook was created by. If the webhook was
received without authentication then this will be ``None``.
name: Optional[:class:`str`]
The default name of the webhook.
avatar: Optional[:class:`str`]
The default avatar of the webhook.
source_guild: Optional[:class:`PartialWebhookGuild`]
The guild of the channel that this webhook is following.
Only given if :attr:`type` is :attr:`WebhookType.channel_follower`.
.. versionadded:: 2.0
source_channel: Optional[:class:`PartialWebhookChannel`]
The channel that this webhook is following.
Only given if :attr:`type` is :attr:`WebhookType.channel_follower`.
.. versionadded:: 2.0
"""
__slots__ = ('id', 'type', 'guild_id', 'channel_id', 'user', 'name',
'avatar', 'token', '_state', '_adapter', 'source_channel', 'source_guild')
def __init__(self, data, *, adapter, state=None):
self.id = int(data['id'])
self.type = try_enum(WebhookType, int(data['type']))
self.channel_id = utils._get_as_snowflake(data, 'channel_id')
self.guild_id = utils._get_as_snowflake(data, 'guild_id')
self.name = data.get('name')
self.avatar = data.get('avatar')
self.token = data.get('token')
self._state = state or _PartialWebhookState(adapter, self, parent=state)
self._adapter = adapter
self._adapter._prepare(self)
user = data.get('user')
if user is None:
self.user = None
elif state is None:
self.user = BaseUser(state=None, data=user)
else:
self.user = User(state=state, data=user)
source_channel = data.get('source_channel')
if source_channel:
source_channel = PartialWebhookChannel(data=source_channel)
self.source_channel = source_channel
source_guild = data.get('source_guild')
if source_guild:
source_guild = PartialWebhookGuild(data=source_guild, state=state)
self.source_guild = source_guild
def __repr__(self):
return f'<Webhook id={self.id!r}>'
@property
def url(self):
""":class:`str` : Returns the webhook's url."""
return f'https://discord.com/api/webhooks/{self.id}/{self.token}'
@classmethod
def partial(cls, id, token, *, adapter):
"""Creates a partial :class:`Webhook`.
Parameters
-----------
id: :class:`int`
The ID of the webhook.
token: :class:`str`
The authentication token of the webhook.
adapter: :class:`WebhookAdapter`
The webhook adapter to use when sending requests. This is
typically :class:`AsyncWebhookAdapter` for :doc:`aiohttp <aio:index>` or
:class:`RequestsWebhookAdapter` for :doc:`req:index`.
Returns
--------
:class:`Webhook`
A partial :class:`Webhook`.
A partial webhook is just a webhook object with an ID and a token.
"""
if not isinstance(adapter, WebhookAdapter):
raise TypeError('adapter must be a subclass of WebhookAdapter')
data = {
'id': id,
'type': 1,
'token': token
}
return cls(data, adapter=adapter)
@classmethod
def from_url(cls, url, *, adapter):
"""Creates a partial :class:`Webhook` from a webhook URL.
Parameters
------------
url: :class:`str`
The URL of the webhook.
adapter: :class:`WebhookAdapter`
The webhook adapter to use when sending requests. This is
typically :class:`AsyncWebhookAdapter` for :doc:`aiohttp <aio:index>` or
:class:`RequestsWebhookAdapter` for :doc:`req:index`.
Raises
-------
InvalidArgument
The URL is invalid.
Returns
--------
:class:`Webhook`
A partial :class:`Webhook`.
A partial webhook is just a webhook object with an ID and a token.
"""
        m = re.search(r'discord(?:app)?\.com/api/webhooks/(?P<id>[0-9]{17,20})/(?P<token>[A-Za-z0-9\.\-\_]{60,68})', url)
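        # A matching URL looks like (illustrative, not a real webhook):
        #   https://discord.com/api/webhooks/123456789012345678/<60-68 character token>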
if m is None:
raise InvalidArgument('Invalid webhook URL given.')
data = m.groupdict()
data['type'] = 1
return cls(data, adapter=adapter)
@classmethod
def _as_follower(cls, data, *, channel, user):
name = f"{channel.guild} #{channel}"
feed = {
'id': data['webhook_id'],
'type': 2,
'name': name,
'channel_id': channel.id,
'guild_id': channel.guild.id,
'user': {
'username': user.name,
'discriminator': user.discriminator,
'id': user.id,
'avatar': user.avatar
}
}
session = channel._state.http._HTTPClient__session
return cls(feed, adapter=AsyncWebhookAdapter(session=session))
@classmethod
def from_state(cls, data, state):
session = state.http._HTTPClient__session
return cls(data, adapter=AsyncWebhookAdapter(session=session), state=state)
@property
def guild(self):
"""Optional[:class:`Guild`]: The guild this webhook belongs to.
If this is a partial webhook, then this will always return ``None``.
"""
return self._state._get_guild(self.guild_id)
@property
def channel(self):
"""Optional[:class:`TextChannel`]: The text channel this webhook belongs to.
If this is a partial webhook, then this will always return ``None``.
"""
guild = self.guild
return guild and guild.get_channel(self.channel_id)
@property
def created_at(self):
""":class:`datetime.datetime`: Returns the webhook's creation time in UTC."""
return utils.snowflake_time(self.id)
@property
def avatar_url(self):
""":class:`Asset`: Returns an :class:`Asset` for the avatar the webhook has.
If the webhook does not have a traditional avatar, an asset for
the default avatar is returned instead.
This is equivalent to calling :meth:`avatar_url_as` with the
default parameters.
"""
return self.avatar_url_as()
def avatar_url_as(self, *, format=None, size=1024):
"""Returns an :class:`Asset` for the avatar the webhook has.
If the webhook does not have a traditional avatar, an asset for
the default avatar is returned instead.
The format must be one of 'jpeg', 'jpg', or 'png'.
The size must be a power of 2 between 16 and 1024.
Parameters
-----------
format: Optional[:class:`str`]
The format to attempt to convert the avatar to.
If the format is ``None``, then it is equivalent to png.
size: :class:`int`
The size of the image to display.
Raises
------
InvalidArgument
Bad image format passed to ``format`` or invalid ``size``.
Returns
--------
:class:`Asset`
The resulting CDN asset.
"""
if self.avatar is None:
# Default is always blurple apparently
return Asset(self._state, '/embed/avatars/0.png')
if not utils.valid_icon_size(size):
raise InvalidArgument("size must be a power of 2 between 16 and 1024")
format = format or 'png'
if format not in ('png', 'jpg', 'jpeg'):
raise InvalidArgument("format must be one of 'png', 'jpg', or 'jpeg'.")
url = f'/avatars/{self.id}/{self.avatar}.{format}?size={size}'
return Asset(self._state, url)
def delete(self, *, reason=None):
"""|maybecoro|
Deletes this Webhook.
If the webhook is constructed with a :class:`RequestsWebhookAdapter` then this is
not a coroutine.
Parameters
------------
reason: Optional[:class:`str`]
The reason for deleting this webhook. Shows up on the audit log.
.. versionadded:: 1.4
Raises
-------
HTTPException
Deleting the webhook failed.
NotFound
This webhook does not exist.
Forbidden
You do not have permissions to delete this webhook.
InvalidArgument
This webhook does not have a token associated with it.
"""
if self.token is None:
raise InvalidArgument('This webhook does not have a token associated with it')
return self._adapter.delete_webhook(reason=reason)
def edit(self, *, reason=None, **kwargs):
"""|maybecoro|
Edits this Webhook.
If the webhook is constructed with a :class:`RequestsWebhookAdapter` then this is
not a coroutine.
Parameters
------------
name: Optional[:class:`str`]
The webhook's new default name.
avatar: Optional[:class:`bytes`]
A :term:`py:bytes-like object` representing the webhook's new default avatar.
reason: Optional[:class:`str`]
The reason for editing this webhook. Shows up on the audit log.
.. versionadded:: 1.4
Raises
-------
HTTPException
Editing the webhook failed.
NotFound
This webhook does not exist.
InvalidArgument
This webhook does not have a token associated with it.
"""
if self.token is None:
raise InvalidArgument('This webhook does not have a token associated with it')
payload = {}
try:
name = kwargs['name']
except KeyError:
pass
else:
if name is not None:
payload['name'] = str(name)
else:
payload['name'] = None
try:
avatar = kwargs['avatar']
except KeyError:
pass
else:
if avatar is not None:
payload['avatar'] = utils._bytes_to_base64_data(avatar)
else:
payload['avatar'] = None
return self._adapter.edit_webhook(reason=reason, **payload)
def send(self, content=None, *, wait=False, username=None, avatar_url=None, tts=False,
file=None, files=None, embed=None, embeds=None, allowed_mentions=None):
"""|maybecoro|
Sends a message using the webhook.
If the webhook is constructed with a :class:`RequestsWebhookAdapter` then this is
not a coroutine.
The content must be a type that can convert to a string through ``str(content)``.
To upload a single file, the ``file`` parameter should be used with a
single :class:`File` object.
If the ``embed`` parameter is provided, it must be of type :class:`Embed` and
it must be a rich embed type. You cannot mix the ``embed`` parameter with the
``embeds`` parameter, which must be a :class:`list` of :class:`Embed` objects to send.
Parameters
------------
content: :class:`str`
The content of the message to send.
wait: :class:`bool`
Whether the server should wait before sending a response. This essentially
means that the return type of this function changes from ``None`` to
a :class:`WebhookMessage` if set to ``True``.
username: :class:`str`
The username to send with this message. If no username is provided
then the default username for the webhook is used.
avatar_url: Union[:class:`str`, :class:`Asset`]
The avatar URL to send with this message. If no avatar URL is provided
then the default avatar for the webhook is used.
tts: :class:`bool`
Indicates if the message should be sent using text-to-speech.
file: :class:`File`
The file to upload. This cannot be mixed with ``files`` parameter.
files: List[:class:`File`]
A list of files to send with the content. This cannot be mixed with the
``file`` parameter.
embed: :class:`Embed`
The rich embed for the content to send. This cannot be mixed with
``embeds`` parameter.
embeds: List[:class:`Embed`]
A list of embeds to send with the content. Maximum of 10. This cannot
be mixed with the ``embed`` parameter.
allowed_mentions: :class:`AllowedMentions`
Controls the mentions being processed in this message.
.. versionadded:: 1.4
Raises
--------
HTTPException
Sending the message failed.
NotFound
This webhook was not found.
Forbidden
The authorization token for the webhook is incorrect.
InvalidArgument
You specified both ``embed`` and ``embeds`` or the length of
``embeds`` was invalid or there was no token associated with
this webhook.
Returns
---------
Optional[:class:`WebhookMessage`]
The message that was sent.
"""
payload = {}
if self.token is None:
raise InvalidArgument('This webhook does not have a token associated with it')
if files is not None and file is not None:
raise InvalidArgument('Cannot mix file and files keyword arguments.')
if embeds is not None and embed is not None:
raise InvalidArgument('Cannot mix embed and embeds keyword arguments.')
if embeds is not None:
if len(embeds) > 10:
raise InvalidArgument('embeds has a maximum of 10 elements.')
payload['embeds'] = [e.to_dict() for e in embeds]
if embed is not None:
payload['embeds'] = [embed.to_dict()]
if content is not None:
payload['content'] = str(content)
payload['tts'] = tts
if avatar_url:
payload['avatar_url'] = str(avatar_url)
if username:
payload['username'] = username
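        # Merge the state's default allowed mentions with the per-call
        # override, letting fields set on the explicit argument win.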
previous_mentions = getattr(self._state, 'allowed_mentions', None)
if allowed_mentions:
if previous_mentions is not None:
payload['allowed_mentions'] = previous_mentions.merge(allowed_mentions).to_dict()
else:
payload['allowed_mentions'] = allowed_mentions.to_dict()
elif previous_mentions is not None:
payload['allowed_mentions'] = previous_mentions.to_dict()
return self._adapter.execute_webhook(wait=wait, file=file, files=files, payload=payload)
def execute(self, *args, **kwargs):
"""An alias for :meth:`~.Webhook.send`."""
return self.send(*args, **kwargs)
def edit_message(self, message_id, **fields):
"""|maybecoro|
Edits a message owned by this webhook.
This is a lower level interface to :meth:`WebhookMessage.edit` in case
you only have an ID.
.. versionadded:: 1.6
Parameters
------------
message_id: :class:`int`
The message ID to edit.
content: Optional[:class:`str`]
The content to edit the message with or ``None`` to clear it.
embeds: List[:class:`Embed`]
A list of embeds to edit the message with.
embed: Optional[:class:`Embed`]
The embed to edit the message with. ``None`` suppresses the embeds.
This should not be mixed with the ``embeds`` parameter.
allowed_mentions: :class:`AllowedMentions`
Controls the mentions being processed in this message.
See :meth:`.abc.Messageable.send` for more information.
Raises
-------
HTTPException
Editing the message failed.
Forbidden
Edited a message that is not yours.
InvalidArgument
You specified both ``embed`` and ``embeds`` or the length of
``embeds`` was invalid or there was no token associated with
this webhook.
"""
payload = {}
if self.token is None:
raise InvalidArgument('This webhook does not have a token associated with it')
try:
content = fields['content']
except KeyError:
pass
else:
if content is not None:
content = str(content)
payload['content'] = content
# Check if the embeds interface is being used
try:
embeds = fields['embeds']
except KeyError:
# Nope
pass
else:
if embeds is None or len(embeds) > 10:
raise InvalidArgument('embeds has a maximum of 10 elements')
payload['embeds'] = [e.to_dict() for e in embeds]
try:
embed = fields['embed']
except KeyError:
pass
else:
if 'embeds' in payload:
raise InvalidArgument('Cannot mix embed and embeds keyword arguments')
if embed is None:
payload['embeds'] = []
else:
payload['embeds'] = [embed.to_dict()]
allowed_mentions = fields.pop('allowed_mentions', None)
previous_mentions = getattr(self._state, 'allowed_mentions', None)
if allowed_mentions:
if previous_mentions is not None:
payload['allowed_mentions'] = previous_mentions.merge(allowed_mentions).to_dict()
else:
payload['allowed_mentions'] = allowed_mentions.to_dict()
elif previous_mentions is not None:
payload['allowed_mentions'] = previous_mentions.to_dict()
return self._adapter.edit_webhook_message(message_id, payload=payload)
def delete_message(self, message_id):
"""|maybecoro|
Deletes a message owned by this webhook.
This is a lower level interface to :meth:`WebhookMessage.delete` in case
you only have an ID.
.. versionadded:: 1.6
Parameters
------------
message_id: :class:`int`
The message ID to delete.
Raises
-------
HTTPException
Deleting the message failed.
Forbidden
Deleted a message that is not yours.
"""
return self._adapter.delete_webhook_message(message_id)
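# A minimal usage sketch (not part of the original module), assuming a valid
# webhook ID and token; shows the lower level message interface when only a
# message ID is at hand:
#
#   from discord import Webhook, RequestsWebhookAdapter
#   webhook = Webhook.partial(123456, 'token-here', adapter=RequestsWebhookAdapter())
#   message = webhook.send('initial content', wait=True)
#   webhook.edit_message(message.id, content='edited content')
#   webhook.delete_message(message.id)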
| 34.511093
| 120
| 0.593071
|
ffcff8ad8e0b39d7965dd2c25331a3dfad136954
| 648
|
py
|
Python
|
Coursera/Week.2/Task.46.py
|
v1nnyb0y/Coursera.BasePython
|
bbfb3184dc27a4cdb16b087123890991afbc5506
|
[
"MIT"
] | null | null | null |
Coursera/Week.2/Task.46.py
|
v1nnyb0y/Coursera.BasePython
|
bbfb3184dc27a4cdb16b087123890991afbc5506
|
[
"MIT"
] | null | null | null |
Coursera/Week.2/Task.46.py
|
v1nnyb0y/Coursera.BasePython
|
bbfb3184dc27a4cdb16b087123890991afbc5506
|
[
"MIT"
] | null | null | null |
'''
Maximum number of consecutive equal elements
'''
# Read integers one per line until a terminating 0 and print the length of
# the longest run of consecutive equal values; `longest` avoids shadowing
# the built-in max().
prev = int(input())
longest = 1
run = 1
while prev != 0:
    cur = int(input())
    if cur == 0:
        break
    if cur == prev:
        run += 1
        longest = max(longest, run)
    else:
        run = 1
    prev = cur
print(longest)
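# Example (assumed one number per line): the input 1 2 2 2 3 0 prints 3.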
| 17.052632
| 39
| 0.324074
|
5e82f4a790c103c9550fb48e550e7c2f038dca1f
| 2,645
|
py
|
Python
|
networkmonitor/src/cleanTime.py
|
luther38/NetworkMonitor
|
bd5e9336342e7af71c19ca484f8d386b6b79ad69
|
[
"MIT"
] | null | null | null |
networkmonitor/src/cleanTime.py
|
luther38/NetworkMonitor
|
bd5e9336342e7af71c19ca484f8d386b6b79ad69
|
[
"MIT"
] | null | null | null |
networkmonitor/src/cleanTime.py
|
luther38/NetworkMonitor
|
bd5e9336342e7af71c19ca484f8d386b6b79ad69
|
[
"MIT"
] | null | null | null |
import datetime
class CleanTime():
"""
This class cleans and checks the refresh timer.
If the value is higher then expected it will be cleaned up.
Methods:
"""
    def __init__(self):
        self.nextDay: bool = False  # set when the refresh rolls past midnight
        self.hour: int = 0
        self.minute: int = 0
        self.second: int = 0
    def GetNextNodeRefreshTime(self, sleepTimer: int, lastRefresh: datetime.datetime):
        """
        Returns the wall-clock time (HH:MM:SS) at which nodes are checked again
        :param sleepTimer = Contains the sleep timer value from config, assumed to be in seconds
        :param lastRefresh = Contains the datetime value of the last refresh
        """
        # Split the sleep timer into hour/minute/second offsets, add each to
        # the matching field of the last refresh, and propagate the carries.
        hours, remainder = divmod(int(sleepTimer), 3600)
        minutes, seconds = divmod(remainder, 60)
        carry = self.__CleanSecondValue(lastRefresh.second + seconds)
        carry = self.__CleanMinuteValue(lastRefresh.minute + minutes + carry)
        self.__CleanHourValue(lastRefresh.hour + hours + carry)
        return self.__GetMessage()
    def __CleanSecondValue(self, second:int) -> int:
        # Store the normalized seconds; return the carry into minutes.
        self.second = second % 60
        return second // 60
    def __CleanMinuteValue(self, minute:int) -> int:
        # Store the normalized minutes; return the carry into hours.
        self.minute = minute % 60
        return minute // 60
    def __CleanHourValue(self, hour:int):
        # Store the normalized hour; flag rollover into the next day.
        self.nextDay = hour >= 24
        self.hour = hour % 24
    def __GetMessage(self):
        # Zero-pad each field into an HH:MM:SS string.
        return f"{self.hour:02}:{self.minute:02}:{self.second:02}"
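# A minimal usage sketch (not part of the original module): a 90 second sleep
# timer applied to a last refresh of 10:59:30 lands on 11:01:00.
#
#   ct = CleanTime()
#   ct.GetNextNodeRefreshTime(90, datetime.datetime(2020, 1, 1, 10, 59, 30))
#   # -> '11:01:00'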
| 26.989796
| 77
| 0.514178
|
2b785663df7a7f14cb6a2cef320e7e9ad4bde272
| 3,041
|
py
|
Python
|
samples/modules/tensorflow/magic_wand/train/train_test.py
|
lviala-zaack/zephyr
|
bf3c6e7ba415dd85f1b68eb69ea2779b234c686f
|
[
"Apache-2.0"
] | 6,224
|
2016-06-24T20:04:19.000Z
|
2022-03-31T20:33:45.000Z
|
samples/modules/tensorflow/magic_wand/train/train_test.py
|
Conexiotechnologies/zephyr
|
fde24ac1f25d09eb9722ce4edc6e2d3f844b5bce
|
[
"Apache-2.0"
] | 32,027
|
2017-03-24T00:02:32.000Z
|
2022-03-31T23:45:53.000Z
|
samples/modules/tensorflow/magic_wand/train/train_test.py
|
Conexiotechnologies/zephyr
|
fde24ac1f25d09eb9722ce4edc6e2d3f844b5bce
|
[
"Apache-2.0"
] | 4,374
|
2016-08-11T07:28:47.000Z
|
2022-03-31T14:44:59.000Z
|
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for train.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import numpy as np
import tensorflow as tf
from train import build_cnn
from train import build_lstm
from train import load_data
from train import reshape_function
class TestTrain(unittest.TestCase):
def setUp(self): # pylint: disable=g-missing-super-call
self.seq_length = 128
self.train_len, self.train_data, self.valid_len, self.valid_data, \
self.test_len, self.test_data = \
load_data("./data/train", "./data/valid", "./data/test",
self.seq_length)
def test_load_data(self):
self.assertIsInstance(self.train_data, tf.data.Dataset)
self.assertIsInstance(self.valid_data, tf.data.Dataset)
self.assertIsInstance(self.test_data, tf.data.Dataset)
def test_build_net(self):
cnn, cnn_path = build_cnn(self.seq_length)
lstm, lstm_path = build_lstm(self.seq_length)
cnn_data = np.random.rand(60, 128, 3, 1)
lstm_data = np.random.rand(60, 128, 3)
cnn_prob = cnn(tf.constant(cnn_data, dtype="float32")).numpy()
lstm_prob = lstm(tf.constant(lstm_data, dtype="float32")).numpy()
self.assertIsInstance(cnn, tf.keras.Sequential)
self.assertIsInstance(lstm, tf.keras.Sequential)
self.assertEqual(cnn_path, "./netmodels/CNN")
self.assertEqual(lstm_path, "./netmodels/LSTM")
self.assertEqual(cnn_prob.shape, (60, 4))
self.assertEqual(lstm_prob.shape, (60, 4))
def test_reshape_function(self):
for data, label in self.train_data:
original_data_shape = data.numpy().shape
original_label_shape = label.numpy().shape
break
self.train_data = self.train_data.map(reshape_function)
for data, label in self.train_data:
reshaped_data_shape = data.numpy().shape
reshaped_label_shape = label.numpy().shape
break
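    # e.g. an original (128, 3) sample becomes (128 * 3 / 3, 3, 1) = (128, 3, 1),
    # adding a trailing channel dimension while keeping three values per row.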
self.assertEqual(
reshaped_data_shape,
(int(original_data_shape[0] * original_data_shape[1] / 3), 3, 1))
self.assertEqual(reshaped_label_shape, original_label_shape)
if __name__ == "__main__":
unittest.main()
| 38.493671
| 81
| 0.664255
|
a9380bd9fde05212416699847c1942e9dca3009b
| 10,001
|
py
|
Python
|
google/appengine/tools/gen_protorpc.py
|
vladushakov987/appengine_python3
|
0dd481c73e2537a50ee10f1b79cd65938087e555
|
[
"Apache-2.0"
] | null | null | null |
google/appengine/tools/gen_protorpc.py
|
vladushakov987/appengine_python3
|
0dd481c73e2537a50ee10f1b79cd65938087e555
|
[
"Apache-2.0"
] | null | null | null |
google/appengine/tools/gen_protorpc.py
|
vladushakov987/appengine_python3
|
0dd481c73e2537a50ee10f1b79cd65938087e555
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Command line tool for generating ProtoRPC definitions from descriptors."""
from future import standard_library
standard_library.install_aliases()
from builtins import str
import errno
import logging
import optparse
import os
import sys
from protorpc import descriptor
from protorpc import generate_python
from protorpc import protobuf
from protorpc import registry
from protorpc import transport
from protorpc import util
import six
EXCLUDED_PACKAGES = frozenset(['protorpc.registry',
'protorpc.messages',
'protorpc.descriptor',
'protorpc.message_types',
])
commands = {}
def usage():
"""Print usage help and exit with an error code."""
parser.print_help()
sys.exit(2)
def fatal_error(message):
"""Print fatal error messages exit with an error code.
Args:
message: Message to print to stderr before exit.
"""
sys.stderr.write(message)
sys.exit(1)
def open_input_file(filename):
"""Open file for reading.
Args:
filename: Name of input file to open or None to open stdin.
Returns:
Opened file if string provided, stdin if filename is None.
"""
# TODO(rafek): Detect missing or invalid files, generating user friendly
# error messages.
if filename is None:
return sys.stdin
else:
try:
return open(filename, 'rb')
except IOError as err:
fatal_error(str(err))
@util.positional(1)
def generate_file_descriptor(dest_dir, file_descriptor, force_overwrite):
"""Generate a single file descriptor to destination directory.
Will generate a single Python file from a file descriptor under dest_dir.
The sub-directory where the file is generated is determined by the package
name of descriptor.
Descriptors without package names will not be generated.
Descriptors that are part of the ProtoRPC distribution will not be generated.
Args:
dest_dir: Directory under which to generate files.
file_descriptor: FileDescriptor instance to generate source code from.
force_overwrite: If True, existing files will be overwritten.
"""
package = file_descriptor.package
if not package:
# TODO(rafek): Option to cause an error on this condition.
    logging.warning('Will not generate descriptor without package name')
return
if package in EXCLUDED_PACKAGES:
    logging.warning('Will not generate main ProtoRPC class %s' % package)
return
package_path = package.split('.')
directory = package_path[:-1]
package_file_name = package_path[-1]
directory_name = os.path.join(dest_dir, *directory)
output_file_name = os.path.join(directory_name,
'%s.py' % (package_file_name,))
try:
os.makedirs(directory_name)
except OSError as err:
if err.errno != errno.EEXIST:
raise
if not force_overwrite and os.path.exists(output_file_name):
    logging.warning('Not overwriting %s with package %s',
                    output_file_name, package)
return
  logging.info('Writing package %s to %s',
               file_descriptor.package, output_file_name)
  # Use a context manager so the generated file is closed once written.
  with open(output_file_name, 'w') as output_file:
    generate_python.format_python_file(file_descriptor, output_file)
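# e.g. a file descriptor whose package is 'acme.api.users' is written to
# <dest_dir>/acme/api/users.py, with intermediate directories created as needed.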
@util.positional(1)
def command(name, required=(), optional=()):
"""Decorator used for declaring commands used on command line.
Each command of this tool can have any number of sequential required
parameters and optional parameters. The required and optional parameters
will be displayed in the command usage. Arguments passed in to the command
are checked to ensure they have at least the required parameters and not
too many parameters beyond the optional ones. When there are not enough
or too few parameters the usage message is generated and the program exits
with an error code.
Functions decorated thus are added to commands by their name.
  Resulting decorated functions will have required and optional attributes
  assigned to them so that they appear in the usage message.
Args:
name: Name of command that will follow the program name on the command line.
required: List of required parameter names as displayed in the usage
message.
optional: List of optional parameter names as displayed in the usage
message.
"""
def check_params_decorator(function):
def check_params_wrapper(options, *args):
if not (len(required) <= len(args) <= len(required) + len(optional)):
sys.stderr.write("Incorrect usage for command '%s'\n\n" % name)
usage()
function(options, *args)
check_params_wrapper.required = required
check_params_wrapper.optional = optional
commands[name] = check_params_wrapper
return check_params_wrapper
return check_params_decorator
@command('file', optional=['input-filename', 'output-filename'])
def file_command(options, input_filename=None, output_filename=None):
"""Generate a single descriptor file to Python.
Args:
options: Parsed command line options.
input_filename: File to read protobuf FileDescriptor from. If None
will read from stdin.
output_filename: File to write Python source code to. If None will
generate to stdout.
"""
with open_input_file(input_filename) as input_file:
descriptor_content = input_file.read()
if output_filename:
output_file = open(output_filename, 'w')
else:
output_file = sys.stdout
file_descriptor = protobuf.decode_message(descriptor.FileDescriptor,
descriptor_content)
generate_python.format_python_file(file_descriptor, output_file)
@command('fileset', optional=['filename'])
def fileset_command(options, input_filename=None):
"""Generate source directory structure from FileSet.
Args:
options: Parsed command line options.
input_filename: File to read protobuf FileSet from. If None will read from
stdin.
"""
with open_input_file(input_filename) as input_file:
descriptor_content = input_file.read()
dest_dir = os.path.expanduser(options.dest_dir)
if not os.path.isdir(dest_dir) and os.path.exists(dest_dir):
fatal_error("Destination '%s' is not a directory" % dest_dir)
file_set = protobuf.decode_message(descriptor.FileSet,
descriptor_content)
for file_descriptor in file_set.files:
generate_file_descriptor(dest_dir, file_descriptor=file_descriptor,
force_overwrite=options.force)
@command('registry',
required=['host'],
optional=['service-name', 'registry-path'])
def registry_command(options,
host,
service_name=None,
registry_path='/protorpc'):
"""Generate source directory structure from remote registry service.
Args:
options: Parsed command line options.
host: Web service host where registry service is located. May include
port.
service_name: Name of specific service to read. Will generate only Python
files that service is dependent on. If None, will generate source code
for all services known by the registry.
registry_path: Path to find registry if not the default 'protorpc'.
"""
dest_dir = os.path.expanduser(options.dest_dir)
url = 'http://%s%s' % (host, registry_path)
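  # e.g. host 'localhost:8080' with the default registry path yields
  # 'http://localhost:8080/protorpc'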
reg = registry.RegistryService.Stub(transport.HttpTransport(url))
if service_name is None:
service_names = [service.name for service in reg.services().services]
else:
service_names = [service_name]
file_set = reg.get_file_set(names=service_names).file_set
for file_descriptor in file_set.files:
generate_file_descriptor(dest_dir, file_descriptor=file_descriptor,
force_overwrite=options.force)
def make_opt_parser():
"""Create options parser with automatically generated command help.
Will iterate over all functions in commands and generate an appropriate
usage message for them with all their required and optional parameters.
"""
command_descriptions = []
for name in sorted(six.iterkeys(commands)):
command = commands[name]
params = ' '.join(['<%s>' % param for param in command.required] +
['[<%s>]' % param for param in command.optional])
command_descriptions.append('%%prog [options] %s %s' % (name, params))
command_usage = 'usage: %s\n' % '\n '.join(command_descriptions)
parser = optparse.OptionParser(usage=command_usage)
parser.add_option('-d', '--dest_dir',
dest='dest_dir',
default=os.getcwd(),
help='Write generated files to DIR',
metavar='DIR')
parser.add_option('-f', '--force',
action='store_true',
dest='force',
default=False,
help='Force overwrite of existing files')
return parser
parser = make_opt_parser()
def main():
# TODO(rafek): Customize verbosity.
logging.basicConfig(level=logging.INFO)
options, positional = parser.parse_args()
if not positional:
usage()
command_name = positional[0]
command = commands.get(command_name)
if not command:
sys.stderr.write("Unknown command '%s'\n\n" % command_name)
usage()
parameters = positional[1:]
command(options, *parameters)
if __name__ == '__main__':
main()
| 32.683007
| 80
| 0.69913
|
df1c08ad4f1e03bb72c5c8af0872403b8e289ffc
| 5,702
|
py
|
Python
|
configs/NoC/system_setup.py
|
Maiux92/gem5-NVM-multiple-memory-spaces
|
0996bfd34638a7f3f05382cc1e7a813a177eed7f
|
[
"MIT"
] | 3
|
2021-04-24T16:08:00.000Z
|
2022-03-22T22:07:40.000Z
|
configs/NoC/system_setup.py
|
Maiux92/gem5-NVM-multiple-memory-spaces
|
0996bfd34638a7f3f05382cc1e7a813a177eed7f
|
[
"MIT"
] | null | null | null |
configs/NoC/system_setup.py
|
Maiux92/gem5-NVM-multiple-memory-spaces
|
0996bfd34638a7f3f05382cc1e7a813a177eed7f
|
[
"MIT"
] | 1
|
2021-03-25T16:55:08.000Z
|
2021-03-25T16:55:08.000Z
|
#!/usr/bin/python3
import optparse
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('../../gem5-20.1/configs/')
#addToPath('../../')
import json
import os
# from network import Network
from common import Options
from noc_network import NoCSystem
from noc_config import noc_config
#-------------------------------------------#
# Add Script arguments #
#-------------------------------------------#
parser = optparse.OptionParser()
# Options.addCommonOptions(parser)
# Options.addSEOptions(parser)
#Ruby.define_options(parser)
#Network.define_options(parser)
parser.add_option("--noc-config", type="string", default="noc_config.json",
help="""select noc configuration files (default: noc_config.json).""")
parser.add_option("--router-latency", action="store", type="int",
default=1,
help="""number of pipeline stages in the garnet router.
Has to be >= 1.
Can be over-ridden on a per router basis
in the topology file.""")
parser.add_option("--link-latency", action="store", type="int", default=1,
help="""latency of each link the simple/garnet networks.
Has to be >= 1.
Can be over-ridden on a per link basis
in the topology file.""")
parser.add_option("--link-width-bits", action="store", type="int",
default=128,
help="width in bits for all links inside garnet.")
parser.add_option("--vcs-per-vnet", action="store", type="int", default=4,
help="""number of virtual channels per virtual network
inside garnet network.""")
parser.add_option("--garnet-deadlock-threshold", action="store",
type="int", default=50000,
help="network-level deadlock threshold.")
parser.add_option("--nvm_type", action="store",
type="string", default='STTRAM',
help="Supported: SRAM, FRAM, MRAM, RRAM, STTRAM")
parser.add_option("--input-start-address", action="store",
type="int", default=0x60000000,
help="Input memory start address")
parser.add_option("--input-addr-size", action="store",
type="string", default='512MB',
help="Input memory address range")
#parser.add_option("--output-memtype", action="store",
# type="string", default='STTRAM',
# help="Supported: SRAM, FRAM, MRAM, RRAM, STTRAM")
parser.add_option("--output-start-address", action="store",
type="int", default=0x90000000,
help="Output memory start address")
parser.add_option("--output-addr-size", action="store",
type="string", default='512MB',
help="Output memory address range")
(options, args) = parser.parse_args()
file_path = os.path.dirname(os.path.abspath(__file__)) + \
"/" + \
options.noc_config
print("NVM: {}".format(options.nvm_type))
"""
with open(file_path, "r") as f:
#print(f.read())
options.map = json.loads(f.read())
options.mesh_rows = len(options.map)
"""
options.map = noc_config
options.mesh_rows = len(options.map)
#-------------------------------------------#
#-------------------------------------------#
# Create system #
#-------------------------------------------#
system = System(mem_mode = 'timing')
#-------------------------------------------#
#-------------------------------------------#
# SET system voltages #
#-------------------------------------------#
# Create a top-level voltage domain
system.voltage_domain = VoltageDomain()
# Create a source clock for the system and set the clock period
system.clk_domain = SrcClockDomain(clock = '1GHz',
voltage_domain = system.voltage_domain)
# Create a CPU voltage domain
system.cpu_voltage_domain = VoltageDomain()
# Create a separate clock domain for the CPUs
system.cpu_clk_domain = SrcClockDomain(clock = '1GHz',
voltage_domain = system.cpu_voltage_domain)
#-------------------------------------------#
#-------------------------------------------#
# Create NoCSystem #
#-------------------------------------------#
system.noc_network = NoCSystem()
system.noc_network.setup(system, options)
#-------------------------------------------#
#-------------------------------------------#
# Init CPUs process #
#-------------------------------------------#
# Multi-thread single file support
process = Process()
process.cmd = ['../benchmarks/a.out']
for (i, cpu) in enumerate(system.cpu):
# Re-enable for single-process multiple cpu + multiple parallel execution of same program
#process = Process(pid = (100 + i))
##process.cmd = ['../benchmarks/{}.out'.format(i)]
#process.cmd = ['../benchmarks/a.out']
cpu.workload = process
cpu.createThreads()
#-------------------------------------------#
root = Root(full_system = False, system = system)
#Simulation.run(options, root, system, FutureClass)
#-------------------------------------------#
# Create system instance and launch #
# simulation #
#-------------------------------------------#
m5.instantiate()
print("Beginning simulation!")
exit_event = m5.simulate()
#-------------------------------------------#
print('Exiting @ tick {} because {}'
.format(m5.curTick(), exit_event.getCause()))
| 34.557576
| 93
| 0.520344
|