content stringlengths 5 1.05M |
|---|
from dataclasses import dataclass
from enum import Enum
@dataclass(frozen=True)
class Meta:
    """Immutable metadata attached to a DMARC report row."""
    # Identifier of the party that produced the report.
    reporter: str
    # Domain taken from the message's From header.
    from_domain: str
    # Domain associated with the DKIM check of the message.
    dkim_domain: str
    # Domain associated with the SPF check of the message.
    spf_domain: str
class Disposition(Enum):
    """DMARC policy disposition applied to a message.

    The member is named NONE_VALUE (value "none") to avoid confusion with
    Python's built-in None.
    """
    NONE_VALUE = "none"
    QUARANTINE = "quarantine"
    REJECT = "reject"
@dataclass(frozen=True)
class DmarcResult:
    """Immutable evaluation outcome for a single DMARC report row."""
    disposition: Disposition
    dkim_pass: bool
    spf_pass: bool
    dkim_aligned: bool
    spf_aligned: bool

    @property
    def dmarc_compliant(self) -> bool:
        """True when at least one mechanism both passes and is aligned."""
        dkim_ok = self.dkim_pass and self.dkim_aligned
        spf_ok = self.spf_pass and self.spf_aligned
        return dkim_ok or spf_ok
@dataclass(frozen=True)
class DmarcEvent:
    """One aggregated DMARC event: a message count plus its metadata and
    evaluated authentication result."""
    # Number of messages covered by this event.
    count: int
    meta: Meta
    result: DmarcResult
|
from pathlib import Path
import sys
import logging
import pytest
from germanetpy.germanet import Germanet
import numpy as np
from lxml import etree as ET
from germanetpy.synset import ConRel
# Module-level fixture: load the GermaNet resources from ../data once for all
# tests in this module. On failure the problem is logged and the process exits
# with status 0 so a missing (licensed) data set is not reported as a crash.
logger = logging.getLogger('logging_test_synset')
d = str(Path(__file__).parent.parent) + "/data"
try:
    germanet_data = Germanet(d)
except ET.ParseError:
    # Bug fix: the exception class used to be passed as an extra argument to
    # logger.error() although the message contains no %-placeholders, which
    # caused an internal logging formatting error. logger.exception() records
    # the message together with the active traceback instead.
    message = ("Unable to load GermaNet data at {0} . Aborting...").format(d)
    logger.exception(message)
    sys.exit(0)
except IOError:
    message = ("GermaNet data not found at {0} . Aborting...").format(d)
    logger.exception(message)
    sys.exit(0)
# Each tuple: (synset id, outgoing conceptual relation, expected related ids).
conceptual_relations = [
    ('s46683', ConRel.has_component_meronym, ['s39494']),
    ('s8813', ConRel.has_component_meronym,
     ['s5731', 's40989', 's6270', 's8925', 's7580', 's6281', 's6377', 's8929', 's6384', 's6383', 's10140', 's25669',
      's9020', 's40988', 's6032', 's8880', 's6385', 's40985', 's6003', 's5998']),
    ('s59726', ConRel.causes, ['s3075']),
    ('s42888', ConRel.is_related_to, ['s84261', 's41336']),
    ('s41689', ConRel.has_component_holonym, ['s42888'])
]
# Each tuple: (synset id, incoming conceptual relation, expected source ids).
conceptual_incoming_relations = [
    ('s58848', ConRel.causes, ['s56473']),
    ('s41689', ConRel.has_component_meronym, ['s42888']),
    ('s54503', ConRel.entails, ['s54618', 's54339'])
]
# Each tuple: (synset id, ids of all transitive hypernyms up to the root).
all_hypernyms = [
    ('s131', ['s91', 's90', 's0', 's51001']),
    ('s50944',
     ['s27100', 's27099', 's27094', 's27090', 's23307', 's22562', 's47101', 's47083', 's50999', 's50981', 's50980',
      's51001', 's27075', 's27071', 's26979', 's50708', 's50915', 's48805', 's50997', 's50990', 's50986', 's50982',
      's50706', 's50688', 's50687', 's50519', 's50498', 's49812', 's49800', 's48873']),
    ('s57835', ['s57779', 's57714', 's57713', 's57324', 's57318', 's57309', 's60939', 's51001'])
]
# Each tuple: (synset id, ids of all transitive hyponyms; empty for leaves).
all_hyponyms = [
    ('s131', []),
    ('s50944', ['s132135', 's132134', 's132133']),
    ('s53071', ['s53072', 's53073']),
    ('s11302',
     ['s136666', 's11193', 's11194', 's134108', 's122100', 's129336', 's123104', 's122867', 's29492',
      's82838', 's147555', 's147995', 's147814', 's149441',
      's101538', 's10919', 's10937', 's104197', 's106059', 's110131', 's90623', 's10920', 's88973', 's68093', 's64311',
      's29494', 's100276', 's97802', 's88563', 's88561', 's81894', 's71826', 's71198', 's63234', 's11306', 's11305',
      's11304', 's11303', 's107850'])
]
# Each tuple: (start id, end id, the unique shortest path as an id sequence).
paths_between_synsets_nouns = [
    ('s50708', 's48836', ['s50708', 's50915', 's50696', 's48836']),
    ('s50708', 's34063', ['s50708', 's50915', 's48805', 's50997', 's34063']),
    ('s34063', 's8813',
     ['s34063', 's50997', 's50990', 's50986', 's50982', 's50981', 's50999', 's5550', 's5675', 's8702', 's8714', 's8716',
      's8813']),
    ('s42337', 's73124', ['s42337', 's9938', 's9918', 's47083', 's73124']),
    ('s46665', 's100607', ['s46665', 's46042', 's100607']),
    ('s46665', 's46683', ['s46665', 's46042', 's46682', 's46683'])
]
# Each tuple: (start id, end id, list of ALL equally short paths).
several_paths = [
    ('s46683', 's46650', [['s46683', 's46682', 's46042', 's46041', 's44960', 's46650'],
                          ['s46683', 's46682', 's46311', 's44965', 's44960', 's46650'],
                          ])
]
# Shortest paths between adjective synsets.
paths_between_synsets_adj = [
    ('s3', 's158', ['s3', 's79860', 's2246', 's2245', 's51001', 's4452', 's154', 's155', 's158']),
    ('s96631', 's805', ['s96631', 's21', 's2', 's1', 's0', 's90', 's214', 's242', 's805'])
]
# Shortest paths between verb synsets.
paths_between_synsets_verbs = [
    ('s52219', 's52747', ['s52219', 's52202', 's59388', 's51948', 's51946', 's51892', 's52746', 's52747']),
    ('s57835', 's52201',
     ['s57835', 's57779', 's57714', 's57713', 's57324', 's57318', 's57309', 's57310', 's57316', 's57476', 's52201'])
]
# Each tuple: (id1, id2, expected lowest common subsumer ids) for nouns.
LCS_between_nouns = [
    ('s50708', 's48836', ['s50915']),
    ('s50708', 's34063', ['s50997']),
    ('s34063', 's8813', ['s50981']),
    ('s42337', 's73124', ['s47083']),
    ('s39494', 's39495', ['s39491']),
    ('s39494', 's46042', ['s50981']),
    ('s50869', 's11106', ['s50981']),
    ('s46665', 's7922', ['s7917']),
    ('s46657', 's46659', ['s46657']),
    ('s50944', 's50708', ['s50708']),
    ('s50708', 's50944', ['s50708']),
    ('s50708', 's50708', ['s50708'])
]
# LCS expectations for adjectives (a pair can have several subsumers).
LCS_between_adj = [
    ('s3', 's158', ['s51001']),
    ('s96631', 's805', ['s0']),
    ('s94411', 's94543', ['s0', 's51001']),
    ('s94411', 's94396', ['s51001'])
]
# LCS expectations for verbs.
LCS_between_verbs = [
    ('s52219', 's52747', ['s51892']),
    ('s107484', 's61151', ['s52270'])
]
# Each tuple: (synset id, hypernym id, expected distance in edges).
distances_hypernyms = [
    ('s50944', 's50708', 1),
    ('s50944', 's50706', 2),
    ('s50944', 's50688', 3),
    ('s50944', 's50687', 4),
    ('s50944', 's50519', 5),
    ('s50944', 's50498', 6),
    ('s50944', 's49812', 7),
    ('s50944', 's49800', 8),
    ('s50944', 's48873', 9),
    ('s50944', 's48805', 3),
    ('s50944', 's50997', 4)
]
@pytest.mark.parametrize('id,hypernym_id, distance', distances_hypernyms)
def test_hypernym_distance_dic(id, hypernym_id, distance):
    """Verify the precomputed hypernym-distance dictionary of a synset."""
    start = germanet_data.get_synset_by_id(id)
    target = germanet_data.get_synset_by_id(hypernym_id)
    observed = start.get_distances_hypernym_dic()[target]
    np.testing.assert_equal(observed, distance)
@pytest.mark.parametrize('id,hypernym_ids', all_hypernyms)
def test_all_hypernyms(id, hypernym_ids):
    """Verify that every transitive hypernym of the given synset is returned."""
    node = germanet_data.get_synset_by_id(id)
    found = sorted(hyper.id for hyper in node.all_hypernyms())
    np.testing.assert_equal(found, sorted(hypernym_ids))
@pytest.mark.parametrize('id,hyponym_ids', all_hyponyms)
def test_all_hyponyms(id, hyponym_ids):
    """Verify that every transitive hyponym of the given synset is returned."""
    node = germanet_data.get_synset_by_id(id)
    found = sorted(hypo.id for hypo in node.all_hyponyms())
    np.testing.assert_equal(found, sorted(hyponym_ids))
def test_root():
    """The designated GermaNet root synset is a root and not a leaf."""
    root = germanet_data.get_synset_by_id('s51001')
    np.testing.assert_equal(root.is_root(), True)
    np.testing.assert_equal(root.is_leaf(), False)
def test_leafs():
    """Known leaf synsets report is_leaf() == True and are not roots."""
    for leaf_id in ('s6675', 's136315', 's10765', 's106594', 's131'):
        node = germanet_data.get_synset_by_id(leaf_id)
        np.testing.assert_equal(node.is_root(), False)
        np.testing.assert_equal(node.is_leaf(), True)
def get_shortest_paths(id1, id2):
    """Auxiliary helper: return THE unique shortest path between two synsets.

    Asserts that exactly one shortest path exists, because the path tests
    below compare against a single expected path.
    """
    source = germanet_data.get_synset_by_id(id1)
    target = germanet_data.get_synset_by_id(id2)
    paths = source.shortest_path(target)
    assert len(paths) == 1, "do not test for synsets with several shortest paths"
    return paths[0]
@pytest.mark.parametrize('id1,id2,expected_path_ids', several_paths)
def test_several_paths(id1, id2, expected_path_ids):
    """Tests whether several shortest paths between two synsets are correct."""
    syn1 = germanet_data.get_synset_by_id(id1)
    syn2 = germanet_data.get_synset_by_id(id2)
    paths = syn1.shortest_path(syn2)
    # Fix: corrected the typo "macht" -> "match" in the failure message.
    assert len(paths) == len(expected_path_ids), "the number of found paths doesn't match the true number of paths"
    for path in paths:
        # Use a new name instead of rebinding the loop variable `path`.
        path_ids = [synset.id for synset in path]
        np.testing.assert_equal(path_ids in expected_path_ids, True)
@pytest.mark.parametrize('id1,id2,expected_path_ids', paths_between_synsets_nouns)
def test_paths_nouns(id1, id2, expected_path_ids):
    """The unique shortest path between two noun synsets matches the expectation."""
    ids_on_path = [node.id for node in get_shortest_paths(id1, id2)]
    np.testing.assert_equal(ids_on_path, expected_path_ids)
@pytest.mark.parametrize('id1,id2,expected_path_ids', paths_between_synsets_adj)
def test_paths_adj(id1, id2, expected_path_ids):
    """The unique shortest path between two adjective synsets matches the expectation."""
    ids_on_path = [node.id for node in get_shortest_paths(id1, id2)]
    np.testing.assert_equal(ids_on_path, expected_path_ids)
@pytest.mark.parametrize('id1,id2,expected_path_ids', paths_between_synsets_verbs)
def test_paths_verbs(id1, id2, expected_path_ids):
    """The unique shortest path between two verb synsets matches the expectation."""
    ids_on_path = [node.id for node in get_shortest_paths(id1, id2)]
    np.testing.assert_equal(ids_on_path, expected_path_ids)
@pytest.mark.parametrize('id1,id2,expected_ids', LCS_between_nouns)
def test_lcs_nouns(id1, id2, expected_ids):
    """Lowest common subsumers of two noun synsets match the expectation."""
    first = germanet_data.get_synset_by_id(id1)
    second = germanet_data.get_synset_by_id(id2)
    observed = sorted(subsumer.id for subsumer in first.lowest_common_subsumer(second))
    np.testing.assert_equal(observed, sorted(expected_ids))
@pytest.mark.parametrize('id1,id2,expected_ids', LCS_between_verbs)
def test_lcs_verbs(id1, id2, expected_ids):
    """Lowest common subsumers of two verb synsets match the expectation."""
    first = germanet_data.get_synset_by_id(id1)
    second = germanet_data.get_synset_by_id(id2)
    observed = sorted(subsumer.id for subsumer in first.lowest_common_subsumer(second))
    np.testing.assert_equal(observed, sorted(expected_ids))
@pytest.mark.parametrize('id1,id2,expected_ids', LCS_between_adj)
def test_lcs_adjectives(id1, id2, expected_ids):
    """Lowest common subsumers of two adjective synsets match the expectation."""
    first = germanet_data.get_synset_by_id(id1)
    second = germanet_data.get_synset_by_id(id2)
    observed = sorted(subsumer.id for subsumer in first.lowest_common_subsumer(second))
    np.testing.assert_equal(observed, sorted(expected_ids))
@pytest.mark.parametrize('id,conrel,expected_ids', conceptual_relations)
def test_conceptional_relations(id, conrel, expected_ids):
    """A synset's outgoing conceptual relations of the given type are correct."""
    node = germanet_data.get_synset_by_id(id)
    observed = sorted(related.id for related in node.relations[conrel])
    np.testing.assert_equal(observed, sorted(expected_ids))
@pytest.mark.parametrize('id,conrel,expected_ids', conceptual_incoming_relations)
def test_incoming_conceptional_relations(id, conrel, expected_ids):
    """A synset's incoming conceptual relations of the given type are correct."""
    node = germanet_data.get_synset_by_id(id)
    observed = sorted(related.id for related in node.incoming_relations[conrel])
    np.testing.assert_equal(observed, sorted(expected_ids))
|
#! /usr/bin/python
import unittest
import application.bootstrap
from application.models.IsgGurbaniDb import IsgGurbaniDb
class test_IsgGurbaniDb(unittest.TestCase):
    """Integration tests for the IsgGurbaniDb model (requires the app DB)."""
    _object = None

    def setUp(self):
        # Fresh model instance per test, configured from the app bootstrap.
        self._object = IsgGurbaniDb(application.bootstrap.config)

    def test_first_letter_search(self):
        query = 'hjkkqp'  # Har Jeeo Kirapaa Karahu Thum Piaarae
        self.assertEqual(self._object.first_letter_search(query),
                         [(26355, 2289, u'Har Jeeo Kirapaa Karahu Thum Piaarae ||')],
                         "does it return a sabad for a known query")
        query = 'zzzzzzzz1222'
        self.assertEqual(self._object.first_letter_search(query), [], "does it return an empty array for a dodgy query")
        # If No Query passed in
        with self.assertRaises(TypeError):
            self._object.first_letter_search()

    def test_get_sabad_by_sabad_id(self):
        sabad_id = 1283  # sahib kare kabool
        expected_data = [(14760, 323, u'sggs', 7, 1283, u'm\xda 5 ]', u'M\u0117hl\u0101 5.', u'Fifth Mehl:'), (
            14761, 323, u'sggs', 8, 1283, u'jwicku mMgY inq nwmu swihbu kry kbUlu ]',
            u'J\u0101c\u1e96ik mangai ni\u1e6f n\u0101m s\u0101hib kare kab\u016bl.',
            u"If the beggar begs for the Lord's Name every day, his Lord and Master will grant his request."), (
            14762, 323, u'sggs', 8, 1283, u'nwnk prmysru jjmwnu iqsih BuK n mUil ]2]',
            u'N\u0101nak parmesar jajm\u0101n \u1e6fis\u0117h b\u1e96uk\u1e96 na m\u016bl. ||2||',
            u'O Nanak, the Transcendent Lord is the most generous host; He does not lack anything at all. ||2||')]
        self.assertEqual(self._object.get_sabad_by_sabad_id(sabad_id), expected_data,
                         "does it return a sabad for a known sabad_id")
        sabad_id = 0
        self.assertEqual(self._object.get_sabad_by_sabad_id(sabad_id), [],
                         "does it return an empty array for a dodgy sabad_id")
        # If No id passed in
        with self.assertRaises(TypeError):
            # Bug fix: this previously called first_letter_search(), so the
            # no-argument behaviour of get_sabad_by_sabad_id was never tested.
            self._object.get_sabad_by_sabad_id()
"""
Copyright 2019 Brain Electrophysiology Laboratory Company LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this module except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
ANY KIND, either express or implied.
"""
from os import makedirs
from os.path import splitext, exists, join
from subprocess import check_output
import xml.etree.ElementTree as ET
from typing import Dict, Any
from .dict2xml import dict2xml
from .xml_files import XML
from .bin_writer import BinWriter, StreamingBinWriter
from .devices import coordinates_and_sensor_layout
import json
__all__ = ['Writer', 'BinWriter', 'StreamingBinWriter']
class Writer:
    """Write an EGI MFF recording: a '<name>.mff' directory of .xml/.bin
    files, optionally compressed to '.mfz' via the external `mff2mfz.py`
    tool, or a plain .json export."""

    def __init__(self, filename: str):
        # Validated by the `filename` property setter below (must not exist
        # yet; extension must be .mff/.mfz/.json).
        self.filename = filename
        # Maps output filename -> (content, XML type): content is an
        # ElementTree for .xml entries or a BinWriter for .bin entries.
        self.files: Dict[str, Any] = {}
        self.num_bin_files = 0
        # Directory is always '<base>.mff' even when exporting to .mfz.
        self.mffdir, self.ext = splitext(self.filename)
        self.mffdir += '.mff'
        self.file_created = False

    def create_directory(self):
        """Creates the directory for the recording."""
        if not self.file_created:
            # exist_ok=False: fail loudly rather than overwrite a recording.
            makedirs(self.mffdir, exist_ok=False)
            self.file_created = True

    def write(self):
        """write contents to .mff/.mfz file"""
        self.create_directory()
        # write .xml/.bin files. For .xml files we need to set the default
        # namespace to avoid `ns0:` being prepended to each tag.
        for filename, (content, typ) in self.files.items():
            if '.xml' == splitext(filename)[1]:
                # typ._xmlns is of the form '{uri}'; strip the braces.
                ET.register_namespace('', typ._xmlns[1:-1])
            content.write(join(self.mffdir, filename), encoding='UTF-8',
                          xml_declaration=True, method='xml')
        # convert from .mff to .mfz
        if self.ext == '.mfz':
            check_output(['mff2mfz.py', self.mffdir])

    def export_to_json(self, data):
        """export data to .json file"""
        # create .json file
        with open(self.filename, 'w') as file:
            json.dump(data, file, indent=4)

    def addxml(self, xmltype, filename=None, **kwargs):
        """Add an .xml file to the collection
        **Parameters**
        *xmltype*: determines to which `XML.todict` the kwargs are passed
        *filename*: (defaults `content['filename']`) filename of the xml file
        """
        content = XML.todict(xmltype, **kwargs)
        content_filename = content.pop('filename')
        filename = filename or content_filename
        # NOTE(review): `_tag_registry` appears to live on the XML metaclass
        # and map xmltype -> XML subclass — confirm against xml_files.py.
        self.files[filename] = (
            dict2xml(**content), type(XML)._tag_registry[xmltype])

    def addbin(self, binfile: BinWriter, filename=None):
        """Add the .bin file to the collection
        **Parameters**
        *binfile*: `class BinWriter` to be added to the collection
        *filename*: (defaults to `binfile.default_filename_fmt %
            self.num_bin_files`) filename of the bin file. It's not
            recommended to change this default value.
        """
        self.num_bin_files += 1
        binname = filename or \
            (binfile.default_filename_fmt % self.num_bin_files)
        binfile.check_compatibility(binname)
        infoname = binfile.default_info_filename_fmt % self.num_bin_files
        self.files[binname] = (binfile, type(binfile))
        # Every .bin file gets a matching dataInfo .xml companion file.
        self.addxml('dataInfo', filename=infoname, **binfile.get_info_kwargs())
        if self.num_bin_files == 1:
            # "epochs.xml" is only added for the first binary file
            self.addxml('epochs', epochs=binfile.epochs)

    def add_coordinates_and_sensor_layout(self, device: str) -> None:
        """Add coordinates.xml and sensorLayout.xml to the writer
        **Parameters**
        *device*: name string of a device. Valid choices are in
        "mffpy/resources/coordinates".
        """
        xmls = coordinates_and_sensor_layout(device)
        for name, xml in xmls.items():
            self.files[name + '.xml'] = (ET.ElementTree(xml.root), type(xml))

    @property
    def filename(self) -> str:
        # Output path; assigned once in __init__ through the setter below.
        return self._filename

    @filename.setter  # type: ignore
    def filename(self, fn: str):
        """check filename with .mff/.mfz extension does not exist"""
        base, ext = splitext(fn)
        assert ext in ('.mff', '.mfz', '.json')
        assert not exists(fn), f"File '{fn}' exists already"
        if ext == '.mfz':
            # Compressing would also create the sibling .mff directory.
            assert not exists(base + '.mff')
        self._filename = fn
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
import csv
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.url import urljoin_rfc
from scrapy.utils.response import get_base_url
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from product_spiders.fuzzywuzzy import process
from product_spiders.fuzzywuzzy import fuzz
HERE = os.path.abspath(os.path.dirname(__file__))
class JbhifiSpider(BaseSpider):
    """Scrapy (legacy 0.x / Python 2) spider for jbhifionline.com.au:
    top navigation -> sidebar subcategories -> paginated product listings."""
    name = 'jbhifi.com.au'
    allowed_domains = ['jbhifi.com.au', 'jbhifionline.com.au']
    start_urls = ['http://www.jbhifionline.com.au']

    def parse(self, response):
        # Follow every top-navigation category link on the home page.
        hxs = HtmlXPathSelector(response)
        relative_urls = hxs.select('//*[@id="outernavigation"]/table/tr/td/a/@href').extract()
        for relative_url in relative_urls:
            url = urljoin_rfc(get_base_url(response), relative_url)
            yield Request(url, callback=self.parse_subcategories)

    def parse_subcategories(self, response):
        # Follow the left-hand sidebar subcategory links of a category page.
        hxs = HtmlXPathSelector(response)
        relative_urls = hxs.select('//*[@id="leftNav"]/div[@class="sidenav"]/ul/li/a/@href').extract()
        for relative_url in relative_urls:
            url = urljoin_rfc(get_base_url(response), relative_url)
            yield Request(url, callback=self.parse_products)

    def parse_products(self, response):
        # Yield one Product per result container; the price is scraped from
        # the alt text of the price image.
        hxs = HtmlXPathSelector(response)
        products = hxs.select('//div[@class="result_container"]')
        for product in products:
            loader = ProductLoader(item=Product(), selector=product)
            loader.add_xpath('name', 'div/div/div/div/h1/a/text()')
            url = urljoin_rfc(get_base_url(response), product.select('div/div/div/div/h1/a/@href').extract()[0])
            loader.add_value('url', url)
            loader.add_xpath('price', 'div//div[@class="price-image-layer"]/img/@alt')
            yield loader.load_item()
        # Follow the "next page" (») pagination link, if present. The
        # .decode('utf') turns the byte string (this file is latin-1
        # encoded) into a unicode XPath under Python 2.
        next = hxs.select('//div[@class="CatNavigation"]/a[text()="»"]/@href'.decode('utf')).extract()
        if next:
            url = urljoin_rfc(get_base_url(response), next[0])
            yield Request(url, callback=self.parse_products)
|
#!/usr/bin/env python3
"""
Python 3 examples using Numbers
Author: Miguel Rentes
https://github.com/rentes/python3-code-snippets/blob/master/core_datatypes/numbers/numbers.py
Python version: 3.7.2
"""
result = 123 + 456  # Integer Addition
print(result)  # Result: 579
print(type(result))  # result is an object of type 'int'
result = 1.23 + 456  # Float Addition: mixing float and int yields a float
print(result)  # Result: 457.23
print(type(result))  # result is an object of type 'float'
result = 123 - 456  # Integer Subtraction
print(result)  # Result: -333
print(type(result))  # result is an object of type 'int'
result = 1.23 - 456  # Float Subtraction
print(result)  # Result: -454.77
print(type(result))  # result is an object of type 'float'
result = 123 * 456  # Integer Multiplication
print(result)  # Result: 56088
print(type(result))  # result is an object of type 'int'
result = 123 / 456  # True Division: in Python 3, `/` always returns a float
print(result)  # Result: 0.26973684210526316
print(type(result))  # result is an object of type 'float'
result = 123 ** 2  # Integer Exponentiation, e.g., successive multiplications. 123 ** 2 = 123 * 123
print(result)  # Result: 15129
print(type(result))  # result is an object of type 'int'
result = 1.23 ** 2  # Float Exponentiation
print(result)  # Result: 1.5129
print(type(result))  # result is an object of type 'float'
|
# Copyright (c) 2017 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from cinder.policies import base
# MANAGE_POLICY is deprecated: it was a single catch-all rule covering
# group-type create/update/delete; the granular rules below replace it.
MANAGE_POLICY = 'group:group_types_manage'
CREATE_POLICY = 'group:group_types:create'
UPDATE_POLICY = 'group:group_types:update'
DELETE_POLICY = 'group:group_types:delete'
# Controls showing a group type together with its spec attributes.
SHOW_ACCESS_POLICY = 'group:access_group_types_specs'
# SPEC_POLICY is deprecated: it was a catch-all for all spec operations;
# the per-verb spec rules below replace it.
SPEC_POLICY = 'group:group_types_specs'
SPEC_GET_POLICY = 'group:group_types_specs:get'
SPEC_GET_ALL_POLICY = 'group:group_types_specs:get_all'
SPEC_CREATE_POLICY = 'group:group_types_specs:create'
SPEC_UPDATE_POLICY = 'group:group_types_specs:update'
SPEC_DELETE_POLICY = 'group:group_types_specs:delete'
# Deprecated catch-all rule, kept so deployments that still override the
# old MANAGE_POLICY keep working during the transition.
deprecated_manage_policy = base.CinderDeprecatedRule(
    name=MANAGE_POLICY,
    check_str=base.RULE_ADMIN_API,
    deprecated_reason=(f'{MANAGE_POLICY} has been replaced by more granular '
                       'policies that separately govern POST, PUT, and DELETE '
                       'operations.'),
)
# Deprecated catch-all rule for group-type specs, kept for deployments
# that still override the old SPEC_POLICY.
deprecated_spec_policy = base.CinderDeprecatedRule(
    name=SPEC_POLICY,
    check_str=base.RULE_ADMIN_API,
    deprecated_reason=(f'{SPEC_POLICY} has been replaced by more granular '
                       'policies that separately govern GET, POST, PUT, and '
                       'DELETE operations.'),
)
# Admin-only policies for the group-types API. Each granular rule carries
# the deprecated catch-all rule it replaces; SHOW_ACCESS_POLICY never had
# a deprecated predecessor.
group_types_policies = [
    policy.DocumentedRuleDefault(
        name=CREATE_POLICY,
        check_str=base.RULE_ADMIN_API,
        description="Create a group type.",
        operations=[
            {
                'method': 'POST',
                'path': '/group_types/'
            },
        ],
        deprecated_rule=deprecated_manage_policy,
    ),
    policy.DocumentedRuleDefault(
        name=UPDATE_POLICY,
        check_str=base.RULE_ADMIN_API,
        description="Update a group type.",
        operations=[
            {
                'method': 'PUT',
                'path': '/group_types/{group_type_id}'
            },
        ],
        deprecated_rule=deprecated_manage_policy,
    ),
    policy.DocumentedRuleDefault(
        name=DELETE_POLICY,
        check_str=base.RULE_ADMIN_API,
        description="Delete a group type.",
        operations=[
            {
                'method': 'DELETE',
                'path': '/group_types/{group_type_id}'
            },
        ],
        deprecated_rule=deprecated_manage_policy,
    ),
    policy.DocumentedRuleDefault(
        name=SHOW_ACCESS_POLICY,
        check_str=base.RULE_ADMIN_API,
        description="Show group type with type specs attributes.",
        operations=[
            {
                'method': 'GET',
                'path': '/group_types/{group_type_id}'
            }
        ]
    ),
    policy.DocumentedRuleDefault(
        name=SPEC_GET_POLICY,
        check_str=base.RULE_ADMIN_API,
        description="Show a group type spec.",
        operations=[
            {
                'method': 'GET',
                'path': '/group_types/{group_type_id}/group_specs/{g_spec_id}'
            },
        ],
        deprecated_rule=deprecated_spec_policy,
    ),
    policy.DocumentedRuleDefault(
        name=SPEC_GET_ALL_POLICY,
        check_str=base.RULE_ADMIN_API,
        description="List group type specs.",
        operations=[
            {
                'method': 'GET',
                'path': '/group_types/{group_type_id}/group_specs'
            },
        ],
        deprecated_rule=deprecated_spec_policy,
    ),
    policy.DocumentedRuleDefault(
        name=SPEC_CREATE_POLICY,
        check_str=base.RULE_ADMIN_API,
        description="Create a group type spec.",
        operations=[
            {
                'method': 'POST',
                'path': '/group_types/{group_type_id}/group_specs'
            },
        ],
        deprecated_rule=deprecated_spec_policy,
    ),
    policy.DocumentedRuleDefault(
        name=SPEC_UPDATE_POLICY,
        check_str=base.RULE_ADMIN_API,
        description="Update a group type spec.",
        operations=[
            {
                'method': 'PUT',
                'path': '/group_types/{group_type_id}/group_specs/{g_spec_id}'
            },
        ],
        deprecated_rule=deprecated_spec_policy,
    ),
    policy.DocumentedRuleDefault(
        name=SPEC_DELETE_POLICY,
        check_str=base.RULE_ADMIN_API,
        description="Delete a group type spec.",
        operations=[
            {
                'method': 'DELETE',
                'path': '/group_types/{group_type_id}/group_specs/{g_spec_id}'
            },
        ],
        deprecated_rule=deprecated_spec_policy,
    ),
]
def list_rules():
    """Return the group-type policies for registration with oslo.policy."""
    return group_types_policies
|
import angr
from angr.sim_type import SimTypeString
class strcpy(angr.SimProcedure):
    """Symbolic summary (SimProcedure) for libc's strcpy."""
    # pylint:disable=arguments-differ

    def run(self, dst, src):
        # Declared prototype: char *strcpy(char *dst, const char *src).
        self.argument_types = {0: self.ty_ptr(SimTypeString()),
                               1: self.ty_ptr(SimTypeString())}
        self.return_type = self.ty_ptr(SimTypeString())
        strlen = angr.SIM_PROCEDURES['libc']['strlen']
        strncpy = angr.SIM_PROCEDURES['libc']['strncpy']
        # Copy strlen(src)+1 bytes so the NUL terminator is included;
        # strncpy's return expression (dst) becomes strcpy's return value.
        src_len = self.inline_call(strlen, src)
        ret_expr = self.inline_call(strncpy, dst, src, src_len.ret_expr + 1, src_len=src_len.ret_expr).ret_expr
        return ret_expr
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
References
.. [Zhe2019hogp]
S. Zhe, W. Xing, and R. M. Kirby. Scalable high-order gaussian process regression.
Proceedings of Machine Learning Research, volume 89, Apr 2019.
"""
from __future__ import annotations
import warnings
from contextlib import ExitStack
from typing import Any, List, Optional, Union, Tuple
import torch
from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel
from botorch.models.transforms.input import InputTransform
from botorch.models.transforms.outcome import OutcomeTransform, Standardize
from botorch.models.utils import gpt_posterior_settings
from botorch.posteriors import (
GPyTorchPosterior,
HigherOrderGPPosterior,
TransformedPosterior,
)
from gpytorch.constraints import GreaterThan
from gpytorch.distributions import MultivariateNormal
from gpytorch.kernels import Kernel, MaternKernel
from gpytorch.lazy import (
BatchRepeatLazyTensor,
DiagLazyTensor,
KroneckerProductLazyTensor,
LazyTensor,
ZeroLazyTensor,
)
from gpytorch.likelihoods import (
GaussianLikelihood,
Likelihood,
)
from gpytorch.models import ExactGP
from gpytorch.priors.torch_priors import GammaPrior, MultivariateNormalPrior
from gpytorch.settings import fast_pred_var, skip_posterior_variances
from torch import Tensor
from torch.nn import ModuleList, Parameter, ParameterList
MIN_INFERRED_NOISE_LEVEL = 1e-4
class FlattenedStandardize(Standardize):
    r"""
    Standardize outcomes in a structured multi-output settings by reshaping the
    batched output dimensions to be a vector. Specifically, an output dimension
    of [a x b x c] will be squeezed to be a vector of [a * b * c].
    """

    def __init__(
        self,
        output_shape: torch.Size,
        batch_shape: Optional[torch.Size] = None,
        min_stdv: float = 1e-8,
    ):
        # Standardize over a single flattened output column (m=1); the
        # structured shape is restored on the way out of each method.
        if batch_shape is None:
            batch_shape = torch.Size()
        super(FlattenedStandardize, self).__init__(
            m=1, outputs=None, batch_shape=batch_shape, min_stdv=min_stdv
        )
        self.output_shape = output_shape
        self.batch_shape = batch_shape

    def _squeeze_to_single_output(self, tsr: Tensor) -> Tensor:
        # Flatten the trailing output dims (and the preceding sample dim)
        # into a `... x (-1) x 1` column so Standardize sees one output.
        dim_ct = tsr.ndim - len(self.output_shape) - 1
        return tsr.reshape(*tsr.shape[:dim_ct], -1, 1)

    def _return_to_output_shape(self, tsr: Tensor) -> Tensor:
        # Inverse of _squeeze_to_single_output: restore the structured dims.
        out = tsr.reshape(*tsr.shape[:-2], -1, *self.output_shape)
        return out

    def forward(
        self, Y: Tensor, Yvar: Optional[Tensor] = None
    ) -> Tuple[Tensor, Optional[Tensor]]:
        # Flatten -> standardize (parent class) -> restore the shape.
        Y = self._squeeze_to_single_output(Y)
        if Yvar is not None:
            Yvar = self._squeeze_to_single_output(Yvar)
        Y, Yvar = super().forward(Y, Yvar)
        Y_out = self._return_to_output_shape(Y)
        if Yvar is not None:
            Yvar_out = self._return_to_output_shape(Yvar)
        else:
            Yvar_out = None
        return Y_out, Yvar_out

    def untransform(
        self, Y: Tensor, Yvar: Optional[Tensor] = None
    ) -> Tuple[Tensor, Optional[Tensor]]:
        # Same flatten/restore sandwich around the parent's untransform.
        Y = self._squeeze_to_single_output(Y)
        if Yvar is not None:
            Yvar = self._squeeze_to_single_output(Yvar)
        Y, Yvar = super().untransform(Y, Yvar)
        Y = self._return_to_output_shape(Y)
        if Yvar is not None:
            Yvar = self._return_to_output_shape(Yvar)
        return Y, Yvar

    def untransform_posterior(
        self, posterior: HigherOrderGPPosterior
    ) -> TransformedPosterior:
        # TODO: return a HigherOrderGPPosterior once rescaling constant
        # muls * LazyTensors won't force a dense decomposition rather than a
        # Kronecker structured one.
        return TransformedPosterior(
            posterior=posterior,
            sample_transform=lambda s: self._return_to_output_shape(
                self.means + self.stdvs * self._squeeze_to_single_output(s)
            ),
            mean_transform=lambda m, v: self._return_to_output_shape(
                self.means + self.stdvs * self._squeeze_to_single_output(m)
            ),
            variance_transform=lambda m, v: self._return_to_output_shape(
                self._stdvs_sq * self._squeeze_to_single_output(v)
            ),
        )
class HigherOrderGP(BatchedMultiOutputGPyTorchModel, ExactGP):
r"""
A Higher order Gaussian process model (HOGP) (predictions are matrices/tensors) as
described in [Zhe2019hogp]_. The posterior uses Matheron's rule [Doucet2010sampl]_
as described in [Maddox2021bohdo]_.
"""
def __init__(
    self,
    train_X: Tensor,
    train_Y: Tensor,
    likelihood: Optional[Likelihood] = None,
    covar_modules: Optional[List[Kernel]] = None,
    num_latent_dims: Optional[List[int]] = None,
    learn_latent_pars: bool = True,
    latent_init: str = "default",
    outcome_transform: Optional[OutcomeTransform] = None,
    input_transform: Optional[InputTransform] = None,
):
    r"""A HigherOrderGP model for high-dim output regression.
    Args:
        train_X: A `batch_shape x n x d`-dim tensor of training inputs.
        train_Y: A `batch_shape x n x output_shape`-dim tensor of training targets.
        likelihood: Gaussian likelihood for the model.
        covar_modules: List of kernels for each output structure.
        num_latent_dims: Sizes for the latent dimensions.
        learn_latent_pars: If true, learn the latent parameters.
        latent_init: [default or gp] how to initialize the latent parameters.
        outcome_transform: An optional outcome transform; a plain
            `Standardize` is replaced by `FlattenedStandardize` (see below).
        input_transform: An optional input transform applied in `forward`.
    Raises:
        NotImplementedError: If `train_X` has more than one batch dimension.
    """
    if input_transform is not None:
        input_transform.to(train_X)
    # infer the dimension of `output_shape`.
    num_output_dims = train_Y.dim() - train_X.dim() + 1
    batch_shape = train_X.shape[:-2]
    if len(batch_shape) > 1:
        raise NotImplementedError(
            "HigherOrderGP currently only supports 1-dim `batch_shape`."
        )
    if outcome_transform is not None:
        if isinstance(outcome_transform, Standardize) and not isinstance(
            outcome_transform, FlattenedStandardize
        ):
            # A plain Standardize cannot handle the structured outputs, so
            # silently substituting FlattenedStandardize is warned about.
            warnings.warn(
                "HigherOrderGP does not support the outcome_transform "
                "`Standardize`! Using `FlattenedStandardize` with `output_shape="
                f"{train_Y.shape[- num_output_dims:]} and batch_shape="
                f"{batch_shape} instead.",
                RuntimeWarning,
            )
            outcome_transform = FlattenedStandardize(
                output_shape=train_Y.shape[-num_output_dims:],
                batch_shape=batch_shape,
            )
        train_Y, _ = outcome_transform(train_Y)
    self._aug_batch_shape = batch_shape
    self._num_dimensions = num_output_dims + 1
    self._num_outputs = train_Y.shape[0] if batch_shape else 1
    self.target_shape = train_Y.shape[-num_output_dims:]
    self._input_batch_shape = batch_shape
    if likelihood is None:
        # Default likelihood mirrors the standard BoTorch setup: Gamma
        # noise prior with a lower bound on the inferred noise level.
        noise_prior = GammaPrior(1.1, 0.05)
        noise_prior_mode = (noise_prior.concentration - 1) / noise_prior.rate
        likelihood = GaussianLikelihood(
            noise_prior=noise_prior,
            batch_shape=self._aug_batch_shape,
            noise_constraint=GreaterThan(
                MIN_INFERRED_NOISE_LEVEL,
                transform=None,
                initial_value=noise_prior_mode,
            ),
        )
    else:
        # NOTE(review): the flag is only set when a likelihood is supplied;
        # code reading it presumably treats its absence as False — confirm.
        self._is_custom_likelihood = True
    super().__init__(
        train_X,
        train_Y.view(*self._aug_batch_shape, -1),
        likelihood=likelihood,
    )
    if covar_modules is not None:
        self.covar_modules = ModuleList(covar_modules)
    else:
        # One Matern kernel per dimension: the first acts on the inputs
        # (full ARD), the rest act on 1-dim latent parameters.
        self.covar_modules = ModuleList(
            [
                MaternKernel(
                    nu=2.5,
                    lengthscale_prior=GammaPrior(3.0, 6.0),
                    batch_shape=self._aug_batch_shape,
                    ard_num_dims=1 if dim > 0 else train_X.shape[-1],
                )
                for dim in range(self._num_dimensions)
            ]
        )
    if num_latent_dims is None:
        num_latent_dims = [1] * (self._num_dimensions - 1)
    self.to(train_X)
    self._initialize_latents(
        latent_init=latent_init,
        num_latent_dims=num_latent_dims,
        learn_latent_pars=learn_latent_pars,
        device=train_Y.device,
        dtype=train_Y.dtype,
    )
    if outcome_transform is not None:
        self.outcome_transform = outcome_transform
    if input_transform is not None:
        self.input_transform = input_transform
def _initialize_latents(
    self,
    latent_init: str,
    num_latent_dims: List[int],
    learn_latent_pars: bool,
    device: torch.device,
    dtype: torch.dtype,
):
    """Initialize the latent parameters (one tensor per output dimension).

    Args:
        latent_init: "default" draws latents uniformly from [0, 1);
            "gp" samples them from a zero-mean GP prior whose kernel is the
            corresponding covariance module evaluated on linspace(0, 1).
        num_latent_dims: Number of latent dimensions per output dimension.
        learn_latent_pars: If True, the latents are trainable parameters.
        device: Torch device for the latent tensors.
        dtype: Torch dtype for the latent tensors.
    """
    # NOTE(review): any other `latent_init` value silently produces an empty
    # ParameterList — confirm validation happens upstream.
    self.latent_parameters = ParameterList()
    if latent_init == "default":
        # Uniform random initialization in [0, 1).
        for dim_num in range(len(self.covar_modules) - 1):
            self.latent_parameters.append(
                Parameter(
                    torch.rand(
                        *self._aug_batch_shape,
                        self.target_shape[dim_num],
                        num_latent_dims[dim_num],
                        device=device,
                        dtype=dtype,
                    ),
                    requires_grad=learn_latent_pars,
                )
            )
    elif latent_init == "gp":
        # covar_modules[0] is the data kernel; modules 1.. are latent kernels.
        for dim_num, covar in enumerate(self.covar_modules[1:]):
            latent_covar = covar(
                torch.linspace(
                    0.0,
                    1.0,
                    self.target_shape[dim_num],
                    device=device,
                    dtype=dtype,
                )
            ).add_jitter(1e-4)  # jitter for numerical stability
            latent_dist = MultivariateNormal(
                torch.zeros(
                    *self._aug_batch_shape,
                    self.target_shape[dim_num],
                    device=device,
                    dtype=dtype,
                ),
                latent_covar,
            )
            sample_shape = torch.Size((num_latent_dims[dim_num],))
            latent_sample = latent_dist.sample(sample_shape=sample_shape)
            # Move the sample dimension to the trailing position.
            latent_sample = latent_sample.reshape(
                *self._aug_batch_shape,
                self.target_shape[dim_num],
                num_latent_dims[dim_num],
            )
            self.latent_parameters.append(
                Parameter(
                    latent_sample,
                    requires_grad=learn_latent_pars,
                )
            )
            # Register the GP prior over the latents; `dim_num=dim_num`
            # binds the loop variable so each closure keeps its own index.
            self.register_prior(
                "latent_parameters_" + str(dim_num),
                MultivariateNormalPrior(
                    latent_dist.loc,
                    latent_dist.covariance_matrix.detach().clone(),
                    transform=lambda x: x.squeeze(-1),
                ),
                lambda module, dim_num=dim_num: self.latent_parameters[dim_num],
            )
def forward(self, X: Tensor) -> MultivariateNormal:
    """Return the model distribution at ``X``.

    The full covariance is the Kronecker product of the data kernel on
    ``X`` and the latent kernels evaluated on the latent parameters;
    the mean is zero.
    """
    if self.training:
        # Input transforms are applied here in training mode; in eval mode
        # they are applied in `posterior` instead.
        X = self.transform_inputs(X)
    covariance_list = []
    covariance_list.append(self.covar_modules[0](X))
    for cm, param in zip(self.covar_modules[1:], self.latent_parameters):
        if not self.training:
            # Treat latent covariances as fixed at eval time (no grads).
            with torch.no_grad():
                covariance_list.append(cm(param))
        else:
            covariance_list.append(cm(param))
    # check batch_shapes: broadcast the latent covariances up to the data
    # covariance's batch shape when they disagree.
    if covariance_list[0].batch_shape != covariance_list[1].batch_shape:
        for i in range(1, len(covariance_list)):
            cm = covariance_list[i]
            covariance_list[i] = BatchRepeatLazyTensor(
                cm, covariance_list[0].batch_shape
            )
    kronecker_covariance = KroneckerProductLazyTensor(*covariance_list)
    # TODO: expand options for the mean module via batch shaping?
    mean = torch.zeros(
        *covariance_list[0].batch_shape,
        kronecker_covariance.shape[-1],
        device=kronecker_covariance.device,
        dtype=kronecker_covariance.dtype,
    )
    return MultivariateNormal(mean, kronecker_covariance)
def get_fantasy_model(self, inputs, targets, **kwargs):
    """Construct a fantasy model conditioned on ``(inputs, targets)``.

    The targets are flattened over the output dimensions so the base class
    sees a single trailing data dimension; an extra leading sample
    dimension on the targets (if present) is preserved.
    """
    # we need to squeeze the targets in order to preserve the shaping
    batch_dims = inputs.shape[:-2]
    new_shape = (*batch_dims, -1)
    if len(batch_dims) + self._num_dimensions < targets.ndim:
        # Keep the leading (e.g. fantasy-sample) dimension intact.
        new_shape = (targets.shape[0], *new_shape)
    return super().get_fantasy_model(inputs, targets.view(*new_shape), **kwargs)
def condition_on_observations(
    self, X: Tensor, Y: Tensor, **kwargs: Any
) -> HigherOrderGP:
    r"""Condition the model on new observations.

    Args:
        X: A `batch_shape x n' x d`-dim Tensor, where `d` is the dimension of
            the feature space, `m` is the number of points per batch, and
            `batch_shape` is the batch shape (must be compatible with the
            batch shape of the model).
        Y: A `batch_shape' x n' x m_d`-dim Tensor, where `m_d` is the shaping
            of the model outputs, `n'` is the number of points per batch, and
            `batch_shape'` is the batch shape of the observations.
            `batch_shape'` must be broadcastable to `batch_shape` using
            standard broadcasting semantics. If `Y` has fewer batch dimensions
            than `X`, it is assumed that the missing batch dimensions are
            the same for all `Y`.

    Returns:
        A `BatchedMultiOutputGPyTorchModel` object of the same type with
        `n + n'` training examples, representing the original model
        conditioned on the new observations `(X, Y)` (and possibly noise
        observations passed in via kwargs).
    """
    noise = kwargs.get("noise")
    if hasattr(self, "outcome_transform"):
        # we need to apply transforms before shifting batch indices around
        Y, noise = self.outcome_transform(Y, noise)
    self._validate_tensor_args(X=X, Y=Y, Yvar=noise, strict=False)
    # we don't need to do un-squeezing because Y already is batched
    # we don't support fixed noise here yet
    # if noise is not None:
    #     kwargs.update({"noise": noise})
    # Skip BatchedMultiOutputGPyTorchModel's implementation and go straight
    # to its parent, since the batched-output reshaping does not apply here.
    fantasy_model = super(
        BatchedMultiOutputGPyTorchModel, self
    ).condition_on_observations(X=X, Y=Y, **kwargs)
    # Rebuild the cached batch shapes from the new training targets.
    fantasy_model._input_batch_shape = fantasy_model.train_targets.shape[
        : (-1 if self._num_outputs == 1 else -2)
    ]
    fantasy_model._aug_batch_shape = fantasy_model.train_targets.shape[:-1]
    return fantasy_model
def posterior(
    self,
    X: Tensor,
    output_indices: Optional[List[int]] = None,
    observation_noise: Union[bool, Tensor] = False,
    **kwargs: Any,
) -> GPyTorchPosterior:
    """Compute the posterior over model outputs at the points ``X``.

    Args:
        X: Input points at which to evaluate the posterior.
        output_indices: Unused here; kept for interface compatibility.
        observation_noise: If not False, include observation noise via the
            likelihood.

    Returns:
        A `HigherOrderGPPosterior`, possibly passed through the outcome
        transform's `untransform_posterior`.
    """
    self.eval()  # make sure we're calling a posterior
    # input transforms are applied at `posterior` in `eval` mode, and at
    # `model.forward()` at the training time
    X = self.transform_inputs(X)
    no_pred_variance = skip_posterior_variances._state
    with ExitStack() as es:
        es.enter_context(gpt_posterior_settings())
        es.enter_context(fast_pred_var(True))
        # we need to skip posterior variances here
        es.enter_context(skip_posterior_variances(True))
        mvn = self(X)
        if observation_noise is not False:
            # TODO: ensure that this still works for structured noise solves.
            mvn = self.likelihood(mvn, X)
        # lazy covariance matrix includes the interpolated version of the full
        # covariance matrix so we can actually grab that instead.
        if X.ndimension() > self.train_inputs[0].ndimension():
            # Broadcast the training inputs up to X's batch shape.
            X_batch_shape = X.shape[:-2]
            train_inputs = self.train_inputs[0].reshape(
                *[1] * len(X_batch_shape), *self.train_inputs[0].shape
            )
            train_inputs = train_inputs.repeat(
                *X_batch_shape, *[1] * self.train_inputs[0].ndimension()
            )
        else:
            train_inputs = self.train_inputs[0]
        # we now compute the data covariances for the training data, the testing
        # data, the joint covariances, and the test train cross-covariance
        train_train_covar = self.prediction_strategy.lik_train_train_covar.detach()
        base_train_train_covar = train_train_covar.lazy_tensor
        data_train_covar = base_train_train_covar.lazy_tensors[0]
        data_covar = self.covar_modules[0]
        data_train_test_covar = data_covar(X, train_inputs)
        data_test_test_covar = data_covar(X)
        data_joint_covar = data_train_covar.cat_rows(
            cross_mat=data_train_test_covar,
            new_mat=data_test_test_covar,
        )
        # we detach the latents so that they don't cause gradient errors
        # TODO: Can we enable backprop through the latent covariances?
        batch_shape = data_train_test_covar.batch_shape
        latent_covar_list = []
        for latent_covar in base_train_train_covar.lazy_tensors[1:]:
            if latent_covar.batch_shape != batch_shape:
                latent_covar = BatchRepeatLazyTensor(latent_covar, batch_shape)
            latent_covar_list.append(latent_covar.detach())
        joint_covar = KroneckerProductLazyTensor(
            data_joint_covar, *latent_covar_list
        )
        test_train_covar = KroneckerProductLazyTensor(
            data_train_test_covar, *latent_covar_list
        )
        # compute the posterior variance if necessary
        if no_pred_variance:
            pred_variance = mvn.variance
        else:
            pred_variance = self.make_posterior_variances(joint_covar)
        # mean and variance get reshaped into the target shape
        new_mean = mvn.mean.reshape(*X.shape[:-1], *self.target_shape)
        if not no_pred_variance:
            new_variance = pred_variance.reshape(*X.shape[:-1], *self.target_shape)
            new_variance = DiagLazyTensor(new_variance)
        else:
            # Placeholder zero variance when variances are being skipped.
            new_variance = ZeroLazyTensor(
                *X.shape[:-1], *self.target_shape, self.target_shape[-1]
            )
        mvn = MultivariateNormal(new_mean, new_variance)
        # return a specialized Posterior to allow for sampling
        # cloning the full covar allows backpropagation through it
        posterior = HigherOrderGPPosterior(
            mvn=mvn,
            train_targets=self.train_targets.unsqueeze(-1),
            train_train_covar=train_train_covar,
            test_train_covar=test_train_covar,
            joint_covariance_matrix=joint_covar.clone(),
            output_shape=X.shape[:-1] + self.target_shape,
            num_outputs=self._num_outputs,
        )
        if hasattr(self, "outcome_transform"):
            posterior = self.outcome_transform.untransform_posterior(posterior)
        return posterior
def make_posterior_variances(self, joint_covariance_matrix: LazyTensor) -> Tensor:
    r"""
    Computes the posterior variances given the data points X. As currently
    implemented, it computes another forwards call with the stacked data to get out
    the joint covariance across all data points.
    """
    # TODO: use the exposed joint covariances from the prediction strategy
    data_joint_covariance = joint_covariance_matrix.lazy_tensors[
        0
    ].evaluate_kernel()
    num_train = self.train_inputs[0].shape[-2]
    # Split the joint data covariance into train/test blocks.
    test_train_covar = data_joint_covariance[..., num_train:, :num_train]
    train_train_covar = data_joint_covariance[..., :num_train, :num_train]
    test_test_covar = data_joint_covariance[..., num_train:, num_train:]
    # Re-attach the latent covariance factors via Kronecker products.
    full_train_train_covar = KroneckerProductLazyTensor(
        train_train_covar, *joint_covariance_matrix.lazy_tensors[1:]
    )
    full_test_test_covar = KroneckerProductLazyTensor(
        test_test_covar, *joint_covariance_matrix.lazy_tensors[1:]
    )
    full_test_train_covar_list = [test_train_covar] + [
        *joint_covariance_matrix.lazy_tensors[1:]
    ]
    # Eigendecompose the Kronecker-structured train covariance; the
    # eigenvectors come back factored per Kronecker component.
    train_evals, train_evecs = full_train_train_covar.symeig(eigenvectors=True)
    # (\kron \Lambda_i + \sigma^2 I)^{-1}
    train_inv_evals = DiagLazyTensor(1.0 / (train_evals + self.likelihood.noise))
    # compute K_i S_i \hadamard K_i S_i
    test_train_hadamard = KroneckerProductLazyTensor(
        *[
            lt1.matmul(lt2).evaluate() ** 2
            for lt1, lt2 in zip(
                full_test_train_covar_list, train_evecs.lazy_tensors
            )
        ]
    )
    # and compute the column sums of
    # (\kron K_i S_i * K_i S_i) \tilde{\Lambda}^{-1}
    test_train_pred_covar = test_train_hadamard.matmul(train_inv_evals).sum(dim=-1)
    pred_variances = full_test_test_covar.diag() - test_train_pred_covar
    return pred_variances
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
import os
import sys
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
# -- Load Single-Sourced Config --------------- --- -- -
# Import ../../config.py as a module so that the project metadata below is
# defined in a single place, shared with other tooling.
path = Path(__file__, '../../config.py').resolve()
spec = spec_from_file_location("config", path)
config = module_from_spec(spec)
spec.loader.exec_module(config)

# -- Path setup --------------- --- -- -
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration --------------- --- -- -
# See http://www.sphinx-doc.org/en/master/config
# Project metadata mirrored from the single-sourced config module above.
author = config.author
copyright = config.copyright
description = config.description
name = config.name
project = config.project
release = config.release
repo = config.repo
version = config.version

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '4.0.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    # 'sphinx.ext.coverage',
    'sphinx.ext.todo',
    'sphinx.ext.doctest',
    'sphinx.ext.extlinks',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    # 'sphinx-jsonschema'
    # 'sphinxcontrib.bibtex',
    'pydata_sphinx_theme',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# default_role = 'any'
# suppress_warnings = ['app.add_directive']

# add option to include to do boxes
todo_include_todos = True

# -- Options for sphinx.ext.autodoc --------------- --- -- -
# Document both class and __init__ docstrings, all members, in source order.
autoclass_content = 'both'
autodoc_default_options = {
    'inherited-members': True,
    'members': True,
    'show-inheritance': True,
    'undoc-members': True,
}
autodoc_member_order = 'bysource'
# Render type hints in the description body rather than in the signature.
autodoc_typehints = "description"

# -- Options for HTML output --------------- --- -- -
# See http://www.sphinx-doc.org/en/master/usage/configuration.html#options-
# for-html-help-output
html_theme = "pydata_sphinx_theme"
html_theme_options = {
    "external_links": [],
    # "github_url": "https://github.com/pandas-dev/pandas",
    # "twitter_url": "https://twitter.com/pandas_dev",
    # "google_analytics_id": "UA-27880019-2",
    # "show_toc_level": 2,
}
html_logo = "_static/logo_small.png"
# html_baseurl = "https://ominatechnologies.github.io/frozendict/"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static", "_static/.nojekyll"]
html_extra_path = [".nojekyll"]

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
html_sidebars = {
    '**': ['globaltoc.html', 'searchbox.html'],
}
html_copy_source = False
html_show_sourcelink = True
html_show_copyright = True

# The name of math_renderer extension for HTML output. Defaults to 'mathjax'.
html_math_renderer = 'mathjax'

# -- Options for HTMLHelp output --------------- --- -- -
# Output file base name for HTML help builder.
htmlhelp_basename = f'{name}doc'
# -- Options for LaTeX output --------------- --- -- -
# Static LaTeX preamble; macro definitions from `latex_commands` are
# appended to it by `get_preamble()` below.
latex_preamble = r"""
\usepackage{amsmath,amsfonts,amssymb,amsthm}
\usepackage{graphicx}
\usepackage{fancyhdr}
\usepackage{color}
\usepackage{transparent}
\usepackage{eso-pic}
\usepackage{lipsum}
\usepackage{footnotebackref}
% reduce spacing for itemize:
%\usepackage{enumitem}
%\setlist{nosep}
\setcounter{secnumdepth}{4}
\setcounter{tocdepth}{2}
\pagestyle{fancy}
\makeatletter
% Update normal pages:
\fancypagestyle{normal}{
\fancyhf{}
\fancyhead[LE,RO]{{\py@HeaderFamily FrozenDict v\version}}
\fancyfoot[LE,RO]{{\py@HeaderFamily\thepage}}
\fancyfoot[LO]{{\py@HeaderFamily\nouppercase{\rightmark}}}
\fancyfoot[RE]{{\py@HeaderFamily\nouppercase{\leftmark}}}
\renewcommand{\headrulewidth}{0.4pt}
\renewcommand{\footrulewidth}{0.4pt}
}
% Update the first page of each chapter:
\fancypagestyle{plain}{
\fancyhf{}
\fancyfoot[LE,RO]{{\py@HeaderFamily\thepage}}
\renewcommand{\headrulewidth}{0.4pt}
\renewcommand{\footrulewidth}{0.4pt}
}
\makeatother
"""

# Custom LaTeX macros: name -> replacement text, or (text, nargs) for
# macros with arguments. Also exported to MathJax via `mathjax3_config`.
latex_commands = {
    'tuple': ("{\\langle{#1}\\rangle}", 1),  # e.g. :math:`\tuple{a, b}`
    'foo': "{\\operatorname{foo}}",  # e.g. :math:`\foo_i`
}
def newcommand(name, spec) -> str:
    """Render a LaTeX ``\\newcommand`` line for *name*.

    *spec* is either the replacement text (``str``) or a ``(text, nargs)``
    tuple for a macro that takes arguments.
    """
    if isinstance(spec, tuple):
        body, nargs = spec
        return "\\newcommand{{\\{}}}[{}]{}".format(name, nargs, body)
    if isinstance(spec, str):
        return "\\newcommand{{\\{}}}{}".format(name, spec)
    raise TypeError(f"Unexpected spec for command '{name}': '{spec}'")
def get_preamble() -> str:
    """Assemble the full LaTeX preamble.

    Returns the static ``latex_preamble`` followed by one ``\\newcommand``
    line per entry in ``latex_commands``, joined with newlines.
    """
    lines = [latex_preamble]
    # Iterate items() directly instead of iterating keys and re-indexing
    # the dict (clearer, avoids the second lookup).
    lines.extend(newcommand(cmd, spec) for cmd, spec in latex_commands.items())
    return "\n".join(lines)
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    'papersize': 'a4paper',
    # The font size ('10pt', '11pt' or '12pt').
    'pointsize': '11pt',
    # Additional stuff for the LaTeX preamble.
    'preamble': get_preamble(),
    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
    'fncychap': '\\usepackage{fncychap}',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [(
    master_doc,
    f'{name}.tex',
    f'{project} Documentation',
    author,
    'manual',
)]

# Set to True if you want all displayed math to be numbered. Defaults to False.
math_number_all = True

# A string that are used for format of label of references to equations. As a
# special character, {number} will be replaced to equation number.
math_eqref_format = 'Eq. {number}'

# If True, displayed math equations are numbered across pages when numfig is
# enabled. The numfig_secnum_depth setting is respected. The eq, not numref,
# role must be used to reference equation numbers. Default is True.
math_numfig = True

# -- Options for sphinx.ext.mathjax --------------- --- -- -
# See https://www.sphinx-doc.org/en/master/usage/extensions/math.html#module-
# sphinx.ext.mathjax
# Make the custom LaTeX macros available to MathJax for HTML builds too.
mathjax3_config = {
    'TeX': {
        'Macros': latex_commands,
    },
}

# -- Options for sphinx.ext.intersphinx --------------- --- -- -
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
}

# -- Options for manual page output --------------- --- -- -
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(
    master_doc,
    name,
    f'{project} Documentation',
    [author],
    1,
)]

# -- Options for Texinfo output --------------- --- -- -
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [(
    master_doc,
    name,
    f'{project} Documentation',
    author,
    project,
    description,
    'Miscellaneous',
)]
|
from functools import wraps
import numpy as np
def _extract_from_singleton_iterable(inputs):
if len(inputs) == 1:
return inputs[0]
return tuple(inputs)
def _get_random_row_idxes(num_rows, subsampling_scheme, random_state):
if subsampling_scheme is None:
return range(num_rows)
elif isinstance(subsampling_scheme, str):
if subsampling_scheme.lower() == "sqrt":
num_subsampled_rows = int(np.sqrt(num_rows))
else:
raise ValueError("Not valid subsampling scheme")
elif subsampling_scheme < 1 and subsampling_scheme > 0:
num_subsampled_rows = int(num_rows * subsampling_scheme)
elif subsampling_scheme >= 1 and isinstance(subsampling_scheme, int):
if subsampling_scheme > num_rows:
raise ValueError(
"Cannot subsample more rows than there are present"
)
num_subsampled_rows = subsampling_scheme
else:
raise ValueError("Not valid subsampling scheme")
inds = random_state.choice(num_rows, num_subsampled_rows, replace=False)
inds.sort()
return inds
def subsampling_fraction(num_rows, subsampling_scheme, random_state):
    """Return the fraction of rows retained by the given subsampling scheme."""
    selected = _get_random_row_idxes(
        num_rows, subsampling_scheme, random_state=random_state
    )
    return len(selected) / num_rows
def subsample(subsampling_scheme, *Xs, random_state):
    """Subsample along first (0-th) axis of the Xs arrays.

    Arguments
    ---------
    subsampling_scheme : int, float or str
        How to subsample:

        * int or float == 1 -> no subsampling
        * int > 1 -> that many rows are sampled
        * float < 1 -> the fraction of rows to subsample
        * sqrt -> subsample sqrt(num_rows) rows
    """
    assert len(Xs) > 0
    # A scheme of exactly 1 means "keep everything".
    if subsampling_scheme == 1:
        return _extract_from_singleton_iterable(Xs)
    rows = _get_random_row_idxes(
        Xs[0].shape[0], subsampling_scheme, random_state=random_state
    )
    return _extract_from_singleton_iterable(tuple(X[rows, :] for X in Xs))
class Subsampler:
    """
    Utility for subsampling along the first (0-th) axis of the Xs arrays.

    Arguments
    ---------
    num_indices : int
        How many indices the arrays to subsample from have
    subsampling_scheme : int, float or str
        How to subsample:

        * int or float == 1 -> no subsampling
        * int > 1 -> that many rows are sampled
        * float < 1 -> the fraction of rows to subsample
        * sqrt -> subsample sqrt(num_rows) rows
    random_state : np.random.RandomState
    """

    def __init__(self, num_indices, subsampling_scheme, random_state):
        self.random_state = random_state
        self.subsampling_scheme = subsampling_scheme
        self.set_num_indices(num_indices)

    def set_num_indices(self, num_indices):
        """Set the row count and immediately redraw the subsample indices."""
        self.num_indices_ = num_indices
        self.update_indices()

    def subsample(self, *Xs):
        """Return the currently-selected rows of each array in *Xs*."""
        if self.subsampling_scheme == 1:
            # Scheme 1 means "no subsampling".
            return _extract_from_singleton_iterable(Xs)
        picked = [X[self.curr_indices_] for X in Xs]
        return _extract_from_singleton_iterable(picked)

    def update_indices(self):
        """Redraw the subsampled row indices from the random state."""
        self.curr_indices_ = _get_random_row_idxes(
            self.num_indices_,
            self.subsampling_scheme,
            random_state=self.random_state,
        )

    def subsample_apply(self, f, *full_inputs):
        """Wrap *f* so it is called on the current subsample of *full_inputs*."""

        @wraps(f)
        def wrapped(*args, **kwargs):
            # Subsample lazily at call time so index updates are honored.
            return f(*self.subsample(*full_inputs), *args, **kwargs)

        return wrapped
|
import json
import random
import redis
import time
LIST_KEY = "jobs"

# decode_responses=True gives us str payloads instead of bytes.
r = redis.Redis(decode_responses=True)

while True:
    print("Checking for jobs...")
    # Pop the next job (blocking up to 5 s) and read the remaining backlog
    # length in a single round trip via a pipeline.
    pipeline = r.pipeline()
    pipeline.brpop(LIST_KEY, timeout=5)
    pipeline.llen(LIST_KEY)
    next_job, backlog = pipeline.execute()
    if next_job is None:
        # brpop timed out: the list is empty.
        print("Nothing to do right now.")
        time.sleep(5)
    else:
        # next_job is a Tuple, that looks like this:
        # ('jobs', '{"room": 343, "job": "Room Service"}')
        job = json.loads(next_job[1])
        print(f"Performing job {job['job']} for room {str(job['room'])}. Backlog {backlog} jobs.")
        # Simulate doing the work.
        time.sleep(random.randint(2, 6))
# Repeatedly read two integers per line until "0 0"; for each other pair,
# print their sum with every '0' digit removed.
while True:
    c, m = map(int, input().split())
    if c == 0 and m == 0:
        break
    stripped = str(c + m).replace("0", "")
    print(int(stripped))
# All imports needed to run the loading functions
# import ...
from slogger import slogger
import os
# Add any functions you need to load the required data here.
# These could be functions that pull lists of static files
# from the app repo, or from S3, or database queries or whatever.
def get_menu_options():
    """Function to get the menu options

    Runs a dummy search for available years. Here you could connect to your
    database and fetch a list of unique towns, or count how many records are
    available etc.

    Args:

    Returns:
        list[dict]: Dropdown options ``{'label': str, 'value': int}``, one
        per available year, newest (2019) first down to 2011.
    """
    slogger('get_menu_options', 'getting available years')
    # Generate the options instead of maintaining a 9-entry literal by hand;
    # same years and order (2019 .. 2011) as before.
    results = [{'label': str(year), 'value': year}
               for year in range(2019, 2010, -1)]
    # Return the results
    slogger('get_menu_options', '{} options found'.format(len(results)))
    return results
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
def ClassifierSerializer(o):
    """Serialize a Classifier instance to its name; pass anything else through."""
    return o.name if isinstance(o, Classifier) else o
class Classifier(models.Model):
    """A package classifier, keyed directly by its (unique) name string."""

    name = models.CharField(max_length=255, primary_key=True)

    class Meta:
        verbose_name = _(u"classifier")
        verbose_name_plural = _(u"classifiers")
        ordering = ('name',)

    def __unicode__(self):
        # Display a classifier as its name.
        return self.name
class PythonVersion(models.Model):
    """A (major, minor) Python version; each pair occurs at most once."""

    major = models.IntegerField()
    minor = models.IntegerField()

    class Meta:
        ordering = ('major', 'minor')
        verbose_name = _(u'python version')
        verbose_name_plural = _(u'python versions')
        unique_together = ('major', 'minor')

    def __unicode__(self):
        # Render as "major.minor", e.g. "3.9".
        return '%s.%s' % (self.major, self.minor)
class PlatformName(models.Model):
    """A platform, keyed by a short identifier with a display name."""

    key = models.CharField(max_length=32, primary_key=True)
    name = models.CharField(max_length=32)

    class Meta:
        verbose_name = _(u'platform name')
        verbose_name_plural = _(u'platform names')
        ordering = ('name', )

    def __unicode__(self):
        return self.name
class Architecture(models.Model):
    """A CPU architecture, keyed by a short identifier with a display name."""

    key = models.CharField(max_length=16, primary_key=True)
    name = models.CharField(max_length=64)

    class Meta:
        verbose_name = _(u'architecture')
        verbose_name_plural = _(u'architectures')
        ordering = ('name', )

    def __unicode__(self):
        return self.name
class DistributionType(models.Model):
    """A distribution type, keyed by a short identifier with a display name."""

    key = models.CharField(max_length=32, primary_key=True)
    name = models.CharField(max_length=64)

    class Meta:
        verbose_name = _(u'distribution type')
        verbose_name_plural = _(u'distribution types')
        ordering = ('name', )

    def __unicode__(self):
        return self.name
|
import pdeapp
import numpy as np
#def gencode(app):
# Smoke test for the generated flux function: build small sample inputs and
# print the result. (Removed the non-idiomatic trailing semicolons.)
param = np.array([1.0])
xdg = np.array([0.0, 0.0])
udg = np.array([2.0, 2.0, 2.0])
odg = np.array([0.0])
wdg = np.array([0.0])
uinf = np.array([0.0])
time = np.array([0.0])

print(pdeapp.flux(xdg, udg, odg, wdg, uinf, param, time))
#Flux = getattr(pdeapp, app['Flux'])
#print(Flux(xdg, udg, odg, wdg, uinf, param, time))
# return 0;
|
import numpy as np
from copy import deepcopy
class batch_provider:
    """Serve (subject, predicate, object) training triples in shuffled
    mini-batches, optionally augmented with randomly corrupted negative
    triples."""

    def __init__(self, data, batchsize, num_negSamples = 2, seed = 1231245):
        '''
        Helper class to provide data in batches with negative examples.
        data: Training data triples
        batchsize: size of the mini-batches
        num_negSamples: number of neg. samples.
        seed: random seed for neg. sample generation
        '''
        self.data = deepcopy(data)
        # NOTE(review): num_nodes is the maximum entity id seen in columns 0
        # and 2; np.random.randint(num_nodes) below samples in [0, num_nodes),
        # which can never yield the max id — confirm ids are 0-based and this
        # exclusion is intended.
        self.num_nodes = np.max([np.max(data[:,0]), np.max(data[:,2])])
        np.random.seed(seed)
        np.random.shuffle(self.data)
        self.batchsize = batchsize
        # Any trailing partial batch is dropped.
        self.number_minibatches = int(len(self.data)/batchsize)
        self.current_minibatch = 0
        self.num_negSamples = num_negSamples

    def next_batch(self):
        '''
        Return the next mini-batch.
        Data triples are shuffled after each epoch.
        '''
        i = self.current_minibatch
        di = self.batchsize
        mbatch = deepcopy(self.data[i*di:(i+1)*di])
        self.current_minibatch += 1
        if self.current_minibatch == self.number_minibatches:
            # Epoch complete: reshuffle and restart the batch counter.
            np.random.shuffle(self.data)
            self.current_minibatch = 0
        if self.num_negSamples > 0:
            subj, pred, obj, labels = self.apply_neg_examples(list(mbatch[:,0]), list(mbatch[:,1]), list(mbatch[:,2]))
            return subj, pred, obj, labels
        else:
            return mbatch[:,0], mbatch[:,1], mbatch[:,2]

    def apply_neg_examples(self, subj, pred, obj):
        '''
        Generate neg. samples for a mini-batch.
        Both subject and object neg. samples are generated.
        '''
        vsize = len(subj)
        # +1 labels for the true triples, -1 for all corrupted ones.
        labels = np.array([1 for i in range(vsize)] + [-1 for i in range(self.num_negSamples*2*vsize)])
        neg_subj = list(np.random.randint(self.num_nodes, size = self.num_negSamples*vsize))
        neg_obj = list(np.random.randint(self.num_nodes, size = self.num_negSamples*vsize))
        # subj/pred/obj are Python lists here, so `subj*k` is list repetition:
        # each true triple is replicated once per negative sample so corrupted
        # subjects pair with true objects and vice versa.
        return np.concatenate([subj, neg_subj, subj*self.num_negSamples]), np.concatenate([pred*(2*self.num_negSamples+1)]), np.concatenate([obj, obj*self.num_negSamples, neg_obj]), labels
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class CollectionOfAlert(msrest.serialization.Model):
    """Collection of alert.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param value:
    :type value: list[~security.models.MicrosoftGraphAlert]
    :param odata_next_link:
    :type odata_next_link: str
    """

    # Serialization map: attribute name -> (JSON key, msrest type).
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'value': {'key': 'value', 'type': '[MicrosoftGraphAlert]'},
        'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """Initialize from keyword arguments; unspecified fields default to None."""
        super(CollectionOfAlert, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        self.value = kwargs.get('value', None)
        self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfSecureScore(msrest.serialization.Model):
    """Collection of secureScore.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param value:
    :type value: list[~security.models.MicrosoftGraphSecureScore]
    :param odata_next_link:
    :type odata_next_link: str
    """

    # Serialization map: attribute name -> (JSON key, msrest type).
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'value': {'key': 'value', 'type': '[MicrosoftGraphSecureScore]'},
        'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """Initialize from keyword arguments; unspecified fields default to None."""
        super(CollectionOfSecureScore, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        self.value = kwargs.get('value', None)
        self.odata_next_link = kwargs.get('odata_next_link', None)
class CollectionOfSecureScoreControlProfile(msrest.serialization.Model):
    """Collection of secureScoreControlProfile.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param value:
    :type value: list[~security.models.MicrosoftGraphSecureScoreControlProfile]
    :param odata_next_link:
    :type odata_next_link: str
    """

    # Serialization map: attribute name -> (JSON key, msrest type).
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'value': {'key': 'value', 'type': '[MicrosoftGraphSecureScoreControlProfile]'},
        'odata_next_link': {'key': '@odata\\.nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """Initialize from keyword arguments; unspecified fields default to None."""
        super(CollectionOfSecureScoreControlProfile, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        self.value = kwargs.get('value', None)
        self.odata_next_link = kwargs.get('odata_next_link', None)
class MicrosoftGraphEntity(msrest.serialization.Model):
    """entity.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param id: Read-only.
    :type id: str
    """

    # Serialization map: attribute name -> (JSON key, msrest type).
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        """Initialize from keyword arguments; unspecified fields default to None."""
        super(MicrosoftGraphEntity, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        self.id = kwargs.get('id', None)
class MicrosoftGraphAlert(MicrosoftGraphEntity):
"""alert.
:param id: Read-only.
:type id: str
:param additional_properties: Unmatched properties from the message are deserialized to this
collection.
:type additional_properties: dict[str, object]
:param activity_group_name: Name or alias of the activity group (attacker) this alert is
attributed to.
:type activity_group_name: str
:param assigned_to: Name of the analyst the alert is assigned to for triage, investigation, or
remediation (supports update).
:type assigned_to: str
:param azure_subscription_id: Azure subscription ID, present if this alert is related to an
Azure resource.
:type azure_subscription_id: str
:param azure_tenant_id: Azure Active Directory tenant ID. Required.
:type azure_tenant_id: str
:param category: Category of the alert (for example, credentialTheft, ransomware, etc.).
:type category: str
:param closed_date_time: Time at which the alert was closed. The Timestamp type represents date
and time information using ISO 8601 format and is always in UTC time. For example, midnight UTC
on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z' (supports update).
:type closed_date_time: ~datetime.datetime
:param cloud_app_states: Security-related stateful information generated by the provider about
the cloud application/s related to this alert.
:type cloud_app_states: list[~security.models.MicrosoftGraphCloudAppSecurityState]
:param comments: Customer-provided comments on alert (for customer alert management) (supports
update).
:type comments: list[str]
:param confidence: Confidence of the detection logic (percentage between 1-100).
:type confidence: int
:param created_date_time: Time at which the alert was created by the alert provider. The
Timestamp type represents date and time information using ISO 8601 format and is always in UTC
time. For example, midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'.
Required.
:type created_date_time: ~datetime.datetime
:param description: Alert description.
:type description: str
:param detection_ids: Set of alerts related to this alert entity (each alert is pushed to the
SIEM as a separate record).
:type detection_ids: list[str]
:param event_date_time: Time at which the event(s) that served as the trigger(s) to generate
the alert occurred. The Timestamp type represents date and time information using ISO 8601
format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like
this: '2014-01-01T00:00:00Z'. Required.
:type event_date_time: ~datetime.datetime
:param feedback: Possible values include: "unknown", "truePositive", "falsePositive",
"benignPositive", "unknownFutureValue".
:type feedback: str or ~security.models.MicrosoftGraphAlertFeedback
:param file_states: Security-related stateful information generated by the provider about the
file(s) related to this alert.
:type file_states: list[~security.models.MicrosoftGraphFileSecurityState]
:param history_states:
:type history_states: list[~security.models.MicrosoftGraphAlertHistoryState]
:param host_states: Security-related stateful information generated by the provider about the
host(s) related to this alert.
:type host_states: list[~security.models.MicrosoftGraphHostSecurityState]
:param incident_ids: IDs of incidents related to current alert.
:type incident_ids: list[str]
:param last_modified_date_time: Time at which the alert entity was last modified. The Timestamp
type represents date and time information using ISO 8601 format and is always in UTC time. For
example, midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'.
:type last_modified_date_time: ~datetime.datetime
:param malware_states: Threat Intelligence pertaining to malware related to this alert.
:type malware_states: list[~security.models.MicrosoftGraphMalwareState]
:param network_connections: Security-related stateful information generated by the provider
about the network connection(s) related to this alert.
:type network_connections: list[~security.models.MicrosoftGraphNetworkConnection]
:param processes: Security-related stateful information generated by the provider about the
process or processes related to this alert.
:type processes: list[~security.models.MicrosoftGraphProcess]
:param recommended_actions: Vendor/provider recommended action(s) to take as a result of the
alert (for example, isolate machine, enforce2FA, reimage host).
:type recommended_actions: list[str]
:param registry_key_states: Security-related stateful information generated by the provider
about the registry keys related to this alert.
:type registry_key_states: list[~security.models.MicrosoftGraphRegistryKeyState]
:param security_resources: Resources related to current alert. For example, for some alerts
this can have the Azure Resource value.
:type security_resources: list[~security.models.MicrosoftGraphSecurityResource]
:param severity: Possible values include: "unknown", "informational", "low", "medium", "high",
"unknownFutureValue".
:type severity: str or ~security.models.MicrosoftGraphAlertSeverity
:param source_materials: Hyperlinks (URIs) to the source material related to the alert, for
example, provider's user interface for alerts or log search, etc.
:type source_materials: list[str]
:param status: Possible values include: "unknown", "newAlert", "inProgress", "resolved",
"dismissed", "unknownFutureValue".
:type status: str or ~security.models.MicrosoftGraphAlertStatus
:param tags: A set of tags. User-definable labels that can be applied to an alert and can serve
as filter conditions (for example 'HVA', 'SAW', etc.) (supports update).
:type tags: list[str]
:param title: Alert title. Required.
:type title: str
:param triggers: Security-related information about the specific properties that triggered the
alert (properties appearing in the alert). Alerts might contain information about multiple
users, hosts, files, ip addresses. This field indicates which properties triggered the alert
generation.
:type triggers: list[~security.models.MicrosoftGraphAlertTrigger]
:param user_states: Security-related stateful information generated by the provider about the
user accounts related to this alert.
:type user_states: list[~security.models.MicrosoftGraphUserSecurityState]
:param vendor_information: securityVendorInformation.
:type vendor_information: ~security.models.MicrosoftGraphSecurityVendorInformation
:param vulnerability_states: Threat intelligence pertaining to one or more vulnerabilities
related to this alert.
:type vulnerability_states: list[~security.models.MicrosoftGraphVulnerabilityState]
"""
_validation = {
'confidence': {'maximum': 2147483647, 'minimum': -2147483648},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'additional_properties': {'key': '', 'type': '{object}'},
'activity_group_name': {'key': 'activityGroupName', 'type': 'str'},
'assigned_to': {'key': 'assignedTo', 'type': 'str'},
'azure_subscription_id': {'key': 'azureSubscriptionId', 'type': 'str'},
'azure_tenant_id': {'key': 'azureTenantId', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'closed_date_time': {'key': 'closedDateTime', 'type': 'iso-8601'},
'cloud_app_states': {'key': 'cloudAppStates', 'type': '[MicrosoftGraphCloudAppSecurityState]'},
'comments': {'key': 'comments', 'type': '[str]'},
'confidence': {'key': 'confidence', 'type': 'int'},
'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
'description': {'key': 'description', 'type': 'str'},
'detection_ids': {'key': 'detectionIds', 'type': '[str]'},
'event_date_time': {'key': 'eventDateTime', 'type': 'iso-8601'},
'feedback': {'key': 'feedback', 'type': 'str'},
'file_states': {'key': 'fileStates', 'type': '[MicrosoftGraphFileSecurityState]'},
'history_states': {'key': 'historyStates', 'type': '[MicrosoftGraphAlertHistoryState]'},
'host_states': {'key': 'hostStates', 'type': '[MicrosoftGraphHostSecurityState]'},
'incident_ids': {'key': 'incidentIds', 'type': '[str]'},
'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
'malware_states': {'key': 'malwareStates', 'type': '[MicrosoftGraphMalwareState]'},
'network_connections': {'key': 'networkConnections', 'type': '[MicrosoftGraphNetworkConnection]'},
'processes': {'key': 'processes', 'type': '[MicrosoftGraphProcess]'},
'recommended_actions': {'key': 'recommendedActions', 'type': '[str]'},
'registry_key_states': {'key': 'registryKeyStates', 'type': '[MicrosoftGraphRegistryKeyState]'},
'security_resources': {'key': 'securityResources', 'type': '[MicrosoftGraphSecurityResource]'},
'severity': {'key': 'severity', 'type': 'str'},
'source_materials': {'key': 'sourceMaterials', 'type': '[str]'},
'status': {'key': 'status', 'type': 'str'},
'tags': {'key': 'tags', 'type': '[str]'},
'title': {'key': 'title', 'type': 'str'},
'triggers': {'key': 'triggers', 'type': '[MicrosoftGraphAlertTrigger]'},
'user_states': {'key': 'userStates', 'type': '[MicrosoftGraphUserSecurityState]'},
'vendor_information': {'key': 'vendorInformation', 'type': 'MicrosoftGraphSecurityVendorInformation'},
'vulnerability_states': {'key': 'vulnerabilityStates', 'type': '[MicrosoftGraphVulnerabilityState]'},
}
def __init__(
self,
**kwargs
):
super(MicrosoftGraphAlert, self).__init__(**kwargs)
self.additional_properties = kwargs.get('additional_properties', None)
self.activity_group_name = kwargs.get('activity_group_name', None)
self.assigned_to = kwargs.get('assigned_to', None)
self.azure_subscription_id = kwargs.get('azure_subscription_id', None)
self.azure_tenant_id = kwargs.get('azure_tenant_id', None)
self.category = kwargs.get('category', None)
self.closed_date_time = kwargs.get('closed_date_time', None)
self.cloud_app_states = kwargs.get('cloud_app_states', None)
self.comments = kwargs.get('comments', None)
self.confidence = kwargs.get('confidence', None)
self.created_date_time = kwargs.get('created_date_time', None)
self.description = kwargs.get('description', None)
self.detection_ids = kwargs.get('detection_ids', None)
self.event_date_time = kwargs.get('event_date_time', None)
self.feedback = kwargs.get('feedback', None)
self.file_states = kwargs.get('file_states', None)
self.history_states = kwargs.get('history_states', None)
self.host_states = kwargs.get('host_states', None)
self.incident_ids = kwargs.get('incident_ids', None)
self.last_modified_date_time = kwargs.get('last_modified_date_time', None)
self.malware_states = kwargs.get('malware_states', None)
self.network_connections = kwargs.get('network_connections', None)
self.processes = kwargs.get('processes', None)
self.recommended_actions = kwargs.get('recommended_actions', None)
self.registry_key_states = kwargs.get('registry_key_states', None)
self.security_resources = kwargs.get('security_resources', None)
self.severity = kwargs.get('severity', None)
self.source_materials = kwargs.get('source_materials', None)
self.status = kwargs.get('status', None)
self.tags = kwargs.get('tags', None)
self.title = kwargs.get('title', None)
self.triggers = kwargs.get('triggers', None)
self.user_states = kwargs.get('user_states', None)
self.vendor_information = kwargs.get('vendor_information', None)
self.vulnerability_states = kwargs.get('vulnerability_states', None)
class MicrosoftGraphAlertHistoryState(msrest.serialization.Model):
    """alertHistoryState.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param app_id:
    :type app_id: str
    :param assigned_to:
    :type assigned_to: str
    :param comments:
    :type comments: list[str]
    :param feedback: Possible values include: "unknown", "truePositive", "falsePositive",
     "benignPositive", "unknownFutureValue".
    :type feedback: str or ~security.models.MicrosoftGraphAlertFeedback
    :param status: Possible values include: "unknown", "newAlert", "inProgress", "resolved",
     "dismissed", "unknownFutureValue".
    :type status: str or ~security.models.MicrosoftGraphAlertStatus
    :param updated_date_time:
    :type updated_date_time: ~datetime.datetime
    :param user:
    :type user: str
    """

    # Attribute -> JSON wire key / msrest type mapping for (de)serialization.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'app_id': {'key': 'appId', 'type': 'str'},
        'assigned_to': {'key': 'assignedTo', 'type': 'str'},
        'comments': {'key': 'comments', 'type': '[str]'},
        'feedback': {'key': 'feedback', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'updated_date_time': {'key': 'updatedDateTime', 'type': 'iso-8601'},
        'user': {'key': 'user', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphAlertHistoryState, self).__init__(**kwargs)
        # Copy each recognized keyword onto the instance; omitted ones
        # default to None.
        for _field in (
            'additional_properties', 'app_id', 'assigned_to', 'comments',
            'feedback', 'status', 'updated_date_time', 'user',
        ):
            setattr(self, _field, kwargs.get(_field))
class MicrosoftGraphAlertTrigger(msrest.serialization.Model):
    """alertTrigger.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param name: Name of the property serving as a detection trigger.
    :type name: str
    :param type: Type of the property in the key:value pair for interpretation. For example,
     String, Boolean, etc.
    :type type: str
    :param value: Value of the property serving as a detection trigger.
    :type value: str
    """

    # Attribute -> JSON wire key / msrest type mapping for (de)serialization.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphAlertTrigger, self).__init__(**kwargs)
        # Copy each recognized keyword onto the instance; omitted ones
        # default to None.
        for _field in ('additional_properties', 'name', 'type', 'value'):
            setattr(self, _field, kwargs.get(_field))
class MicrosoftGraphAverageComparativeScore(msrest.serialization.Model):
    """averageComparativeScore.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param average_score: Average score within specified basis.
    :type average_score: float
    :param basis: Scope type. The possible values are: AllTenants, TotalSeats, IndustryTypes.
    :type basis: str
    """

    # Attribute -> JSON wire key / msrest type mapping for (de)serialization.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'average_score': {'key': 'averageScore', 'type': 'float'},
        'basis': {'key': 'basis', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphAverageComparativeScore, self).__init__(**kwargs)
        # Copy each recognized keyword onto the instance; omitted ones
        # default to None.
        for _field in ('additional_properties', 'average_score', 'basis'):
            setattr(self, _field, kwargs.get(_field))
class MicrosoftGraphCertificationControl(msrest.serialization.Model):
    """certificationControl.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param name: Certification control name.
    :type name: str
    :param url: URL for the Microsoft Service Trust Portal.
    :type url: str
    """

    # Attribute -> JSON wire key / msrest type mapping for (de)serialization.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'name': {'key': 'name', 'type': 'str'},
        'url': {'key': 'url', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphCertificationControl, self).__init__(**kwargs)
        # Copy each recognized keyword onto the instance; omitted ones
        # default to None.
        for _field in ('additional_properties', 'name', 'url'):
            setattr(self, _field, kwargs.get(_field))
class MicrosoftGraphCloudAppSecurityState(msrest.serialization.Model):
    """cloudAppSecurityState.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param destination_service_ip: Destination IP Address of the connection to the cloud
     application/service.
    :type destination_service_ip: str
    :param destination_service_name: Cloud application/service name (for example 'Salesforce',
     'DropBox', etc.).
    :type destination_service_name: str
    :param risk_score: Provider-generated/calculated risk score of the Cloud Application/Service.
     Recommended value range of 0-1, which equates to a percentage.
    :type risk_score: str
    """

    # Attribute -> JSON wire key / msrest type mapping for (de)serialization.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'destination_service_ip': {'key': 'destinationServiceIp', 'type': 'str'},
        'destination_service_name': {'key': 'destinationServiceName', 'type': 'str'},
        'risk_score': {'key': 'riskScore', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphCloudAppSecurityState, self).__init__(**kwargs)
        # Copy each recognized keyword onto the instance; omitted ones
        # default to None.
        for _field in (
            'additional_properties', 'destination_service_ip',
            'destination_service_name', 'risk_score',
        ):
            setattr(self, _field, kwargs.get(_field))
class MicrosoftGraphComplianceInformation(msrest.serialization.Model):
    """complianceInformation.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param certification_controls: Collection of the certification controls associated with
     certification.
    :type certification_controls: list[~security.models.MicrosoftGraphCertificationControl]
    :param certification_name: Compliance certification name (for example, ISO 27018:2014, GDPR,
     FedRAMP, NIST 800-171).
    :type certification_name: str
    """

    # Attribute -> JSON wire key / msrest type mapping for (de)serialization.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'certification_controls': {'key': 'certificationControls', 'type': '[MicrosoftGraphCertificationControl]'},
        'certification_name': {'key': 'certificationName', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphComplianceInformation, self).__init__(**kwargs)
        # Copy each recognized keyword onto the instance; omitted ones
        # default to None.
        for _field in (
            'additional_properties', 'certification_controls',
            'certification_name',
        ):
            setattr(self, _field, kwargs.get(_field))
class MicrosoftGraphControlScore(msrest.serialization.Model):
    """controlScore.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param control_category: Control action category (Identity, Data, Device, Apps,
     Infrastructure).
    :type control_category: str
    :param control_name: Control unique name.
    :type control_name: str
    :param description: Description of the control.
    :type description: str
    :param score: Tenant achieved score for the control (it varies day by day depending on tenant
     operations on the control).
    :type score: float
    """

    # Attribute -> JSON wire key / msrest type mapping for (de)serialization.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'control_category': {'key': 'controlCategory', 'type': 'str'},
        'control_name': {'key': 'controlName', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'score': {'key': 'score', 'type': 'float'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphControlScore, self).__init__(**kwargs)
        # Copy each recognized keyword onto the instance; omitted ones
        # default to None.
        for _field in (
            'additional_properties', 'control_category', 'control_name',
            'description', 'score',
        ):
            setattr(self, _field, kwargs.get(_field))
class MicrosoftGraphFileHash(msrest.serialization.Model):
    """fileHash.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param hash_type: Possible values include: "unknown", "sha1", "sha256", "md5",
     "authenticodeHash256", "lsHash", "ctph", "unknownFutureValue".
    :type hash_type: str or ~security.models.MicrosoftGraphFileHashType
    :param hash_value: Value of the file hash.
    :type hash_value: str
    """

    # Attribute -> JSON wire key / msrest type mapping for (de)serialization.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'hash_type': {'key': 'hashType', 'type': 'str'},
        'hash_value': {'key': 'hashValue', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphFileHash, self).__init__(**kwargs)
        # Copy each recognized keyword onto the instance; omitted ones
        # default to None.
        for _field in ('additional_properties', 'hash_type', 'hash_value'):
            setattr(self, _field, kwargs.get(_field))
class MicrosoftGraphFileSecurityState(msrest.serialization.Model):
    """fileSecurityState.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param file_hash: fileHash.
    :type file_hash: ~security.models.MicrosoftGraphFileHash
    :param name: File name (without path).
    :type name: str
    :param path: Full file path of the file/imageFile.
    :type path: str
    :param risk_score: Provider generated/calculated risk score of the alert file. Recommended
     value range of 0-1, which equates to a percentage.
    :type risk_score: str
    """

    # Attribute -> JSON wire key / msrest type mapping for (de)serialization.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'file_hash': {'key': 'fileHash', 'type': 'MicrosoftGraphFileHash'},
        'name': {'key': 'name', 'type': 'str'},
        'path': {'key': 'path', 'type': 'str'},
        'risk_score': {'key': 'riskScore', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphFileSecurityState, self).__init__(**kwargs)
        # Copy each recognized keyword onto the instance; omitted ones
        # default to None.
        for _field in (
            'additional_properties', 'file_hash', 'name', 'path',
            'risk_score',
        ):
            setattr(self, _field, kwargs.get(_field))
class MicrosoftGraphHostSecurityState(msrest.serialization.Model):
    """hostSecurityState.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param fqdn: Host FQDN (Fully Qualified Domain Name) (for example, machine.company.com).
    :type fqdn: str
    :param is_azure_ad_joined:
    :type is_azure_ad_joined: bool
    :param is_azure_ad_registered:
    :type is_azure_ad_registered: bool
    :param is_hybrid_azure_domain_joined: True if the host is domain joined to an on-premises
     Active Directory domain.
    :type is_hybrid_azure_domain_joined: bool
    :param net_bios_name: The local host name, without the DNS domain name.
    :type net_bios_name: str
    :param os: Host Operating System. (For example, Windows10, MacOS, RHEL, etc.).
    :type os: str
    :param private_ip_address: Private (not routable) IPv4 or IPv6 address (see RFC 1918) at the
     time of the alert.
    :type private_ip_address: str
    :param public_ip_address: Publicly routable IPv4 or IPv6 address (see RFC 1918) at time of the
     alert.
    :type public_ip_address: str
    :param risk_score: Provider-generated/calculated risk score of the host. Recommended value
     range of 0-1, which equates to a percentage.
    :type risk_score: str
    """

    # Attribute -> JSON wire key / msrest type mapping for (de)serialization.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'fqdn': {'key': 'fqdn', 'type': 'str'},
        'is_azure_ad_joined': {'key': 'isAzureAdJoined', 'type': 'bool'},
        'is_azure_ad_registered': {'key': 'isAzureAdRegistered', 'type': 'bool'},
        'is_hybrid_azure_domain_joined': {'key': 'isHybridAzureDomainJoined', 'type': 'bool'},
        'net_bios_name': {'key': 'netBiosName', 'type': 'str'},
        'os': {'key': 'os', 'type': 'str'},
        'private_ip_address': {'key': 'privateIpAddress', 'type': 'str'},
        'public_ip_address': {'key': 'publicIpAddress', 'type': 'str'},
        'risk_score': {'key': 'riskScore', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphHostSecurityState, self).__init__(**kwargs)
        # Copy each recognized keyword onto the instance; omitted ones
        # default to None.
        for _field in (
            'additional_properties', 'fqdn', 'is_azure_ad_joined',
            'is_azure_ad_registered', 'is_hybrid_azure_domain_joined',
            'net_bios_name', 'os', 'private_ip_address', 'public_ip_address',
            'risk_score',
        ):
            setattr(self, _field, kwargs.get(_field))
class MicrosoftGraphMalwareState(msrest.serialization.Model):
    """malwareState.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param category: Provider-generated malware category (for example, trojan, ransomware, etc.).
    :type category: str
    :param family: Provider-generated malware family (for example, 'wannacry', 'notpetya', etc.).
    :type family: str
    :param name: Provider-generated malware variant name (for example, Trojan:Win32/Powessere.H).
    :type name: str
    :param severity: Provider-determined severity of this malware.
    :type severity: str
    :param was_running: Indicates whether the detected file (malware/vulnerability) was running at
     the time of detection or was detected at rest on the disk.
    :type was_running: bool
    """

    # Attribute -> JSON wire key / msrest type mapping for (de)serialization.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'category': {'key': 'category', 'type': 'str'},
        'family': {'key': 'family', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'severity': {'key': 'severity', 'type': 'str'},
        'was_running': {'key': 'wasRunning', 'type': 'bool'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphMalwareState, self).__init__(**kwargs)
        # Copy each recognized keyword onto the instance; omitted ones
        # default to None.
        for _field in (
            'additional_properties', 'category', 'family', 'name',
            'severity', 'was_running',
        ):
            setattr(self, _field, kwargs.get(_field))
class MicrosoftGraphNetworkConnection(msrest.serialization.Model):
    """networkConnection.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param application_name: Name of the application managing the network connection (for example,
     Facebook or SMTP).
    :type application_name: str
    :param destination_address: Destination IP address (of the network connection).
    :type destination_address: str
    :param destination_domain: Destination domain portion of the destination URL. (for example
     'www.contoso.com').
    :type destination_domain: str
    :param destination_location: Location (by IP address mapping) associated with the destination
     of a network connection.
    :type destination_location: str
    :param destination_port: Destination port (of the network connection).
    :type destination_port: str
    :param destination_url: Network connection URL/URI string - excluding parameters. (for example
     'www.contoso.com/products/default.html').
    :type destination_url: str
    :param direction: Possible values include: "unknown", "inbound", "outbound",
     "unknownFutureValue".
    :type direction: str or ~security.models.MicrosoftGraphConnectionDirection
    :param domain_registered_date_time: Date when the destination domain was registered. The
     Timestamp type represents date and time information using ISO 8601 format and is always in UTC
     time. For example, midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'.
    :type domain_registered_date_time: ~datetime.datetime
    :param local_dns_name: The local DNS name resolution as it appears in the host's local DNS
     cache (for example, in case the 'hosts' file was tampered with).
    :type local_dns_name: str
    :param nat_destination_address: Network Address Translation destination IP address.
    :type nat_destination_address: str
    :param nat_destination_port: Network Address Translation destination port.
    :type nat_destination_port: str
    :param nat_source_address: Network Address Translation source IP address.
    :type nat_source_address: str
    :param nat_source_port: Network Address Translation source port.
    :type nat_source_port: str
    :param protocol: Possible values include: "ip", "icmp", "unknown", "igmp", "ggp", "ipv4",
     "tcp", "pup", "udp", "idp", "ipv6", "ipv6RoutingHeader", "ipv6FragmentHeader",
     "ipSecEncapsulatingSecurityPayload", "ipSecAuthenticationHeader", "icmpV6", "ipv6NoNextHeader",
     "ipv6DestinationOptions", "nd", "raw", "ipx", "spx", "spxII", "unknownFutureValue".
    :type protocol: str or ~security.models.MicrosoftGraphSecurityNetworkProtocol
    :param risk_score: Provider generated/calculated risk score of the network connection.
     Recommended value range of 0-1, which equates to a percentage.
    :type risk_score: str
    :param source_address: Source (i.e. origin) IP address (of the network connection).
    :type source_address: str
    :param source_location: Location (by IP address mapping) associated with the source of a
     network connection.
    :type source_location: str
    :param source_port: Source (i.e. origin) IP port (of the network connection).
    :type source_port: str
    :param status: Possible values include: "unknown", "attempted", "succeeded", "blocked",
     "failed", "unknownFutureValue".
    :type status: str or ~security.models.MicrosoftGraphConnectionStatus
    :param url_parameters: Parameters (suffix) of the destination URL.
    :type url_parameters: str
    """

    # Attribute -> JSON wire key / msrest type mapping for (de)serialization.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'application_name': {'key': 'applicationName', 'type': 'str'},
        'destination_address': {'key': 'destinationAddress', 'type': 'str'},
        'destination_domain': {'key': 'destinationDomain', 'type': 'str'},
        'destination_location': {'key': 'destinationLocation', 'type': 'str'},
        'destination_port': {'key': 'destinationPort', 'type': 'str'},
        'destination_url': {'key': 'destinationUrl', 'type': 'str'},
        'direction': {'key': 'direction', 'type': 'str'},
        'domain_registered_date_time': {'key': 'domainRegisteredDateTime', 'type': 'iso-8601'},
        'local_dns_name': {'key': 'localDnsName', 'type': 'str'},
        'nat_destination_address': {'key': 'natDestinationAddress', 'type': 'str'},
        'nat_destination_port': {'key': 'natDestinationPort', 'type': 'str'},
        'nat_source_address': {'key': 'natSourceAddress', 'type': 'str'},
        'nat_source_port': {'key': 'natSourcePort', 'type': 'str'},
        'protocol': {'key': 'protocol', 'type': 'str'},
        'risk_score': {'key': 'riskScore', 'type': 'str'},
        'source_address': {'key': 'sourceAddress', 'type': 'str'},
        'source_location': {'key': 'sourceLocation', 'type': 'str'},
        'source_port': {'key': 'sourcePort', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'url_parameters': {'key': 'urlParameters', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphNetworkConnection, self).__init__(**kwargs)
        # Copy each recognized keyword onto the instance; omitted ones
        # default to None.
        for _field in (
            'additional_properties', 'application_name',
            'destination_address', 'destination_domain',
            'destination_location', 'destination_port', 'destination_url',
            'direction', 'domain_registered_date_time', 'local_dns_name',
            'nat_destination_address', 'nat_destination_port',
            'nat_source_address', 'nat_source_port', 'protocol',
            'risk_score', 'source_address', 'source_location', 'source_port',
            'status', 'url_parameters',
        ):
            setattr(self, _field, kwargs.get(_field))
class MicrosoftGraphProcess(msrest.serialization.Model):
    """process.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param account_name: User account identifier (user account context the process ran under) for
     example, AccountName, SID, and so on.
    :type account_name: str
    :param command_line: The full process invocation commandline including all parameters.
    :type command_line: str
    :param created_date_time: Time at which the process was started. The Timestamp type represents
     date and time information using ISO 8601 format and is always in UTC time. For example,
     midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'.
    :type created_date_time: ~datetime.datetime
    :param file_hash: fileHash.
    :type file_hash: ~security.models.MicrosoftGraphFileHash
    :param integrity_level: Possible values include: "unknown", "untrusted", "low", "medium",
     "high", "system", "unknownFutureValue".
    :type integrity_level: str or ~security.models.MicrosoftGraphProcessIntegrityLevel
    :param is_elevated: True if the process is elevated.
    :type is_elevated: bool
    :param name: The name of the process' Image file.
    :type name: str
    :param parent_process_created_date_time: DateTime at which the parent process was started. The
     Timestamp type represents date and time information using ISO 8601 format and is always in UTC
     time. For example, midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'.
    :type parent_process_created_date_time: ~datetime.datetime
    :param parent_process_id: The Process ID (PID) of the parent process.
    :type parent_process_id: int
    :param parent_process_name: The name of the image file of the parent process.
    :type parent_process_name: str
    :param path: Full path, including filename.
    :type path: str
    :param process_id: The Process ID (PID) of the process.
    :type process_id: int
    """

    # msrest validation constraints: PIDs must fit in a signed 32-bit integer.
    _validation = {
        'parent_process_id': {'maximum': 2147483647, 'minimum': -2147483648},
        'process_id': {'maximum': 2147483647, 'minimum': -2147483648},
    }

    # Attribute -> JSON wire key / msrest type mapping for (de)serialization.
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'account_name': {'key': 'accountName', 'type': 'str'},
        'command_line': {'key': 'commandLine', 'type': 'str'},
        'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
        'file_hash': {'key': 'fileHash', 'type': 'MicrosoftGraphFileHash'},
        'integrity_level': {'key': 'integrityLevel', 'type': 'str'},
        'is_elevated': {'key': 'isElevated', 'type': 'bool'},
        'name': {'key': 'name', 'type': 'str'},
        'parent_process_created_date_time': {'key': 'parentProcessCreatedDateTime', 'type': 'iso-8601'},
        'parent_process_id': {'key': 'parentProcessId', 'type': 'int'},
        'parent_process_name': {'key': 'parentProcessName', 'type': 'str'},
        'path': {'key': 'path', 'type': 'str'},
        'process_id': {'key': 'processId', 'type': 'int'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(MicrosoftGraphProcess, self).__init__(**kwargs)
        # Copy each recognized keyword onto the instance; omitted ones
        # default to None.
        for _field in (
            'additional_properties', 'account_name', 'command_line',
            'created_date_time', 'file_hash', 'integrity_level',
            'is_elevated', 'name', 'parent_process_created_date_time',
            'parent_process_id', 'parent_process_name', 'path', 'process_id',
        ):
            setattr(self, _field, kwargs.get(_field))
class MicrosoftGraphRegistryKeyState(msrest.serialization.Model):
    """registryKeyState.

    Snapshot of a registry key before and after a change.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param hive: Registry hive. Possible values include: "unknown", "currentConfig",
     "currentUser", "localMachineSam", "localMachineSecurity", "localMachineSoftware",
     "localMachineSystem", "usersDefault", "unknownFutureValue".
    :type hive: str or ~security.models.MicrosoftGraphRegistryHive
    :param key: Current (i.e. changed) registry key (excludes HIVE).
    :type key: str
    :param old_key: Previous (i.e. before change) registry key (excludes HIVE).
    :type old_key: str
    :param old_value_data: Previous registry key value data (contents).
    :type old_value_data: str
    :param old_value_name: Previous registry key value name.
    :type old_value_name: str
    :param operation: Operation that changed the key. Possible values include: "unknown",
     "create", "modify", "delete", "unknownFutureValue".
    :type operation: str or ~security.models.MicrosoftGraphRegistryOperation
    :param process_id: Process ID (PID) of the process that modified the registry key (process
     details will appear in the alert 'processes' collection).
    :type process_id: int
    :param value_data: Current registry key value data (contents).
    :type value_data: str
    :param value_name: Current registry key value name.
    :type value_name: str
    :param value_type: Registry value type. Possible values include: "unknown", "binary",
     "dword", "dwordLittleEndian", "dwordBigEndian", "expandSz", "link", "multiSz", "none",
     "qword", "qwordlittleEndian", "sz", "unknownFutureValue".
    :type value_type: str or ~security.models.MicrosoftGraphRegistryValueType
    """
    _validation = {
        'process_id': {'maximum': 2147483647, 'minimum': -2147483648},
    }
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'hive': {'key': 'hive', 'type': 'str'},
        'key': {'key': 'key', 'type': 'str'},
        'old_key': {'key': 'oldKey', 'type': 'str'},
        'old_value_data': {'key': 'oldValueData', 'type': 'str'},
        'old_value_name': {'key': 'oldValueName', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'process_id': {'key': 'processId', 'type': 'int'},
        'value_data': {'key': 'valueData', 'type': 'str'},
        'value_name': {'key': 'valueName', 'type': 'str'},
        'value_type': {'key': 'valueType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Accept any documented property as a keyword argument."""
        super(MicrosoftGraphRegistryKeyState, self).__init__(**kwargs)
        # Every property is optional and defaults to None when not supplied.
        for _attr in ('additional_properties', 'hive', 'key', 'old_key',
                      'old_value_data', 'old_value_name', 'operation',
                      'process_id', 'value_data', 'value_name', 'value_type'):
            setattr(self, _attr, kwargs.get(_attr, None))
class MicrosoftGraphSecureScore(MicrosoftGraphEntity):
    """secureScore.

    Tenant secure-score snapshot for a given date.

    :param id: Read-only.
    :type id: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param active_user_count: Active user count of the given tenant.
    :type active_user_count: int
    :param average_comparative_scores: Average score by different scopes (for example, average by
     industry, average by seating) and control category (Identity, Data, Device, Apps,
     Infrastructure) within the scope.
    :type average_comparative_scores: list[~security.models.MicrosoftGraphAverageComparativeScore]
    :param azure_tenant_id: GUID string for tenant ID.
    :type azure_tenant_id: str
    :param control_scores: Contains tenant scores for a set of controls.
    :type control_scores: list[~security.models.MicrosoftGraphControlScore]
    :param created_date_time: The date when the entity is created.
    :type created_date_time: ~datetime.datetime
    :param current_score: Tenant current attained score on the specified date.
    :type current_score: float
    :param enabled_services: Microsoft-provided services for the tenant (for example, Exchange
     online, Skype, Sharepoint).
    :type enabled_services: list[str]
    :param licensed_user_count: Licensed user count of the given tenant.
    :type licensed_user_count: int
    :param max_score: Tenant maximum possible score on the specified date.
    :type max_score: float
    :param vendor_information: securityVendorInformation.
    :type vendor_information: ~security.models.MicrosoftGraphSecurityVendorInformation
    """
    _validation = {
        'active_user_count': {'maximum': 2147483647, 'minimum': -2147483648},
        'licensed_user_count': {'maximum': 2147483647, 'minimum': -2147483648},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'active_user_count': {'key': 'activeUserCount', 'type': 'int'},
        'average_comparative_scores': {'key': 'averageComparativeScores', 'type': '[MicrosoftGraphAverageComparativeScore]'},
        'azure_tenant_id': {'key': 'azureTenantId', 'type': 'str'},
        'control_scores': {'key': 'controlScores', 'type': '[MicrosoftGraphControlScore]'},
        'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'},
        'current_score': {'key': 'currentScore', 'type': 'float'},
        'enabled_services': {'key': 'enabledServices', 'type': '[str]'},
        'licensed_user_count': {'key': 'licensedUserCount', 'type': 'int'},
        'max_score': {'key': 'maxScore', 'type': 'float'},
        'vendor_information': {'key': 'vendorInformation', 'type': 'MicrosoftGraphSecurityVendorInformation'},
    }

    def __init__(self, **kwargs):
        """Accept any documented property as a keyword argument; 'id' is handled by the base entity."""
        super(MicrosoftGraphSecureScore, self).__init__(**kwargs)
        # Every property is optional and defaults to None when not supplied.
        for _attr in ('additional_properties', 'active_user_count',
                      'average_comparative_scores', 'azure_tenant_id',
                      'control_scores', 'created_date_time', 'current_score',
                      'enabled_services', 'licensed_user_count', 'max_score',
                      'vendor_information'):
            setattr(self, _attr, kwargs.get(_attr, None))
class MicrosoftGraphSecureScoreControlProfile(MicrosoftGraphEntity):
    """secureScoreControlProfile.

    Profile describing a single secure-score control and its tenant state.

    :param id: Read-only.
    :type id: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param action_type: Control action type (Config, Review, Behavior).
    :type action_type: str
    :param action_url: URL to where the control can be actioned.
    :type action_url: str
    :param azure_tenant_id: GUID string for tenant ID.
    :type azure_tenant_id: str
    :param compliance_information: Compliance mappings for the control.
    :type compliance_information: list[~security.models.MicrosoftGraphComplianceInformation]
    :param control_category: Control action category (Identity, Data, Device, Apps,
     Infrastructure).
    :type control_category: str
    :param control_state_updates: History of tenant state changes for the control.
    :type control_state_updates: list[~security.models.MicrosoftGraphSecureScoreControlStateUpdate]
    :param deprecated: Flag to indicate if a control is deprecated.
    :type deprecated: bool
    :param implementation_cost: Resource cost of implementing the control (low, moderate, high).
    :type implementation_cost: str
    :param last_modified_date_time: Time at which the control profile entity was last modified.
     The Timestamp type represents date and time.
    :type last_modified_date_time: ~datetime.datetime
    :param max_score: Max attainable score for the control.
    :type max_score: float
    :param rank: Microsoft's stack ranking of the control.
    :type rank: int
    :param remediation: Description of what the control will help remediate.
    :type remediation: str
    :param remediation_impact: Description of the impact on users of the remediation.
    :type remediation_impact: str
    :param service: Service that owns the control (Exchange, Sharepoint, Azure AD).
    :type service: str
    :param threats: List of threats the control mitigates
     (accountBreach, dataDeletion, dataExfiltration, dataSpillage, ...).
    :type threats: list[str]
    :param tier: Control tier.
    :type tier: str
    :param title: Title of the control.
    :type title: str
    :param user_impact: Impact of the control on users.
    :type user_impact: str
    :param vendor_information: securityVendorInformation.
    :type vendor_information: ~security.models.MicrosoftGraphSecurityVendorInformation
    """
    _validation = {
        'rank': {'maximum': 2147483647, 'minimum': -2147483648},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'action_type': {'key': 'actionType', 'type': 'str'},
        'action_url': {'key': 'actionUrl', 'type': 'str'},
        'azure_tenant_id': {'key': 'azureTenantId', 'type': 'str'},
        'compliance_information': {'key': 'complianceInformation', 'type': '[MicrosoftGraphComplianceInformation]'},
        'control_category': {'key': 'controlCategory', 'type': 'str'},
        'control_state_updates': {'key': 'controlStateUpdates', 'type': '[MicrosoftGraphSecureScoreControlStateUpdate]'},
        'deprecated': {'key': 'deprecated', 'type': 'bool'},
        'implementation_cost': {'key': 'implementationCost', 'type': 'str'},
        'last_modified_date_time': {'key': 'lastModifiedDateTime', 'type': 'iso-8601'},
        'max_score': {'key': 'maxScore', 'type': 'float'},
        'rank': {'key': 'rank', 'type': 'int'},
        'remediation': {'key': 'remediation', 'type': 'str'},
        'remediation_impact': {'key': 'remediationImpact', 'type': 'str'},
        'service': {'key': 'service', 'type': 'str'},
        'threats': {'key': 'threats', 'type': '[str]'},
        'tier': {'key': 'tier', 'type': 'str'},
        'title': {'key': 'title', 'type': 'str'},
        'user_impact': {'key': 'userImpact', 'type': 'str'},
        'vendor_information': {'key': 'vendorInformation', 'type': 'MicrosoftGraphSecurityVendorInformation'},
    }

    def __init__(self, **kwargs):
        """Accept any documented property as a keyword argument; 'id' is handled by the base entity."""
        super(MicrosoftGraphSecureScoreControlProfile, self).__init__(**kwargs)
        # Every property is optional and defaults to None when not supplied.
        for _attr in ('additional_properties', 'action_type', 'action_url',
                      'azure_tenant_id', 'compliance_information',
                      'control_category', 'control_state_updates', 'deprecated',
                      'implementation_cost', 'last_modified_date_time',
                      'max_score', 'rank', 'remediation', 'remediation_impact',
                      'service', 'threats', 'tier', 'title', 'user_impact',
                      'vendor_information'):
            setattr(self, _attr, kwargs.get(_attr, None))
class MicrosoftGraphSecureScoreControlStateUpdate(msrest.serialization.Model):
    """secureScoreControlStateUpdate.

    Record of a tenant-state change on a secure-score control.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param assigned_to: Assigns the control to the user who will take the action.
    :type assigned_to: str
    :param comment: Provides optional comment about the control.
    :type comment: str
    :param state: State of the control, which can be modified via a PATCH command (for example,
     ignored, thirdParty).
    :type state: str
    :param updated_by: ID of the user who updated tenant state.
    :type updated_by: str
    :param updated_date_time: Time at which the control state was updated.
    :type updated_date_time: ~datetime.datetime
    """
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'assigned_to': {'key': 'assignedTo', 'type': 'str'},
        'comment': {'key': 'comment', 'type': 'str'},
        'state': {'key': 'state', 'type': 'str'},
        'updated_by': {'key': 'updatedBy', 'type': 'str'},
        'updated_date_time': {'key': 'updatedDateTime', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        """Accept any documented property as a keyword argument."""
        super(MicrosoftGraphSecureScoreControlStateUpdate, self).__init__(**kwargs)
        # Every property is optional and defaults to None when not supplied.
        for _attr in ('additional_properties', 'assigned_to', 'comment',
                      'state', 'updated_by', 'updated_date_time'):
            setattr(self, _attr, kwargs.get(_attr, None))
class MicrosoftGraphSecurity(MicrosoftGraphEntity):
    """security.

    Root container for alerts and secure-score resources.

    :param id: Read-only.
    :type id: str
    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param alerts: Read-only. Nullable.
    :type alerts: list[~security.models.MicrosoftGraphAlert]
    :param secure_score_control_profiles: Secure-score control profiles.
    :type secure_score_control_profiles:
     list[~security.models.MicrosoftGraphSecureScoreControlProfile]
    :param secure_scores: Secure scores.
    :type secure_scores: list[~security.models.MicrosoftGraphSecureScore]
    """
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'additional_properties': {'key': '', 'type': '{object}'},
        'alerts': {'key': 'alerts', 'type': '[MicrosoftGraphAlert]'},
        'secure_score_control_profiles': {'key': 'secureScoreControlProfiles', 'type': '[MicrosoftGraphSecureScoreControlProfile]'},
        'secure_scores': {'key': 'secureScores', 'type': '[MicrosoftGraphSecureScore]'},
    }

    def __init__(self, **kwargs):
        """Accept any documented property as a keyword argument; 'id' is handled by the base entity."""
        super(MicrosoftGraphSecurity, self).__init__(**kwargs)
        # Every property is optional and defaults to None when not supplied.
        for _attr in ('additional_properties', 'alerts',
                      'secure_score_control_profiles', 'secure_scores'):
            setattr(self, _attr, kwargs.get(_attr, None))
class MicrosoftGraphSecurityResource(msrest.serialization.Model):
    """securityResource.

    A resource related to an alert.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param resource: Name of the resource that is related to current alert. Required.
    :type resource: str
    :param resource_type: Possible values include: "unknown", "attacked", "related",
     "unknownFutureValue".
    :type resource_type: str or ~security.models.MicrosoftGraphSecurityResourceType
    """
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'resource': {'key': 'resource', 'type': 'str'},
        'resource_type': {'key': 'resourceType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Accept any documented property as a keyword argument."""
        super(MicrosoftGraphSecurityResource, self).__init__(**kwargs)
        # Every property defaults to None when not supplied (no constructor-time
        # enforcement of the documented 'resource' requirement, matching the
        # generated behavior).
        for _attr in ('additional_properties', 'resource', 'resource_type'):
            setattr(self, _attr, kwargs.get(_attr, None))
class MicrosoftGraphSecurityVendorInformation(msrest.serialization.Model):
    """securityVendorInformation.

    Identifies the vendor/provider that generated an alert.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param provider: Specific provider (product/service - not vendor company); for example,
     WindowsDefenderATP.
    :type provider: str
    :param provider_version: Version of the provider or subprovider, if it exists, that generated
     the alert. Required.
    :type provider_version: str
    :param sub_provider: Specific subprovider (under aggregating provider); for example,
     WindowsDefenderATP.SmartScreen.
    :type sub_provider: str
    :param vendor: Name of the alert vendor (for example, Microsoft, Dell, FireEye). Required.
    :type vendor: str
    """
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'provider': {'key': 'provider', 'type': 'str'},
        'provider_version': {'key': 'providerVersion', 'type': 'str'},
        'sub_provider': {'key': 'subProvider', 'type': 'str'},
        'vendor': {'key': 'vendor', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Accept any documented property as a keyword argument."""
        super(MicrosoftGraphSecurityVendorInformation, self).__init__(**kwargs)
        # Every property defaults to None when not supplied.
        for _attr in ('additional_properties', 'provider', 'provider_version',
                      'sub_provider', 'vendor'):
            setattr(self, _attr, kwargs.get(_attr, None))
class MicrosoftGraphUserSecurityState(msrest.serialization.Model):
    """userSecurityState.

    User account state at the time of an alert.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param aad_user_id: AAD User object identifier (GUID) - represents the physical/multi-account
     user entity.
    :type aad_user_id: str
    :param account_name: Account name of user account (without Active Directory domain or DNS
     domain) - (also called mailNickName).
    :type account_name: str
    :param domain_name: NetBIOS/Active Directory domain of user account (that is, domain/account
     format).
    :type domain_name: str
    :param email_role: Possible values include: "unknown", "sender", "recipient",
     "unknownFutureValue".
    :type email_role: str or ~security.models.MicrosoftGraphEmailRole
    :param is_vpn: Indicates whether the user logged on through a VPN.
    :type is_vpn: bool
    :param logon_date_time: Time at which the sign-in occurred. The Timestamp type represents
     date and time information using ISO 8601 format and is always in UTC time. For example,
     midnight UTC on Jan 1, 2014 would look like this: '2014-01-01T00:00:00Z'.
    :type logon_date_time: ~datetime.datetime
    :param logon_id: User sign-in ID.
    :type logon_id: str
    :param logon_ip: IP Address the sign-in request originated from.
    :type logon_ip: str
    :param logon_location: Location (by IP address mapping) associated with a user sign-in event
     by this user.
    :type logon_location: str
    :param logon_type: Possible values include: "unknown", "interactive", "remoteInteractive",
     "network", "batch", "service", "unknownFutureValue".
    :type logon_type: str or ~security.models.MicrosoftGraphLogonType
    :param on_premises_security_identifier: Active Directory (on-premises) Security Identifier
     (SID) of the user.
    :type on_premises_security_identifier: str
    :param risk_score: Provider-generated/calculated risk score of the user account. Recommended
     value range of 0-1, which equates to a percentage.
    :type risk_score: str
    :param user_account_type: Possible values include: "unknown", "standard", "power",
     "administrator", "unknownFutureValue".
    :type user_account_type: str or ~security.models.MicrosoftGraphUserAccountSecurityType
    :param user_principal_name: User sign-in name - internet format: (user account name)@(user
     account DNS domain name).
    :type user_principal_name: str
    """
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'aad_user_id': {'key': 'aadUserId', 'type': 'str'},
        'account_name': {'key': 'accountName', 'type': 'str'},
        'domain_name': {'key': 'domainName', 'type': 'str'},
        'email_role': {'key': 'emailRole', 'type': 'str'},
        'is_vpn': {'key': 'isVpn', 'type': 'bool'},
        'logon_date_time': {'key': 'logonDateTime', 'type': 'iso-8601'},
        'logon_id': {'key': 'logonId', 'type': 'str'},
        'logon_ip': {'key': 'logonIp', 'type': 'str'},
        'logon_location': {'key': 'logonLocation', 'type': 'str'},
        'logon_type': {'key': 'logonType', 'type': 'str'},
        'on_premises_security_identifier': {'key': 'onPremisesSecurityIdentifier', 'type': 'str'},
        'risk_score': {'key': 'riskScore', 'type': 'str'},
        'user_account_type': {'key': 'userAccountType', 'type': 'str'},
        'user_principal_name': {'key': 'userPrincipalName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Accept any documented property as a keyword argument."""
        super(MicrosoftGraphUserSecurityState, self).__init__(**kwargs)
        # Every property is optional and defaults to None when not supplied.
        for _attr in ('additional_properties', 'aad_user_id', 'account_name',
                      'domain_name', 'email_role', 'is_vpn', 'logon_date_time',
                      'logon_id', 'logon_ip', 'logon_location', 'logon_type',
                      'on_premises_security_identifier', 'risk_score',
                      'user_account_type', 'user_principal_name'):
            setattr(self, _attr, kwargs.get(_attr, None))
class MicrosoftGraphVulnerabilityState(msrest.serialization.Model):
    """vulnerabilityState.

    State of a detected vulnerability.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param cve: Common Vulnerabilities and Exposures (CVE) for the vulnerability.
    :type cve: str
    :param severity: Base Common Vulnerability Scoring System (CVSS) severity score for this
     vulnerability.
    :type severity: str
    :param was_running: Indicates whether the detected vulnerability (file) was running at the
     time of detection or was the file detected at rest on the disk.
    :type was_running: bool
    """
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'cve': {'key': 'cve', 'type': 'str'},
        'severity': {'key': 'severity', 'type': 'str'},
        'was_running': {'key': 'wasRunning', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        """Accept any documented property as a keyword argument."""
        super(MicrosoftGraphVulnerabilityState, self).__init__(**kwargs)
        # Every property is optional and defaults to None when not supplied.
        for _attr in ('additional_properties', 'cve', 'severity', 'was_running'):
            setattr(self, _attr, kwargs.get(_attr, None))
class OdataError(msrest.serialization.Model):
    """OdataError.

    Top-level OData error envelope.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param error: Required.
    :type error: ~security.models.OdataErrorMain
    """
    _validation = {
        'error': {'required': True},
    }
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'error': {'key': 'error', 'type': 'OdataErrorMain'},
    }

    def __init__(self, **kwargs):
        """Accept the documented properties as keyword arguments.

        :raises KeyError: if the required 'error' keyword is missing.
        """
        super(OdataError, self).__init__(**kwargs)
        self.additional_properties = kwargs.get('additional_properties', None)
        # 'error' is required: a direct lookup raises KeyError when absent.
        self.error = kwargs['error']
class OdataErrorDetail(msrest.serialization.Model):
    """OdataErrorDetail.

    A single detail entry inside an OData error.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param code: Required.
    :type code: str
    :param message: Required.
    :type message: str
    :param target: Target of the error, if any.
    :type target: str
    """
    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        """Accept the documented properties as keyword arguments.

        :raises KeyError: if a required keyword ('code', 'message') is missing.
        """
        super(OdataErrorDetail, self).__init__(**kwargs)
        # Optional properties default to None when not supplied.
        for _attr in ('additional_properties', 'target'):
            setattr(self, _attr, kwargs.get(_attr, None))
        # Required properties: direct lookups raise KeyError when absent.
        self.code = kwargs['code']
        self.message = kwargs['message']
class OdataErrorMain(msrest.serialization.Model):
    """OdataErrorMain.

    Main body of an OData error, with optional nested details.

    All required parameters must be populated in order to send to Azure.

    :param additional_properties: Unmatched properties from the message are deserialized to this
     collection.
    :type additional_properties: dict[str, object]
    :param code: Required.
    :type code: str
    :param message: Required.
    :type message: str
    :param target: Target of the error, if any.
    :type target: str
    :param details: Additional error details.
    :type details: list[~security.models.OdataErrorDetail]
    :param innererror: The structure of this object is service-specific.
    :type innererror: dict[str, object]
    """
    _validation = {
        'code': {'required': True},
        'message': {'required': True},
    }
    _attribute_map = {
        'additional_properties': {'key': '', 'type': '{object}'},
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        'details': {'key': 'details', 'type': '[OdataErrorDetail]'},
        'innererror': {'key': 'innererror', 'type': '{object}'},
    }

    def __init__(self, **kwargs):
        """Accept the documented properties as keyword arguments.

        :raises KeyError: if a required keyword ('code', 'message') is missing.
        """
        super(OdataErrorMain, self).__init__(**kwargs)
        # Optional properties default to None when not supplied.
        for _attr in ('additional_properties', 'target', 'details', 'innererror'):
            setattr(self, _attr, kwargs.get(_attr, None))
        # Required properties: direct lookups raise KeyError when absent.
        self.code = kwargs['code']
        self.message = kwargs['message']
|
# Copyright (c) 2015
#
# All rights reserved.
#
# This file is distributed under the Clear BSD license.
# The full text can be found in LICENSE in the root directory.
from boardfarm import lib
from boardfarm.devices import prompt
from boardfarm.tests import rootfs_boot
class Sysupgrade(rootfs_boot.RootFSBootTest):
    '''Upgrading via sysupgrade works.

    Flashes a new firmware image onto the board with OpenWrt's ``sysupgrade``
    and verifies that a marker file created in /etc/config survives the
    upgrade (sysupgrade preserves /etc/config by default).
    '''
    def runTest(self):
        board = self.dev.board
        super(Sysupgrade, self).runTest()
        # The test only makes sense when the config names an image to flash.
        if not hasattr(self.config, "SYSUPGRADE_NEW"):
            self.skipTest("no sysupgrade specified")
        # output some stuff before we kill all the logs in the system, just
        # to be able to review these logs later
        board.sendline('logread')
        board.expect(prompt, timeout=120)
        board.sendline('dmesg')
        board.expect(prompt)
        # This test can damage flash, so to properly recover we need
        # to reflash upon recovery
        self.reflash = True
        # Marker file: should still exist after the upgrade completes.
        board.sendline('touch /etc/config/TEST')
        board.expect('/etc/config/TEST')
        board.expect(prompt)
        board.sendline("cd /tmp")
        # Stage the image on the TFTP server, then pull it onto the board
        # (240 s transfer timeout).
        filename = board.prepare_file(self.config.SYSUPGRADE_NEW)
        new_filename = board.tftp_get_file(board.tftp_server, filename, 240)
        board.sendline("sysupgrade -v /tmp/%s" % new_filename)
        # sysupgrade reboots the board on success.
        board.expect("Restarting system", timeout=180)
        lib.common.wait_for_boot(board)
        board.boot_linux()
        board.wait_for_linux()
        # Verify the marker file survived the flash.
        board.sendline('ls -alh /etc/config/TEST')
        board.expect('/etc/config/TEST\r\n')
        board.expect(prompt)
|
import os
import numpy as np
import tensorflow as tf
# Command-line flag registry shared across the project's scripts.
# NOTE(review): tf.app.flags is the TensorFlow 1.x flag API and was removed
# in TF 2.x — confirm the pinned TensorFlow version before upgrading.
flags = tf.app.flags
FLAGS = flags.FLAGS
def create_directory(directory):
    """Create *directory* and any missing parent directories (like ``mkdir -p``).

    A no-op when *directory* is empty or already exists.

    :param directory: Slash-separated path of the directory to create
        (relative or absolute).
    :raises OSError: if creation fails for a reason other than the
        directory already existing (e.g. a path component is a file,
        or permission is denied).
    """
    if not directory:
        # The original per-component loop silently ignored an empty path;
        # preserve that behavior.
        return
    try:
        # os.makedirs creates all intermediate directories in one call,
        # replacing the hand-rolled split('/') loop, which had a
        # check-then-create race between exists() and makedirs().
        os.makedirs(directory)
    except OSError:
        # Tolerate the directory already existing (possibly created
        # concurrently); re-raise every other failure.
        if not os.path.isdir(directory):
            raise
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Python 2 compatibility: make classes defined in this module new-style.
__metaclass__ = type
# Standard Ansible module metadata consumed by ansible-doc and the module
# index; 'preview' marks the module interface as not yet stable.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_wireless_controller_wtp
short_description: Configure Wireless Termination Points (WTPs), that is, FortiAPs or APs to be managed by FortiGate in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify wireless_controller feature and wtp category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
wireless_controller_wtp:
description:
- Configure Wireless Termination Points (WTPs), that is, FortiAPs or APs to be managed by FortiGate.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
admin:
description:
- Configure how the FortiGate operating as a wireless controller discovers and manages this WTP, AP or FortiAP.
choices:
- discovered
- disable
- enable
allowaccess:
description:
- Control management access to the managed WTP, FortiAP, or AP. Separate entries with a space.
choices:
- telnet
- http
- https
- ssh
bonjour-profile:
description:
- Bonjour profile name. Source wireless-controller.bonjour-profile.name.
coordinate-enable:
description:
- Enable/disable WTP coordinates (X,Y axis).
choices:
- enable
- disable
coordinate-latitude:
description:
- WTP latitude coordinate.
coordinate-longitude:
description:
- WTP longitude coordinate.
coordinate-x:
description:
- X axis coordinate.
coordinate-y:
description:
- Y axis coordinate.
image-download:
description:
- Enable/disable WTP image download.
choices:
- enable
- disable
index:
description:
- Index (0 - 4294967295).
ip-fragment-preventing:
description:
- Method by which IP fragmentation is prevented for CAPWAP tunneled control and data packets (default = tcp-mss-adjust).
choices:
- tcp-mss-adjust
- icmp-unreachable
lan:
description:
- WTP LAN port mapping.
suboptions:
port-mode:
description:
- LAN port mode.
choices:
- offline
- nat-to-wan
- bridge-to-wan
- bridge-to-ssid
port-ssid:
description:
- Bridge LAN port to SSID. Source wireless-controller.vap.name.
port1-mode:
description:
- LAN port 1 mode.
choices:
- offline
- nat-to-wan
- bridge-to-wan
- bridge-to-ssid
port1-ssid:
description:
- Bridge LAN port 1 to SSID. Source wireless-controller.vap.name.
port2-mode:
description:
- LAN port 2 mode.
choices:
- offline
- nat-to-wan
- bridge-to-wan
- bridge-to-ssid
port2-ssid:
description:
- Bridge LAN port 2 to SSID. Source wireless-controller.vap.name.
port3-mode:
description:
- LAN port 3 mode.
choices:
- offline
- nat-to-wan
- bridge-to-wan
- bridge-to-ssid
port3-ssid:
description:
- Bridge LAN port 3 to SSID. Source wireless-controller.vap.name.
port4-mode:
description:
- LAN port 4 mode.
choices:
- offline
- nat-to-wan
- bridge-to-wan
- bridge-to-ssid
port4-ssid:
description:
- Bridge LAN port 4 to SSID. Source wireless-controller.vap.name.
port5-mode:
description:
- LAN port 5 mode.
choices:
- offline
- nat-to-wan
- bridge-to-wan
- bridge-to-ssid
port5-ssid:
description:
- Bridge LAN port 5 to SSID. Source wireless-controller.vap.name.
port6-mode:
description:
- LAN port 6 mode.
choices:
- offline
- nat-to-wan
- bridge-to-wan
- bridge-to-ssid
port6-ssid:
description:
- Bridge LAN port 6 to SSID. Source wireless-controller.vap.name.
port7-mode:
description:
- LAN port 7 mode.
choices:
- offline
- nat-to-wan
- bridge-to-wan
- bridge-to-ssid
port7-ssid:
description:
- Bridge LAN port 7 to SSID. Source wireless-controller.vap.name.
port8-mode:
description:
- LAN port 8 mode.
choices:
- offline
- nat-to-wan
- bridge-to-wan
- bridge-to-ssid
port8-ssid:
description:
- Bridge LAN port 8 to SSID. Source wireless-controller.vap.name.
led-state:
description:
- Enable to allow the FortiAPs LEDs to light. Disable to keep the LEDs off. You may want to keep the LEDs off so they are not distracting
in low light areas etc.
choices:
- enable
- disable
location:
description:
- Field for describing the physical location of the WTP, AP or FortiAP.
login-passwd:
description:
- Set the managed WTP, FortiAP, or AP's administrator password.
login-passwd-change:
description:
- Change or reset the administrator password of a managed WTP, FortiAP or AP (yes, default, or no, default = no).
choices:
- yes
- default
- no
mesh-bridge-enable:
description:
- Enable/disable mesh Ethernet bridge when WTP is configured as a mesh branch/leaf AP.
choices:
- default
- enable
- disable
name:
description:
- WTP, AP or FortiAP configuration name.
override-allowaccess:
description:
- Enable to override the WTP profile management access configuration.
choices:
- enable
- disable
override-ip-fragment:
description:
- Enable/disable overriding the WTP profile IP fragment prevention setting.
choices:
- enable
- disable
override-lan:
description:
- Enable to override the WTP profile LAN port setting.
choices:
- enable
- disable
override-led-state:
description:
- Enable to override the profile LED state setting for this FortiAP. You must enable this option to use the led-state command to turn off
the FortiAP's LEDs.
choices:
- enable
- disable
override-login-passwd-change:
description:
- Enable to override the WTP profile login-password (administrator password) setting.
choices:
- enable
- disable
override-split-tunnel:
description:
- Enable/disable overriding the WTP profile split tunneling setting.
choices:
- enable
- disable
override-wan-port-mode:
description:
- Enable/disable overriding the wan-port-mode in the WTP profile.
choices:
- enable
- disable
radio-1:
description:
- Configuration options for radio 1.
suboptions:
auto-power-high:
description:
- Automatic transmission power high limit in decibels (dB) of the measured power referenced to one milliwatt (mW), or dBm (10 - 17
dBm, default = 17).
auto-power-level:
description:
- Enable/disable automatic power-level adjustment to prevent co-channel interference (default = enable).
choices:
- enable
- disable
auto-power-low:
description:
- Automatic transmission power low limit in dBm (the actual range of transmit power depends on the AP platform type).
band:
description:
- WiFi band that Radio 1 operates on.
choices:
- 802.11a
- 802.11b
- 802.11g
- 802.11n
- 802.11n-5G
- 802.11n,g-only
- 802.11g-only
- 802.11n-only
- 802.11n-5G-only
- 802.11ac
- 802.11ac,n-only
- 802.11ac-only
channel:
description:
- Selected list of wireless radio channels.
suboptions:
chan:
description:
- Channel number.
required: true
override-analysis:
description:
- Enable to override the WTP profile spectrum analysis configuration.
choices:
- enable
- disable
override-band:
description:
- Enable to override the WTP profile band setting.
choices:
- enable
- disable
override-channel:
description:
- Enable to override WTP profile channel settings.
choices:
- enable
- disable
override-txpower:
description:
- Enable to override the WTP profile power level configuration.
choices:
- enable
- disable
override-vaps:
description:
- Enable to override WTP profile Virtual Access Point (VAP) settings.
choices:
- enable
- disable
power-level:
description:
- Radio power level as a percentage of the maximum transmit power (0 - 100, default = 100).
radio-id:
description:
- radio-id
spectrum-analysis:
description:
- Enable/disable spectrum analysis to find interference that would negatively impact wireless performance.
choices:
- enable
- disable
vap-all:
description:
- Enable/disable the automatic inheritance of all Virtual Access Points (VAPs) (default = enable).
choices:
- enable
- disable
vaps:
description:
- Manually selected list of Virtual Access Points (VAPs).
suboptions:
name:
description:
- Virtual Access Point (VAP) name. Source wireless-controller.vap-group.name wireless-controller.vap.name.
required: true
radio-2:
description:
- Configuration options for radio 2.
suboptions:
auto-power-high:
description:
- Automatic transmission power high limit in decibels (dB) of the measured power referenced to one milliwatt (mW), or dBm (10 - 17
dBm, default = 17).
auto-power-level:
description:
- Enable/disable automatic power-level adjustment to prevent co-channel interference (default = enable).
choices:
- enable
- disable
auto-power-low:
description:
- Automatic transmission power low limit in dBm (the actual range of transmit power depends on the AP platform type).
band:
description:
- WiFi band that Radio 2 operates on.
choices:
- 802.11a
- 802.11b
- 802.11g
- 802.11n
- 802.11n-5G
- 802.11n,g-only
- 802.11g-only
- 802.11n-only
- 802.11n-5G-only
- 802.11ac
- 802.11ac,n-only
- 802.11ac-only
channel:
description:
- Selected list of wireless radio channels.
suboptions:
chan:
description:
- Channel number.
required: true
override-analysis:
description:
- Enable to override the WTP profile spectrum analysis configuration.
choices:
- enable
- disable
override-band:
description:
- Enable to override the WTP profile band setting.
choices:
- enable
- disable
override-channel:
description:
- Enable to override WTP profile channel settings.
choices:
- enable
- disable
override-txpower:
description:
- Enable to override the WTP profile power level configuration.
choices:
- enable
- disable
override-vaps:
description:
- Enable to override WTP profile Virtual Access Point (VAP) settings.
choices:
- enable
- disable
power-level:
description:
- Radio power level as a percentage of the maximum transmit power (0 - 100, default = 100).
radio-id:
description:
- radio-id
spectrum-analysis:
description:
- Enable/disable spectrum analysis to find interference that would negatively impact wireless performance.
choices:
- enable
- disable
vap-all:
description:
- Enable/disable the automatic inheritance of all Virtual Access Points (VAPs) (default = enable).
choices:
- enable
- disable
vaps:
description:
- Manually selected list of Virtual Access Points (VAPs).
suboptions:
name:
description:
- Virtual Access Point (VAP) name. Source wireless-controller.vap-group.name wireless-controller.vap.name.
required: true
split-tunneling-acl:
description:
- Split tunneling ACL filter list.
suboptions:
dest-ip:
description:
- Destination IP and mask for the split-tunneling subnet.
id:
description:
- ID.
required: true
split-tunneling-acl-local-ap-subnet:
description:
- Enable/disable automatically adding local subnetwork of FortiAP to split-tunneling ACL (default = disable).
choices:
- enable
- disable
split-tunneling-acl-path:
description:
- Split tunneling ACL path is local/tunnel.
choices:
- tunnel
- local
tun-mtu-downlink:
description:
- Downlink tunnel MTU in octets. Set the value to either 0 (by default), 576, or 1500.
tun-mtu-uplink:
description:
- Uplink tunnel maximum transmission unit (MTU) in octets (eight-bit bytes). Set the value to either 0 (by default), 576, or 1500.
wan-port-mode:
description:
- Enable/disable using the FortiAP WAN port as a LAN port.
choices:
- wan-lan
- wan-only
wtp-id:
description:
- WTP ID.
required: true
wtp-mode:
description:
- WTP, AP, or FortiAP operating mode; normal (by default) or remote. A tunnel mode SSID can be assigned to an AP in normal mode but not
remote mode, while a local-bridge mode SSID can be assigned to an AP in either normal mode or remote mode.
choices:
- normal
- remote
wtp-profile:
description:
- WTP profile name to apply to this WTP, AP or FortiAP. Source wireless-controller.wtp-profile.name.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure Wireless Termination Points (WTPs), that is, FortiAPs or APs to be managed by FortiGate.
fortios_wireless_controller_wtp:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
wireless_controller_wtp:
state: "present"
admin: "discovered"
allowaccess: "telnet"
bonjour-profile: "<your_own_value> (source wireless-controller.bonjour-profile.name)"
coordinate-enable: "enable"
coordinate-latitude: "<your_own_value>"
coordinate-longitude: "<your_own_value>"
coordinate-x: "<your_own_value>"
coordinate-y: "<your_own_value>"
image-download: "enable"
index: "12"
ip-fragment-preventing: "tcp-mss-adjust"
lan:
port-mode: "offline"
port-ssid: "<your_own_value> (source wireless-controller.vap.name)"
port1-mode: "offline"
port1-ssid: "<your_own_value> (source wireless-controller.vap.name)"
port2-mode: "offline"
port2-ssid: "<your_own_value> (source wireless-controller.vap.name)"
port3-mode: "offline"
port3-ssid: "<your_own_value> (source wireless-controller.vap.name)"
port4-mode: "offline"
port4-ssid: "<your_own_value> (source wireless-controller.vap.name)"
port5-mode: "offline"
port5-ssid: "<your_own_value> (source wireless-controller.vap.name)"
port6-mode: "offline"
port6-ssid: "<your_own_value> (source wireless-controller.vap.name)"
port7-mode: "offline"
port7-ssid: "<your_own_value> (source wireless-controller.vap.name)"
port8-mode: "offline"
port8-ssid: "<your_own_value> (source wireless-controller.vap.name)"
led-state: "enable"
location: "<your_own_value>"
login-passwd: "<your_own_value>"
login-passwd-change: "yes"
mesh-bridge-enable: "default"
name: "default_name_38"
override-allowaccess: "enable"
override-ip-fragment: "enable"
override-lan: "enable"
override-led-state: "enable"
override-login-passwd-change: "enable"
override-split-tunnel: "enable"
override-wan-port-mode: "enable"
radio-1:
auto-power-high: "47"
auto-power-level: "enable"
auto-power-low: "49"
band: "802.11a"
channel:
-
chan: "<your_own_value>"
override-analysis: "enable"
override-band: "enable"
override-channel: "enable"
override-txpower: "enable"
override-vaps: "enable"
power-level: "58"
radio-id: "59"
spectrum-analysis: "enable"
vap-all: "enable"
vaps:
-
name: "default_name_63 (source wireless-controller.vap-group.name wireless-controller.vap.name)"
radio-2:
auto-power-high: "65"
auto-power-level: "enable"
auto-power-low: "67"
band: "802.11a"
channel:
-
chan: "<your_own_value>"
override-analysis: "enable"
override-band: "enable"
override-channel: "enable"
override-txpower: "enable"
override-vaps: "enable"
power-level: "76"
radio-id: "77"
spectrum-analysis: "enable"
vap-all: "enable"
vaps:
-
name: "default_name_81 (source wireless-controller.vap-group.name wireless-controller.vap.name)"
split-tunneling-acl:
-
dest-ip: "<your_own_value>"
id: "84"
split-tunneling-acl-local-ap-subnet: "enable"
split-tunneling-acl-path: "tunnel"
tun-mtu-downlink: "87"
tun-mtu-uplink: "88"
wan-port-mode: "wan-lan"
wtp-id: "<your_own_value>"
wtp-mode: "normal"
wtp-profile: "<your_own_value> (source wireless-controller.wtp-profile.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
def login(data, fos):
    """Authenticate the fortiosapi connection using the module parameters.

    HTTPS is enabled unless the caller explicitly passed a falsy 'https'
    value; debug output is always switched on.
    """
    fos.debug('on')
    use_https = data.get('https', True)
    fos.https('on' if use_https else 'off')
    fos.login(data['host'], data['username'], data['password'])
def filter_wireless_controller_wtp_data(json):
    """Return a copy of *json* restricted to the options this module manages.

    Keys that are absent or whose value is None are dropped, so the payload
    sent to the FortiGate contains only explicitly supplied options.

    :param dict json: raw wireless_controller_wtp parameters.
    :returns: filtered parameter dict.
    :rtype: dict
    """
    option_list = ['admin', 'allowaccess', 'bonjour-profile',
                   'coordinate-enable', 'coordinate-latitude', 'coordinate-longitude',
                   'coordinate-x', 'coordinate-y', 'image-download',
                   'index', 'ip-fragment-preventing', 'lan',
                   'led-state', 'location', 'login-passwd',
                   'login-passwd-change', 'mesh-bridge-enable', 'name',
                   'override-allowaccess', 'override-ip-fragment', 'override-lan',
                   'override-led-state', 'override-login-passwd-change', 'override-split-tunnel',
                   'override-wan-port-mode', 'radio-1', 'radio-2',
                   'split-tunneling-acl', 'split-tunneling-acl-local-ap-subnet', 'split-tunneling-acl-path',
                   'tun-mtu-downlink', 'tun-mtu-uplink', 'wan-port-mode',
                   'wtp-id', 'wtp-mode', 'wtp-profile']

    # Dict comprehension replaces the manual accumulate-into-dict loop.
    return {attribute: json[attribute]
            for attribute in option_list
            if attribute in json and json[attribute] is not None}
def flatten_multilists_attributes(data):
    """Flatten configured multi-value list attributes in *data* in place.

    Each path in ``multilist_attrs`` names a nested attribute whose list
    value is joined into the space-separated string the FortiGate API
    expects. The list is currently empty for this module, so the function
    is a no-op kept for parity with sibling fortios modules.

    :param dict data: module parameters; returned (possibly mutated).
    """
    multilist_attrs = []

    for attr_path in multilist_attrs:
        # BUG FIX: the original built a "data['..']['..']" string and ran it
        # through eval()/exec() under a bare `except BaseException` — replaced
        # with a plain traversal and narrow exception handling.
        try:
            parent = data
            for key in attr_path[:-1]:
                parent = parent[key]
            leaf = attr_path[-1]
            parent[leaf] = ' '.join(elem for elem in parent[leaf])
        except (KeyError, TypeError):
            # Attribute absent or not iterable: leave the value untouched.
            pass
    return data
def wireless_controller_wtp(data, fos):
    """Apply the requested state for a wireless-controller WTP entry.

    Creates/updates the entry when state is "present" and deletes it (keyed
    by 'wtp-id') when state is "absent". Returns the raw fortiosapi
    response dict.

    NOTE(review): any other 'state' value falls through and returns None,
    which the caller then subscripts — confirm upstream choice validation
    guarantees one of the two values.
    """
    vdom = data['vdom']
    wireless_controller_wtp_data = data['wireless_controller_wtp']
    # Flatten multi-value lists first (currently a no-op for this module),
    # then drop unset options before sending the payload.
    flattened_data = flatten_multilists_attributes(wireless_controller_wtp_data)
    filtered_data = filter_wireless_controller_wtp_data(flattened_data)
    if wireless_controller_wtp_data['state'] == "present":
        return fos.set('wireless-controller',
                       'wtp',
                       data=filtered_data,
                       vdom=vdom)
    elif wireless_controller_wtp_data['state'] == "absent":
        return fos.delete('wireless-controller',
                          'wtp',
                          mkey=filtered_data['wtp-id'],
                          vdom=vdom)
def fortios_wireless_controller(data, fos):
    """Log in, run the wireless_controller_wtp operation and log out.

    :returns: (is_error, has_changed, result) tuple for the Ansible caller.
    """
    login(data, fos)
    if not data['wireless_controller_wtp']:
        # BUG FIX: previously `resp` was left unbound on this path, causing a
        # confusing NameError at the return statement; fail explicitly.
        fos.logout()
        raise ValueError('wireless_controller_wtp parameter is required')
    resp = wireless_controller_wtp(data, fos)
    fos.logout()
    return not resp['status'] == "success", resp['status'] == "success", resp
def main():
    """Module entry point: build the argument spec, execute the requested
    wireless-controller WTP operation and report the outcome to Ansible.
    """
    # Argument spec mirrors the DOCUMENTATION block: connection options at
    # the top level, all FortiOS "wireless-controller wtp" settings nested
    # under the wireless_controller_wtp dict.
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "wireless_controller_wtp": {
            "required": False, "type": "dict",
            "options": {
                "state": {"required": True, "type": "str",
                          "choices": ["present", "absent"]},
                "admin": {"required": False, "type": "str",
                          "choices": ["discovered", "disable", "enable"]},
                "allowaccess": {"required": False, "type": "str",
                                "choices": ["telnet", "http", "https",
                                            "ssh"]},
                "bonjour-profile": {"required": False, "type": "str"},
                "coordinate-enable": {"required": False, "type": "str",
                                      "choices": ["enable", "disable"]},
                "coordinate-latitude": {"required": False, "type": "str"},
                "coordinate-longitude": {"required": False, "type": "str"},
                "coordinate-x": {"required": False, "type": "str"},
                "coordinate-y": {"required": False, "type": "str"},
                "image-download": {"required": False, "type": "str",
                                   "choices": ["enable", "disable"]},
                "index": {"required": False, "type": "int"},
                "ip-fragment-preventing": {"required": False, "type": "str",
                                           "choices": ["tcp-mss-adjust", "icmp-unreachable"]},
                # Per-LAN-port bridging configuration (port, port1..port8).
                "lan": {"required": False, "type": "dict",
                        "options": {
                            "port-mode": {"required": False, "type": "str",
                                          "choices": ["offline", "nat-to-wan", "bridge-to-wan",
                                                      "bridge-to-ssid"]},
                            "port-ssid": {"required": False, "type": "str"},
                            "port1-mode": {"required": False, "type": "str",
                                           "choices": ["offline", "nat-to-wan", "bridge-to-wan",
                                                       "bridge-to-ssid"]},
                            "port1-ssid": {"required": False, "type": "str"},
                            "port2-mode": {"required": False, "type": "str",
                                           "choices": ["offline", "nat-to-wan", "bridge-to-wan",
                                                       "bridge-to-ssid"]},
                            "port2-ssid": {"required": False, "type": "str"},
                            "port3-mode": {"required": False, "type": "str",
                                           "choices": ["offline", "nat-to-wan", "bridge-to-wan",
                                                       "bridge-to-ssid"]},
                            "port3-ssid": {"required": False, "type": "str"},
                            "port4-mode": {"required": False, "type": "str",
                                           "choices": ["offline", "nat-to-wan", "bridge-to-wan",
                                                       "bridge-to-ssid"]},
                            "port4-ssid": {"required": False, "type": "str"},
                            "port5-mode": {"required": False, "type": "str",
                                           "choices": ["offline", "nat-to-wan", "bridge-to-wan",
                                                       "bridge-to-ssid"]},
                            "port5-ssid": {"required": False, "type": "str"},
                            "port6-mode": {"required": False, "type": "str",
                                           "choices": ["offline", "nat-to-wan", "bridge-to-wan",
                                                       "bridge-to-ssid"]},
                            "port6-ssid": {"required": False, "type": "str"},
                            "port7-mode": {"required": False, "type": "str",
                                           "choices": ["offline", "nat-to-wan", "bridge-to-wan",
                                                       "bridge-to-ssid"]},
                            "port7-ssid": {"required": False, "type": "str"},
                            "port8-mode": {"required": False, "type": "str",
                                           "choices": ["offline", "nat-to-wan", "bridge-to-wan",
                                                       "bridge-to-ssid"]},
                            "port8-ssid": {"required": False, "type": "str"}
                        }},
                "led-state": {"required": False, "type": "str",
                              "choices": ["enable", "disable"]},
                "location": {"required": False, "type": "str"},
                "login-passwd": {"required": False, "type": "str"},
                "login-passwd-change": {"required": False, "type": "str",
                                        "choices": ["yes", "default", "no"]},
                "mesh-bridge-enable": {"required": False, "type": "str",
                                       "choices": ["default", "enable", "disable"]},
                "name": {"required": False, "type": "str"},
                "override-allowaccess": {"required": False, "type": "str",
                                         "choices": ["enable", "disable"]},
                "override-ip-fragment": {"required": False, "type": "str",
                                         "choices": ["enable", "disable"]},
                "override-lan": {"required": False, "type": "str",
                                 "choices": ["enable", "disable"]},
                "override-led-state": {"required": False, "type": "str",
                                       "choices": ["enable", "disable"]},
                "override-login-passwd-change": {"required": False, "type": "str",
                                                 "choices": ["enable", "disable"]},
                "override-split-tunnel": {"required": False, "type": "str",
                                          "choices": ["enable", "disable"]},
                "override-wan-port-mode": {"required": False, "type": "str",
                                           "choices": ["enable", "disable"]},
                # radio-1 and radio-2 share the same sub-option schema.
                "radio-1": {"required": False, "type": "dict",
                            "options": {
                                "auto-power-high": {"required": False, "type": "int"},
                                "auto-power-level": {"required": False, "type": "str",
                                                     "choices": ["enable", "disable"]},
                                "auto-power-low": {"required": False, "type": "int"},
                                "band": {"required": False, "type": "str",
                                         "choices": ["802.11a", "802.11b", "802.11g",
                                                     "802.11n", "802.11n-5G", "802.11n,g-only",
                                                     "802.11g-only", "802.11n-only", "802.11n-5G-only",
                                                     "802.11ac", "802.11ac,n-only", "802.11ac-only"]},
                                "channel": {"required": False, "type": "list",
                                            "options": {
                                                "chan": {"required": True, "type": "str"}
                                            }},
                                "override-analysis": {"required": False, "type": "str",
                                                      "choices": ["enable", "disable"]},
                                "override-band": {"required": False, "type": "str",
                                                  "choices": ["enable", "disable"]},
                                "override-channel": {"required": False, "type": "str",
                                                     "choices": ["enable", "disable"]},
                                "override-txpower": {"required": False, "type": "str",
                                                     "choices": ["enable", "disable"]},
                                "override-vaps": {"required": False, "type": "str",
                                                  "choices": ["enable", "disable"]},
                                "power-level": {"required": False, "type": "int"},
                                "radio-id": {"required": False, "type": "int"},
                                "spectrum-analysis": {"required": False, "type": "str",
                                                      "choices": ["enable", "disable"]},
                                "vap-all": {"required": False, "type": "str",
                                            "choices": ["enable", "disable"]},
                                "vaps": {"required": False, "type": "list",
                                         "options": {
                                             "name": {"required": True, "type": "str"}
                                         }}
                            }},
                "radio-2": {"required": False, "type": "dict",
                            "options": {
                                "auto-power-high": {"required": False, "type": "int"},
                                "auto-power-level": {"required": False, "type": "str",
                                                     "choices": ["enable", "disable"]},
                                "auto-power-low": {"required": False, "type": "int"},
                                "band": {"required": False, "type": "str",
                                         "choices": ["802.11a", "802.11b", "802.11g",
                                                     "802.11n", "802.11n-5G", "802.11n,g-only",
                                                     "802.11g-only", "802.11n-only", "802.11n-5G-only",
                                                     "802.11ac", "802.11ac,n-only", "802.11ac-only"]},
                                "channel": {"required": False, "type": "list",
                                            "options": {
                                                "chan": {"required": True, "type": "str"}
                                            }},
                                "override-analysis": {"required": False, "type": "str",
                                                      "choices": ["enable", "disable"]},
                                "override-band": {"required": False, "type": "str",
                                                  "choices": ["enable", "disable"]},
                                "override-channel": {"required": False, "type": "str",
                                                     "choices": ["enable", "disable"]},
                                "override-txpower": {"required": False, "type": "str",
                                                     "choices": ["enable", "disable"]},
                                "override-vaps": {"required": False, "type": "str",
                                                  "choices": ["enable", "disable"]},
                                "power-level": {"required": False, "type": "int"},
                                "radio-id": {"required": False, "type": "int"},
                                "spectrum-analysis": {"required": False, "type": "str",
                                                      "choices": ["enable", "disable"]},
                                "vap-all": {"required": False, "type": "str",
                                            "choices": ["enable", "disable"]},
                                "vaps": {"required": False, "type": "list",
                                         "options": {
                                             "name": {"required": True, "type": "str"}
                                         }}
                            }},
                "split-tunneling-acl": {"required": False, "type": "list",
                                        "options": {
                                            "dest-ip": {"required": False, "type": "str"},
                                            "id": {"required": True, "type": "int"}
                                        }},
                "split-tunneling-acl-local-ap-subnet": {"required": False, "type": "str",
                                                        "choices": ["enable", "disable"]},
                "split-tunneling-acl-path": {"required": False, "type": "str",
                                             "choices": ["tunnel", "local"]},
                "tun-mtu-downlink": {"required": False, "type": "int"},
                "tun-mtu-uplink": {"required": False, "type": "int"},
                "wan-port-mode": {"required": False, "type": "str",
                                  "choices": ["wan-lan", "wan-only"]},
                "wtp-id": {"required": True, "type": "str"},
                "wtp-mode": {"required": False, "type": "str",
                             "choices": ["normal", "remote"]},
                "wtp-profile": {"required": False, "type": "str"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # fortiosapi is imported lazily so a missing dependency is reported via
    # Ansible's fail_json instead of a raw ImportError traceback.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")
    fos = FortiOSAPI()
    is_error, has_changed, result = fortios_wireless_controller(module.params, fos)
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        # NOTE(review): "Error in repo" reads like a copy-paste leftover from
        # another module; confirm whether a FortiOS-specific message is wanted.
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
from django.db import models
from django.contrib.auth.models import User
class Address(models.Model):
    """Contact record: postal address, phone number, e-mail and a link."""

    # PEP 8 kwarg spacing (no spaces around '='); fields are unchanged.
    address_string = models.CharField(max_length=200, verbose_name='Адрес')
    phone = models.CharField(max_length=30, verbose_name='Телефон')
    email = models.CharField(max_length=30, verbose_name='email адрес')
    link = models.CharField(max_length=40, verbose_name='ссылка')
class Students(models.Model):
    """Student profile linked to an Address record and a Django auth user."""

    first_name = models.CharField(max_length = 100, verbose_name = 'Имя')
    last_name = models.CharField(max_length = 100, verbose_name = 'Фамилия')
    # NOTE(review): "patronimic" is a misspelling of "patronymic"; renaming
    # the field would require a schema migration, so it is only flagged here.
    patronimic_name = models.CharField(max_length = 100, verbose_name = 'Отчество')
    # NOTE(review): "birtday" is a misspelling of "birthday" — same caveat.
    birtday = models.DateField(verbose_name = 'Дата рождения')
    # PROTECT: an Address cannot be deleted while a student references it.
    id_address = models.ForeignKey(Address, on_delete=models.PROTECT)
    # CASCADE: deleting the auth user removes the student row as well.
    id_auth = models.ForeignKey(User, on_delete=models.CASCADE)
class City(models.Model):
    """City, tagged with the numeric id of the Region it belongs to."""

    name = models.CharField(max_length = 70, verbose_name = 'Название города')
    # BUG FIX: was `models.IntegerField` without parentheses, which assigned
    # the field *class* itself — Django created no column for it. A
    # ForeignKey(Region) may be the real intent; confirm before migrating.
    id_Region = models.IntegerField()
class Region(models.Model):
    """Administrative region; presumably referenced by City.id_Region —
    confirm, since that link is stored as a plain integer, not a ForeignKey.
    """

    name = models.CharField(max_length = 90, verbose_name = 'Название региона')
|
import classla
class NER_classla():
    """Named-entity recognition for Serbian text via the classla pipeline."""

    def __init__(self):
        # Downloads the Serbian models on every instantiation (requires
        # network access on first use; classla caches them afterwards).
        classla.download('sr')
        self.nlp = classla.Pipeline('sr')

    def perform_NER(self, text):
        """Run the pipeline on *text* and return a list of token dicts with
        'text', 'NER' (entity tag) and 'dep_rel' (dependency relation) keys.
        """
        doc = self.nlp(text)
        result = []
        # NOTE(review): doc.to_dict()[0][0] reads only the first sentence of
        # the first document — tokens of any further sentences appear to be
        # dropped; confirm whether that is intentional.
        for ent in doc.to_dict()[0][0]:
            result.append({'text': ent["text"], "NER": ent["ner"], "dep_rel": ent["deprel"]})
        return result
|
# Read 20 negative integers into vector A; build vector B holding the positive
# (absolute) value of each corresponding A element; print B in descending order.
A = []
B = []
for i in range(20):
    # Re-prompt until a strictly negative number is supplied.
    # BUG FIX: the original accepted 0, which is not a negative number.
    while True:
        value = int(input('Informe um valor para a matriz A[{}]: '.format(i)))
        if value < 0:
            break
        print('Por favor, informe um numero negativo!')
    A.append(value)

# B gets the absolute value of each element of A.
B = [element * -1 for element in A]

# BUG FIX: the trailing loop was `for i in range(20)` without a colon
# (SyntaxError); also `sorted()` followed by an in-place reverse sort was
# redundant — one descending sort suffices.
B.sort(reverse=True)
for element in B:
    print(element)
|
from __future__ import absolute_import, division, print_function
import os
import matplotlib.pyplot as plt
from vivarium.core.composition import set_axes
from vivarium.library.dict_utils import get_value_from_path
def plot_diauxic_shift(timeseries, settings=None, out_dir='out'):
    """Plot a diauxic-shift simulation and save it to ``out_dir/diauxic_shift``.

    Four panels: environment glucose/lactose, internal LacY expression,
    cell mass, and the two exchange fluxes.

    :param dict timeseries: simulation output keyed by port paths, with a
        'time' entry in seconds — assumed shape; TODO confirm against caller.
    :param dict settings: optional overrides for the timeseries paths and
        'environment_volume'; may be None.
    :param str out_dir: directory the figure is written into.
    """
    # BUG FIX: the default was a shared mutable dict (settings={}).
    if settings is None:
        settings = {}
    external_path = settings.get('external_path', ('environment',))
    internal_path = settings.get('internal_path', ('cytoplasm',))
    internal_counts_path = settings.get('internal_counts_path', ('cytoplasm_counts',))
    reactions_path = settings.get('reactions_path', ('reactions',))
    global_path = settings.get('global_path', ('global',))

    time = [t / 60 for t in timeseries['time']]  # convert seconds to minutes
    environment = get_value_from_path(timeseries, external_path)
    cell = get_value_from_path(timeseries, internal_path)
    cell_counts = get_value_from_path(timeseries, internal_counts_path)
    reactions = get_value_from_path(timeseries, reactions_path)
    # Renamed from `globals`, which shadowed the builtin.
    global_series = get_value_from_path(timeseries, global_path)

    # environment
    lactose = environment['lcts_e']
    glucose = environment['glc__D_e']

    # internal (the *_counts values are fetched but currently unplotted;
    # kept so a missing key still fails loudly here)
    LacY = cell['LacY']
    lacy_RNA = cell['lacy_RNA']
    LacY_counts = cell_counts['LacY']
    lacy_RNA_counts = cell_counts['lacy_RNA']

    # reactions
    glc_exchange = reactions['EX_glc__D_e']
    lac_exchange = reactions['EX_lcts_e']

    # global
    mass = global_series['mass']

    # settings
    environment_volume = settings.get('environment_volume')

    n_cols = 2
    n_rows = 4

    # make figure and plot
    fig = plt.figure(figsize=(n_cols * 6, n_rows * 1.5))
    grid = plt.GridSpec(n_rows, n_cols)

    ax1 = fig.add_subplot(grid[0, 0])  # grid is (row, column)
    ax1.plot(time, glucose, label='glucose')
    ax1.plot(time, lactose, label='lactose')
    set_axes(ax1)
    ax1.title.set_text('environment, volume = {} L'.format(environment_volume))
    ax1.set_ylabel('(mM)')
    ax1.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))

    ax2 = fig.add_subplot(grid[1, 0])  # grid is (row, column)
    ax2.plot(time, lacy_RNA, label='lacy_RNA')
    ax2.plot(time, LacY, label='LacY')
    set_axes(ax2)
    ax2.title.set_text('internal')
    ax2.set_ylabel('(mM)')
    ax2.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))

    ax3 = fig.add_subplot(grid[2, 0])  # grid is (row, column)
    ax3.plot(time, mass, label='mass')
    set_axes(ax3, True)
    ax3.title.set_text('global')
    ax3.set_ylabel('(fg)')
    ax3.set_xlabel('time (min)')
    ax3.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))

    ax4 = fig.add_subplot(grid[0, 1])  # grid is (row, column)
    ax4.plot(time, glc_exchange, label='glucose exchange')
    ax4.plot(time, lac_exchange, label='lactose exchange')
    set_axes(ax4, True)
    # BUG FIX: removed a stray .format(environment_volume) call on a literal
    # with no placeholder — it had no effect.
    ax4.title.set_text('flux')
    ax4.set_xlabel('time (min)')
    ax4.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))

    # save figure
    fig_path = os.path.join(out_dir, 'diauxic_shift')
    plt.subplots_adjust(wspace=0.6, hspace=0.5)
    plt.savefig(fig_path, bbox_inches='tight')
|
# -*- coding: utf-8 -*-
"""
database paging services module.
"""
from pyrin.application.services import get_component
from pyrin.database.paging import DatabasePagingPackage
def extract_paging_params(values):
    """
    extracts paging parameters from given dict and returns them as a new dict.

    the extracted keys will be removed from the input dict.

    :param dict values: a dict to extract paging params from it.

    :returns: dict(int page: page number,
                   int page_size: page size)
    :rtype: dict
    """
    paging_component = get_component(DatabasePagingPackage.COMPONENT_NAME)
    return paging_component.extract_paging_params(values)
def get_paging_params(**options):
    """
    gets paging parameters from given inputs.

    performs no validation; values are returned as provided,
    even when they are None.

    :keyword int page: page number.
    :keyword int page_size: page size.

    :returns: tuple[int page, int page_size]
    :rtype: tuple[int, int]
    """
    paging_component = get_component(DatabasePagingPackage.COMPONENT_NAME)
    return paging_component.get_paging_params(**options)
def generate_paging_params(page, page_size):
    """
    generates paging parameters from given inputs.

    :param int page: page number.
    :param int page_size: page size.

    :returns: dict[int page, int page_size]
    :rtype: dict
    """
    paging_component = get_component(DatabasePagingPackage.COMPONENT_NAME)
    return paging_component.generate_paging_params(page, page_size)
def inject_paging_keys(limit, offset, values):
    """
    injects paging keys into given dict.

    :param int limit: limit.
    :param int offset: offset.
    :param dict values: a dict to inject paging keys into it.
    """
    paging_component = get_component(DatabasePagingPackage.COMPONENT_NAME)
    return paging_component.inject_paging_keys(limit, offset, values)
def get_paging_keys(**options):
    """
    gets paging keys from given inputs.

    performs no validation; values are returned as provided,
    even when they are None.

    :keyword int __limit__: limit value.
    :keyword int __offset__: offset value.

    :returns: tuple[int limit, int offset]
    :rtype: tuple[int, int]
    """
    paging_component = get_component(DatabasePagingPackage.COMPONENT_NAME)
    return paging_component.get_paging_keys(**options)
def disable_paging_keys(values):
    """
    disables paging keys in given dict.

    :param dict values: a dict to disable paging keys in it.
    """
    paging_component = get_component(DatabasePagingPackage.COMPONENT_NAME)
    return paging_component.disable_paging_keys(values)
def get_paging_param_names():
    """
    gets current paging param names in effect.

    the result is a two-item tuple: the param name for page number,
    followed by the param name for page size.

    :returns: tuple[str page_number_param_name, str page_size_param_name]
    :rtype: tuple[str, str]
    """
    paging_component = get_component(DatabasePagingPackage.COMPONENT_NAME)
    return paging_component.get_paging_param_names()
def inject_paginator(paginator, inputs, **options):
    """
    injects the given paginator into current request context.

    :param PaginatorBase paginator: paginator instance to be injected.
    :param dict inputs: view function inputs.
    """
    paging_component = get_component(DatabasePagingPackage.COMPONENT_NAME)
    return paging_component.inject_paginator(paginator, inputs, **options)
|
import datetime
import arrow
from typing import List
def is_leap_year(years: int) -> int:
    """
    Return the number of days in the given year: 366 for a leap year,
    365 otherwise.

    Note: despite the name, this returns the day count, not a boolean;
    the name is kept for backward compatibility with callers.

    :param years: calendar year, e.g. 2024.
    :raises TypeError: if *years* is not an int.
    """
    # BUG FIX: input validation used `assert`, which is stripped under
    # `python -O`; raise explicitly instead.
    if not isinstance(years, int):
        raise TypeError("Integer required.")
    # Gregorian rule: divisible by 4 and not by 100, or divisible by 400.
    if (years % 4 == 0 and years % 100 != 0) or (years % 400 == 0):
        return 366
    return 365
def get_all_days_of_year(years: int, format: str = "YYYY-MM-DD") -> List[str]:
    """
    Return every date of the given year as a list of formatted strings.

    The year length (365 or 366) comes from is_leap_year; each day is the
    first of January shifted forward by its offset and rendered with the
    given arrow format token string.
    """
    first_day = '%s-1-1' % years
    total_days = is_leap_year(int(years))
    return [
        arrow.get(first_day).shift(days=offset).format(format)
        for offset in range(total_days)
    ]
|
from django import forms
from django.core.validators import RegexValidator
# from phonenumber_field.formfields import PhoneNumberField
# How often collected beneficiary data is shared with third parties.
SHARING_FREQUENCY_CHOICES = [
    ("one_time", "One-Time Collection"),
    ("bene_action", "Only when beneficiaries take a specific action"),
    ("daily", "Data is shared with third-parties daily"),
    ("weekly", "Data is shared with third-parties weekly"),
    ("monthly", "Data is shared with third-parties monthly"),
    ("other", "Other"),
    ("not_shared", "Not shared"),
]

# What happens to already-collected data after a user withdraws consent.
WITHDRAWN_CONSENT_CHOICES = [
    ("delete", "We securely delete the user data"),
    ("delete_on_request", "We keep the data, but will delete on user request"),
    ("keep", "We keep the data, and will not delete it"),
    ("other", "Other"),
]

# Broad category the applying application falls into.
APPLICATION_CATEGORIES = [
    ("research", "Research"),
    ("plan_finders", "Plan Finders"),
    ("agent_brokers", "Agent Brokers"),
    ("symptom_checker", "Symptom Checker"),
    ("organize_share_medical_claims", "Organize & Share Medical Claims"),
    ("other", "Other"),
]
class InterimProdAccessForm(forms.Form):
    """Application form for interim production API access.

    Collects application metadata, a point of contact, and a series of
    data-handling / privacy attestations. Field declaration order is
    preserved as-is because it determines rendering order.
    """
    # card_name = forms.CharField(max_length=100, label="Cardholder Name")
    # card_number = forms.CharField(max_length=50, label="Card Number")
    # card_code = forms.CharField(max_length=20, label="Security Code")
    # card_expirate_time = forms.CharField(max_length=100, label="Expiration (MM/YYYY)")
    # Fields needed compared to https://airtable.com/app4N2CBNxgseqVyq/tbl61MNkxjOG19Aiz/viw1R5g2rbE7S2YFr?blocks=hide
    # Application Category
    # --- application metadata ---
    application_name = forms.CharField()
    application_description = forms.CharField()
    application_url = forms.URLField()
    terms_of_service_url = forms.URLField()
    privacy_policy_url = forms.URLField()
    us_based = forms.BooleanField(
        widget=forms.CheckboxInput(attrs={"class": "ds-c-choice"})
    )
    associated_sandbox_users = forms.CharField(widget=forms.Textarea())
    application_category = forms.ChoiceField(choices=APPLICATION_CATEGORIES)
    # --- point of contact ---
    point_of_contact_email = forms.EmailField()
    point_of_contact_phone_number = forms.CharField(
        max_length=17,
        validators=[
            RegexValidator(
                r"^\+?1?\d{9,15}$",
                message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.",
            )
        ],
    )
    # --- privacy / data-handling attestations ---
    adheres_to_bb2_tos = forms.BooleanField(
        widget=forms.CheckboxInput(attrs={"class": "ds-c-choice"})
    )
    user_discovery_path = forms.CharField(widget=forms.Textarea())
    easy_to_read_pp = forms.BooleanField(
        widget=forms.CheckboxInput(attrs={"class": "ds-c-choice"})
    )
    does_pp_follow_bb2_guidelines = forms.BooleanField(
        widget=forms.CheckboxInput(attrs={"class": "ds-c-choice"})
    )
    doesnt_follow_pp_guidelines_reason = forms.CharField(widget=forms.Textarea())
    third_party_app_data_sharing_frequency = forms.ChoiceField(
        choices=SHARING_FREQUENCY_CHOICES
    )
    action_for_withdrawn_consent = forms.ChoiceField(choices=WITHDRAWN_CONSENT_CHOICES)
    data_sharing_consent_method = forms.CharField(widget=forms.Textarea())
    vendor_data_protection = forms.CharField(widget=forms.Textarea())
    data_use_post_sale = forms.CharField(widget=forms.Textarea())
    partner_requirements_consent = forms.BooleanField(
        widget=forms.CheckboxInput(attrs={"class": "ds-c-choice"})
    )
    data_storage_technique = forms.CharField(widget=forms.Textarea())
    organization_authority_assertion = forms.BooleanField(
        widget=forms.CheckboxInput(attrs={"class": "ds-c-choice"})
    )
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import json
import sys
def main():
    """Concatenate the JSON arrays from the given files and dump to stdout.

    Each argument must be a JSON file containing a list; the lists are
    joined in argument order. Exits with status 1 on missing arguments
    or ``--help``.
    """
    if (len(sys.argv) < 2) or sys.argv[1] == "--help":
        # BUG FIX: the original used four quotes (""""Usage...), which
        # printed a stray leading double-quote in the usage text.
        print(
            """Usage: join_scripts.py JSON_FILE1...
Example:
join_scripts.py script1.json script2.json > joined_file.json
"""
        )
        sys.exit(1)
    filenames = sys.argv[1:]
    commands = []
    for curfilename in filenames:
        with open(curfilename, "rt") as inpf:
            # Each file holds a JSON list; extend the combined list.
            commands += json.load(inpf)
    json.dump(commands, sys.stdout, indent=4)


if __name__ == "__main__":
    main()
|
# Generated by Django 2.2.6 on 2021-01-07 07:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (comments added only — do not hand-edit ops).

    Removes ``venda.produtos``, tightens ``venda.valor`` to a 5-digit /
    2-decimal DecimalField with default 0, and introduces ``ItemsDoPedido``
    as an explicit line-item model linking Produto and Venda.
    """

    dependencies = [
        ('clientes', '0002_venda_nfe_emitida'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='venda',
            name='produtos',
        ),
        migrations.AlterField(
            model_name='venda',
            name='valor',
            field=models.DecimalField(decimal_places=2, default=0, max_digits=5),
        ),
        migrations.CreateModel(
            name='ItemsDoPedido',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantidade', models.FloatField()),
                ('desconto', models.DecimalField(decimal_places=2, max_digits=5)),
                # CASCADE: deleting a Produto or Venda removes its line items.
                ('produto', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='clientes.Produto')),
                ('venda', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='clientes.Venda')),
            ],
        ),
    ]
|
import torch
from tqc.functions import quantile_huber_loss_f
from tqc import DEVICE
class Trainer(object):
    """TQC-style trainer: distributional quantile critics with truncated
    (dropped) top quantiles, plus a SAC actor with learned temperature.

    When ``use_acc`` is set, the number of dropped quantiles is itself a
    learnable scalar, adapted in :meth:`update_beta` from the gap between
    observed discounted returns and the critic's mean estimate.
    """
    def __init__(
        self,
        *,
        actor,
        critic,
        critic_target,
        discount,
        tau,
        top_quantiles_to_drop,
        target_entropy,
        use_acc,
        lr_dropped_quantiles,
        adjusted_dropped_quantiles_init,
        adjusted_dropped_quantiles_max,
        diff_ma_coef,
        num_critic_updates,
        writer
    ):
        self.actor = actor
        self.critic = critic
        self.critic_target = critic_target
        # SAC temperature is optimised in log-space; alpha = exp(log_alpha).
        self.log_alpha = torch.zeros((1,), requires_grad=True, device=DEVICE)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
        self.alpha_optimizer = torch.optim.Adam([self.log_alpha], lr=3e-4)
        self.discount = discount
        self.tau = tau  # Polyak coefficient for the target-critic update
        self.top_quantiles_to_drop = top_quantiles_to_drop  # fixed drop count (non-acc mode)
        self.target_entropy = target_entropy
        self.quantiles_total = critic.n_quantiles * critic.n_nets
        self.total_it = 0
        self.writer = writer
        self.use_acc = use_acc
        self.num_critic_updates = num_critic_updates
        if use_acc:
            # Learnable per-net dropped-quantile count, tuned by update_beta().
            self.adjusted_dropped_quantiles = torch.tensor(adjusted_dropped_quantiles_init, requires_grad=True)
            self.adjusted_dropped_quantiles_max = adjusted_dropped_quantiles_max
            self.dropped_quantiles_dropped_optimizer = torch.optim.SGD([self.adjusted_dropped_quantiles], lr=lr_dropped_quantiles)
            self.first_training = True
            self.diff_ma_coef = diff_ma_coef
    def train(self, replay_buffer, batch_size=256, ptr_list=None, disc_return=None, do_beta_update=False):
        """Run ``num_critic_updates`` iterations; each iteration updates the
        critic, the actor and the temperature once (note: actor/alpha are
        updated inside the per-critic-update loop).

        :param replay_buffer: buffer exposing ``sample(batch_size)``.
        :param batch_size: minibatch size.
        :param ptr_list: buffer pointers of the latest episodes (acc mode).
        :param disc_return: discounted returns matching ``ptr_list``.
        :param do_beta_update: adapt the dropped-quantile count first.
        """
        if ptr_list is not None and do_beta_update:
            self.update_beta(replay_buffer, ptr_list, disc_return)
        for it in range(self.num_critic_updates):
            state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)
            alpha = torch.exp(self.log_alpha)
            # --- Q loss ---
            with torch.no_grad():
                # get policy action
                new_next_action, next_log_pi = self.actor(next_state)
                # compute and cut quantiles at the next state
                next_z = self.critic_target(next_state, new_next_action)  # batch x nets x quantiles
                sorted_z, _ = torch.sort(next_z.reshape(batch_size, -1))
                if self.use_acc:
                    # Drop a learned number of top quantiles (scaled by n_nets).
                    sorted_z_part = sorted_z[:, :self.quantiles_total - round(self.critic.n_nets * self.adjusted_dropped_quantiles.item())]
                else:
                    # Drop a fixed number of top quantiles (TQC truncation).
                    sorted_z_part = sorted_z[:, :self.quantiles_total - self.top_quantiles_to_drop]
                # compute target (entropy-regularised distributional Bellman backup)
                target = reward + not_done * self.discount * (sorted_z_part - alpha * next_log_pi)
            cur_z = self.critic(state, action)
            critic_loss = quantile_huber_loss_f(cur_z, target.detach())
            self.critic_optimizer.zero_grad()
            critic_loss.backward()
            self.critic_optimizer.step()
            # Polyak-average the online critic into the target critic.
            for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
                target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
            # --- Policy and alpha loss ---
            new_action, log_pi = self.actor(state)
            # Temperature loss drives entropy toward target_entropy.
            alpha_loss = -self.log_alpha * (log_pi + self.target_entropy).detach().mean()
            # Actor maximises mean Q (averaged over quantiles and nets) minus entropy cost.
            actor_loss = (alpha * log_pi - self.critic(state, new_action).mean(2).mean(1, keepdim=True)).mean()
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()
            self.alpha_optimizer.zero_grad()
            alpha_loss.backward()
            self.alpha_optimizer.step()
            self.total_it += 1
            # Periodic scalar logging to TensorBoard.
            if self.total_it % 1000 == 0:
                self.writer.add_scalar('learner/critic_loss', critic_loss.detach().cpu().numpy(), self.total_it)
                self.writer.add_scalar('learner/actor_loss', actor_loss.detach().cpu().numpy(), self.total_it)
                self.writer.add_scalar('learner/alpha_loss', alpha_loss.detach().cpu().numpy(), self.total_it)
                self.writer.add_scalar('learner/alpha', alpha.detach().cpu().numpy(), self.total_it)
                self.writer.add_scalar('learner/Q_estimate', cur_z.mean().detach().cpu().numpy(), self.total_it)
    def update_beta(self, replay_buffer, ptr_list=None, disc_return=None):
        """Adapt the learnable dropped-quantile count (acc mode only).

        Uses the normalised gap between the mean Monte-Carlo return and the
        critic's mean estimate over the latest episodes as the gradient
        signal for ``adjusted_dropped_quantiles``.
        """
        state, action = replay_buffer.states_by_ptr(ptr_list)
        disc_return = torch.FloatTensor(disc_return).to(DEVICE)
        assert disc_return.shape[0] == state.shape[0]
        # Mean critic estimate vs. mean observed return for the same states.
        mean_Q_last_eps = self.critic(state, action).mean(2).mean(1, keepdim=True).mean().detach()
        mean_return_last_eps = torch.mean(disc_return).detach()
        if self.first_training:
            self.diff_mvavg = torch.abs(mean_return_last_eps - mean_Q_last_eps).detach()
            self.first_training = False
        else:
            # Exponential moving average of the absolute gap (normaliser).
            self.diff_mvavg = (1 - self.diff_ma_coef) * self.diff_mvavg \
                + self.diff_ma_coef * torch.abs(mean_return_last_eps - mean_Q_last_eps).detach()
        # Sign-preserving gap in units of its moving average.
        diff_qret = ((mean_return_last_eps - mean_Q_last_eps) / (self.diff_mvavg + 1e-8)).detach()
        # d(aux_loss)/d(beta) = diff_qret, so SGD moves beta against the gap sign.
        aux_loss = self.adjusted_dropped_quantiles * diff_qret
        self.dropped_quantiles_dropped_optimizer.zero_grad()
        aux_loss.backward()
        self.dropped_quantiles_dropped_optimizer.step()
        # Keep the drop count within [0, max].
        self.adjusted_dropped_quantiles.data = self.adjusted_dropped_quantiles.clamp(min=0., max=self.adjusted_dropped_quantiles_max)
        self.writer.add_scalar('learner/adjusted_dropped_quantiles', self.adjusted_dropped_quantiles, self.total_it)
    def save(self, filename):
        """Persist all networks, optimizers and log_alpha under *filename* prefix."""
        filename = str(filename)
        torch.save(self.critic.state_dict(), filename + "_critic")
        torch.save(self.critic_target.state_dict(), filename + "_critic_target")
        torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
        torch.save(self.actor.state_dict(), filename + "_actor")
        torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")
        torch.save(self.log_alpha, filename + '_log_alpha')
        torch.save(self.alpha_optimizer.state_dict(), filename + "_alpha_optimizer")
    def load(self, filename):
        """Restore everything written by :meth:`save` from *filename* prefix."""
        filename = str(filename)
        self.critic.load_state_dict(torch.load(filename + "_critic"))
        self.critic_target.load_state_dict(torch.load(filename + "_critic_target"))
        self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
        self.actor.load_state_dict(torch.load(filename + "_actor"))
        self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
        self.log_alpha = torch.load(filename + '_log_alpha')
        self.alpha_optimizer.load_state_dict(torch.load(filename + "_alpha_optimizer"))
|
#-*-coding:utf-8-*-
# date:2020-03-02
# Author: X.li
# function: inference & eval CenterNet only support resnet backbone
import os
import glob
import cv2
import numpy as np
import time
import shutil
import torch
import json
import matplotlib.pyplot as plt
from data_iterator import LoadImagesAndLabels
from models.decode import ctdet_decode
from utils.model_utils import load_model
from utils.post_process import ctdet_post_process
from msra_resnet import get_pose_net as resnet
from xml_writer import PascalVocWriter
class NpEncoder(json.JSONEncoder):
    """JSON encoder that maps numpy scalars and arrays to builtin types."""

    def default(self, obj):
        # Convert the numpy families json can't serialise natively; anything
        # else is deferred to the base encoder (which raises TypeError).
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super(NpEncoder, self).default(obj)
# reference https://zhuanlan.zhihu.com/p/60707912
def draw_pr(coco_eval, label="192_288"):
    """Plot the precision/recall curve from a finished COCOeval and save it.

    Reads the slice [iou=0, :, cat=0, area=0, maxDet idx=2] of the
    accumulated precision/score tensors, prints one line per recall step,
    and writes the figure to ``one_p_r.png``.
    """
    precisions = coco_eval.eval["precision"][0, :, 0, 0, 2]
    confidences = coco_eval.eval['scores'][0, :, 0, 0, 2]
    recalls = np.arange(0.0, 1.01, 0.01)
    plt.xlabel("recall")
    plt.ylabel("precision")
    plt.xlim(0, 1.0)
    plt.ylim(0, 1.0)
    plt.grid(True)
    plt.plot(recalls, precisions, "b-", label=label)
    for conf_val, prec_val, rec_val in zip(confidences, precisions, recalls):
        print("Confidence: {:.2f}, Precision: {:.2f}, Recall: {:.2f}".format(conf_val, prec_val, rec_val))
    plt.legend(loc="lower left")
    plt.savefig("one_p_r.png")
def write_bbox_label(writer_x, img_shape, bbox, label):
    """Clamp *bbox* to the image bounds and record it on the VOC writer.

    :param writer_x: PascalVocWriter-like object exposing ``addBndBox``.
    :param img_shape: (height, width) of the image.
    :param bbox: (x1, y1, x2, y2) box, possibly outside the image.
    :param label: class name to record.
    """
    height, width = img_shape
    left, top, right, bottom = bbox
    left = min(width, max(0, left))
    right = min(width, max(0, right))
    top = min(height, max(0, top))
    bottom = min(height, max(0, bottom))
    writer_x.addBndBox(int(left), int(top), int(right), int(bottom), label, 0)
def letterbox(img, height=512, color=(31, 31, 31)):
    """Resize a rectangular image into a height x height padded square.

    Aspect ratio is preserved; the remainder is filled with *color*.
    """
    shape_h, shape_w = img.shape[:2]
    scale = float(height) / max(shape_h, shape_w)
    new_w, new_h = round(shape_w * scale), round(shape_h * scale)
    pad_w = (height - new_w) / 2  # horizontal padding, per side
    pad_h = (height - new_h) / 2  # vertical padding, per side
    # round(x -/+ 0.1) splits any odd leftover pixel deterministically.
    top, bottom = round(pad_h - 0.1), round(pad_h + 0.1)
    left, right = round(pad_w - 0.1), round(pad_w + 0.1)
    resized = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    return cv2.copyMakeBorder(resized, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
def py_cpu_nms(dets, thresh):
    """Pure Python NMS baseline.

    :param dets: (N, 5) array of [x1, y1, x2, y2, score] rows.
    :param thresh: IoU threshold above which lower-scored boxes are dropped.
    :return: list of kept indices, best score first.
    """
    boxes_x1, boxes_y1 = dets[:, 0], dets[:, 1]
    boxes_x2, boxes_y2 = dets[:, 2], dets[:, 3]
    scores = dets[:, 4]
    # +1 follows the pixel-inclusive area convention of the original.
    areas = (boxes_x2 - boxes_x1 + 1) * (boxes_y2 - boxes_y1 + 1)
    order = scores.argsort()[::-1]  # candidates, highest score first
    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        # Intersection of the best box with every remaining candidate.
        ix1 = np.maximum(boxes_x1[best], boxes_x1[order[1:]])
        iy1 = np.maximum(boxes_y1[best], boxes_y1[order[1:]])
        ix2 = np.minimum(boxes_x2[best], boxes_x2[order[1:]])
        iy2 = np.minimum(boxes_y2[best], boxes_y2[order[1:]])
        inter = np.maximum(0.0, ix2 - ix1 + 1) * np.maximum(0.0, iy2 - iy1 + 1)
        # IoU = overlap / (area1 + area2 - overlap).
        iou = inter / (areas[best] + areas[order[1:]] - inter)
        # Keep candidates overlapping the best box at most `thresh`; the +1
        # re-aligns indices because `iou` excludes the best box itself.
        order = order[np.where(iou <= thresh)[0] + 1]
    return keep
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    """Draw one bounding box (and optional label) on *img* in place.

    :param x: box as (x1, y1, x2, y2); coordinates are cast to int.
    :param img: image array drawn on in place.
    :param color: BGR color list/tuple; a random color when omitted.
    :param label: optional text rendered above the box.
    :param line_thickness: rectangle thickness; derived from image size when omitted.
    """
    # BUG FIX: `random` was referenced without being imported anywhere in the
    # module, so every call with color=None raised NameError. Function-scope
    # import keeps the block self-contained.
    import random

    tl = line_thickness or round(0.002 * max(img.shape[0:2])) + 1  # line thickness
    color = color or [random.randint(0, 255) for _ in range(3)]  # color
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl)
    if label:
        tf = max(tl - 2, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]  # label size
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3  # bbox of the label text
        cv2.rectangle(img, c1, c2, color, -1)  # filled label background
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 4, [225, 255, 255],\
                    thickness=tf, lineType=cv2.LINE_AA)
class CtdetDetector(object):
    """CenterNet ("Objects as Points") detector wrapper for ResNet backbones.

    Handles model construction/loading, letterbox preprocessing, the forward
    pass + decode, and post-processing back to original image coordinates.
    """
    def __init__(self,model_arch,model_path):
        # Prefer the first GPU when available.
        if torch.cuda.is_available():
            self.device = torch.device("cuda:0")
        else:
            self.device = torch.device('cpu')
        self.num_classes = LoadImagesAndLabels.num_classes
        print('Creating model...')
        head_conv_ =64
        if "resnet_" in model_arch:
            # e.g. "resnet_18" -> 18-layer ResNet with heatmap/size/offset heads.
            num_layer = int(model_arch.split("_")[1])
            self.model = resnet(num_layers=num_layer, heads={'hm': self.num_classes, 'wh': 2, 'reg': 2}, head_conv=head_conv_, pretrained=False) # res_18
        else:
            print("model_arch error:", model_arch)
        self.model = load_model(self.model, model_path)
        self.model = self.model.to(self.device)
        self.model.eval()
        # Per-channel normalisation constants (presumably BGR order to match
        # cv2 loading — TODO confirm against training pipeline).
        self.mean = np.array([[[0.40789655, 0.44719303, 0.47026116]]], dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array([[[0.2886383, 0.27408165, 0.27809834]]], dtype=np.float32).reshape(1, 1, 3)
        self.class_name = LoadImagesAndLabels.class_name
        self.down_ratio = 4  # backbone output stride
        self.K = 100  # max decoded detections per image
        self.vis_thresh = 0.3  # confidence threshold used in work()
        self.show = True
    def pre_process(self, image):
        """Letterbox, normalise and batch *image*; return (tensor, affine meta)."""
        height, width = image.shape[0:2]
        inp_height, inp_width = LoadImagesAndLabels.default_resolution# model input resolution
        torch.cuda.synchronize()
        s1 = time.time()
        inp_image = letterbox(image, height=inp_height)# aspect-preserving pad/resize
        # Centre and scale of the original image, used by ctdet_post_process
        # to map output coordinates back.
        c = np.array([width / 2., height / 2.], dtype=np.float32)
        s = max(height, width) * 1.0
        inp_image = ((inp_image / 255. - self.mean) / self.std).astype(np.float32)
        images = inp_image.transpose(2, 0, 1).reshape(1, 3, inp_height, inp_width)
        images = torch.from_numpy(images)
        torch.cuda.synchronize()
        s2 = time.time()
        # print("pre_process:".format(s2 -s1))
        meta = {'c': c, 's': s, 'out_height': inp_height // self.down_ratio, 'out_width': inp_width // self.down_ratio}
        return images, meta
    def predict(self, images):
        """Forward pass; return raw head outputs and decoded top-K detections."""
        images = images.to(self.device)
        with torch.no_grad():
            torch.cuda.synchronize()
            s1 = time.time()
            output = self.model(images)[-1]
            torch.cuda.synchronize()
            s2 = time.time()
            # for k, v in output.items():
            #     print("output:", k, v.size())
            # print("inference time:", s2 - s1)
            # Heatmap is trained with logits; squash to probabilities in place.
            hm = output['hm'].sigmoid_()
            wh = output['wh']
            reg = output['reg'] if "reg" in output else None
            dets = ctdet_decode(hm, wh, reg=reg, K=self.K)
            torch.cuda.synchronize()
        return output, dets
    def post_process(self, dets, meta, scale=1):
        """Map decoded detections back to original image space, grouped by class."""
        torch.cuda.synchronize()
        s1 = time.time()
        dets = dets.cpu().numpy()
        dets = dets.reshape(1, -1, dets.shape[2])
        dets = ctdet_post_process(dets, [meta['c']], [meta['s']], meta['out_height'], meta['out_width'], self.num_classes)
        # Classes are 1-indexed; each entry becomes an (N, 5) array of
        # [x1, y1, x2, y2, score] rows.
        for j in range(1, self.num_classes + 1):
            dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
            dets[0][j][:, :4] /= scale
        torch.cuda.synchronize()
        s2 = time.time()
        # print("post_process:", s2-s1)
        return dets[0]
    def work(self, image):
        """Run full detection on one image.

        :return: (list of (class_name, confidence, [x1, y1, x2, y2]) tuples
            above ``vis_thresh`` with boxes clamped to the image, heatmap tensor).
        """
        img_h, img_w = image.shape[0], image.shape[1]
        torch.cuda.synchronize()
        s1 = time.time()
        detections = []
        images, meta = self.pre_process(image)
        output, dets = self.predict(images)
        hm = output['hm']
        dets = self.post_process(dets, meta)
        detections.append(dets)
        results = {}
        for j in range(1, self.num_classes + 1):
            results[j] = np.concatenate([detection[j] for detection in detections], axis=0).astype(np.float32)
        final_result = []
        for j in range(1, self.num_classes + 1):
            for bbox in results[j]:
                if bbox[4] >= self.vis_thresh:
                    # Clamp the box to the image bounds.
                    x1, y1, x2, y2 = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
                    x1 = min(img_w, max(0, x1))
                    x2 = min(img_w, max(0, x2))
                    y1 = min(img_h, max(0, y1))
                    y2 = min(img_h, max(0, y2))
                    conf = bbox[4]
                    cls = self.class_name[j]
                    final_result.append((cls, conf, [x1, y1, x2, y2]))
        # print("cost time: ", time.time() - s1)
        return final_result,hm
def eval(model_arch,model_path,img_dir,gt_annot_path):
    """Run COCO-style evaluation over a ground-truth annotation file.

    Detects every annotated image, dumps results to a COCO result JSON,
    runs COCOeval on the 'Hand' category and plots the PR curve.

    NOTE(review): this function shadows the builtin ``eval``; the name is
    kept because it is invoked by name from the __main__ block.

    :param model_arch: backbone spec, e.g. 'resnet_18'.
    :param model_path: path to the trained weights.
    :param img_dir: directory containing the evaluation images.
    :param gt_annot_path: COCO-format ground-truth JSON.
    """
    output = "output"
    # Start from a clean output directory each run.
    if os.path.exists(output):
        shutil.rmtree(output)
    os.mkdir(output)
    os.environ['CUDA_VISIBLE_DEVICES'] = "0"
    # Small class counts get a hand-picked palette; otherwise derive colors.
    if LoadImagesAndLabels.num_classes <= 5:
        colors = [(55,55,250), (255,155,50), (128,0,0), (255,0,255), (128,255,128), (255,0,0)]
    else:
        colors = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) for v in range(1, LoadImagesAndLabels.num_classes + 1)][::-1]
    detector = CtdetDetector(model_arch,model_path)
    print('\n/****************** Eval ****************/\n')
    import tqdm
    import pycocotools.coco as coco
    from pycocotools.cocoeval import COCOeval
    print("gt path: {}".format(gt_annot_path))
    result_file = '../evaluation/instances_det.json'
    coco = coco.COCO(gt_annot_path)
    images = coco.getImgIds()
    num_samples = len(images)
    print('find {} samples in {}'.format(num_samples, gt_annot_path))
    #------------------------------------------------
    coco_res = []
    for index in tqdm.tqdm(range(num_samples)):
        img_id = images[index]
        file_name = coco.loadImgs(ids=[img_id])[0]['file_name']
        image_path = os.path.join(img_dir, file_name)
        img = cv2.imread(image_path)
        results,hm = detector.work(img)# detections + confidence heatmap
        class_num = {}
        for res in results:
            cls, conf, bbox = res[0], res[1], res[2]
            # COCO results use [x, y, w, h] boxes.
            coco_res.append({'bbox': [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]], 'category_id':
                LoadImagesAndLabels.class_name.index(cls), 'image_id': img_id, 'score': conf})
            if cls in class_num:
                class_num[cls] += 1
            else:
                class_num[cls] = 1
            color = colors[LoadImagesAndLabels.class_name.index(cls)]
            # draw label & confidence
            label_ = '{}:{:.1f}'.format(cls, conf)
            plot_one_box(bbox, img, color=color, label=label_, line_thickness=2)
        cv2.imwrite(output + "/" + os.path.basename(image_path), img)
        cv2.namedWindow("heatmap", 0)
        cv2.imshow("heatmap", np.hstack(hm[0].cpu().numpy()))
        cv2.namedWindow("img", 0)
        cv2.imshow("img", img)
        key = cv2.waitKey(1)
    #-------------------------------------------------
    with open(result_file, 'w') as f_dump:
        json.dump(coco_res, f_dump, cls=NpEncoder)
    cocoDt = coco.loadRes(result_file)
    cocoEval = COCOeval(coco, cocoDt, 'bbox')
    # cocoEval.params.imgIds = imgIds
    cocoEval.params.catIds = [1]  # 1 is the 'Hand' class; add/remove ids as needed
    cocoEval.evaluate()
    print('\n/***************************/\n')
    cocoEval.accumulate()
    print('\n/***************************/\n')
    cocoEval.summarize()
    draw_pr(cocoEval)
def inference(model_arch,nms_flag,model_path,img_dir):
    """Run visual inference over a directory of images.

    Optionally applies a second NMS pass over the decoded boxes and can
    write Pascal VOC XML files for the detections (flag_write_xml).

    :param model_arch: backbone spec, e.g. 'resnet_18'.
    :param nms_flag: when True, draw only boxes surviving py_cpu_nms.
    :param model_path: path to the trained weights.
    :param img_dir: directory of images to process (xml files are skipped).
    """
    print('\n/****************** Demo ****************/\n')
    flag_write_xml = False  # set True to dump VOC XML per image
    path_det_ = './det_xml/'
    # Recreate the XML output directory from scratch.
    if os.path.exists(path_det_):
        shutil.rmtree(path_det_)
        print('remove detect document ~')
    if not os.path.exists(path_det_):
        os.mkdir(path_det_)
    output = "output"
    if os.path.exists(output):
        shutil.rmtree(output)
    os.mkdir(output)
    os.environ['CUDA_VISIBLE_DEVICES'] = "0"
    # Small class counts get a hand-picked palette; otherwise derive colors.
    if LoadImagesAndLabels.num_classes <= 5:
        colors = [(55,55,250), (255,155,50), (128,0,0), (255,0,255), (128,255,128), (255,0,0)]
    else:
        colors = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) for v in range(1, LoadImagesAndLabels.num_classes + 1)][::-1]
    detector = CtdetDetector(model_arch,model_path)
    for file_ in os.listdir(img_dir):
        if '.xml' in file_:
            continue
        print("--------------------")
        img = cv2.imread(img_dir + file_)
        if flag_write_xml:
            shutil.copyfile(img_dir + file_,path_det_+file_)
        if flag_write_xml:
            img_h, img_w = img.shape[0],img.shape[1]
            writer = PascalVocWriter("./",file_, (img_h, img_w, 3), localImgPath="./", usrname="RGB_HandPose_EVAL")
        results,hm = detector.work(img)# detections + confidence heatmap
        print('model_arch - {} : {}'.format(model_arch,results))
        class_num = {}
        nms_dets_ = []
        for res in results:
            cls, conf, bbox = res[0], res[1], res[2]
            if flag_write_xml:
                write_bbox_label(writer,(img_h,img_w),bbox,cls)
            if cls in class_num:
                class_num[cls] += 1
            else:
                class_num[cls] = 1
            color = colors[LoadImagesAndLabels.class_name.index(cls)]
            # Collect [x1, y1, x2, y2, score] rows for the optional NMS pass.
            nms_dets_.append((bbox[0], bbox[1],bbox[2], bbox[3],conf))
            # draw label & confidence (raw boxes only when NMS is disabled)
            if nms_flag == False:
                label_ = '{}:{:.1f}'.format(cls, conf)
                plot_one_box(bbox, img, color=color, label=label_, line_thickness=2)
        if flag_write_xml:
            writer.save(targetFile = path_det_+file_.replace('.jpg','.xml'))
        if nms_flag and len(nms_dets_)>0:
            # apply NMS over the collected boxes and draw only the survivors
            keep_ = py_cpu_nms(np.array(nms_dets_), thresh=0.8)
            print('keep_ : {}'.format(keep_))
            for i in range(len(nms_dets_)):
                if i in keep_:
                    bbox_conf = nms_dets_[i]
                    bbox_ = int(bbox_conf[0]),int(bbox_conf[1]),int(bbox_conf[2]),int(bbox_conf[3])
                    label_ = 'nms_Hand:{:.2f}'.format(bbox_conf[4])
                    plot_one_box(bbox_, img, color=(55,125,255), label=label_, line_thickness=2)
        cv2.namedWindow("heatmap", 0)
        cv2.imshow("heatmap", np.hstack(hm[0].cpu().numpy()))
        cv2.namedWindow("img", 0)
        cv2.imshow("img", img)
        key = cv2.waitKey(1)
        # ESC aborts the demo loop.
        if key == 27:
            break
if __name__ == '__main__':
    # Entry point: configure paths, then run COCO evaluation or the visual demo.
    model_arch = 'resnet_18'
    model_path = './model_save/model_hand_last_'+model_arch+'.pth'# trained weights path
    gt_annot_path = './hand_detect_gt.json'
    img_dir = '../done/'# test-set image directory
    nms_flag = True
    Eval = True  # True: COCO evaluation; False: directory inference demo
    if Eval:
        eval(model_arch,model_path,img_dir,gt_annot_path)
    else:
        inference(model_arch,nms_flag,model_path,img_dir)
|
"""
评估指标模块
"""
import numpy as np
import abc
from matrixslow.core import Node
class Metrics(Node):
    """
    Abstract base class for evaluation-metric operator nodes.

    Parent 0 is expected to hold predicted probabilities, parent 1 the
    ground-truth labels (binary labels encoded as -1/1).
    """

    def __init__(self, *parents, **kargs):
        # Metrics nodes do not need to be saved by default.
        # BUG FIX: the default-lookup key was misspelled 'nees_save', which
        # silently discarded any need_save=... argument passed by callers.
        kargs['need_save'] = kargs.get('need_save', False)
        super().__init__(*parents, **kargs)
        # Let the concrete subclass initialise its accumulators.
        self.init()

    def reset(self):
        """Clear the cached value and re-initialise the accumulators."""
        self.reset_value()
        self.init()

    @abc.abstractmethod
    def init(self):
        # How to initialise the node is decided by the concrete subclass.
        pass

    @staticmethod
    def prob_to_label(prob, thresholds=0.5):
        """Convert probabilities to hard labels.

        Multi-row input is treated as multi-class (argmax -> one-hot);
        a single row is thresholded into {-1, 1}.
        """
        if prob.shape[0] > 1:
            # Multi-class: predict the class with the highest probability.
            labels = np.zeros((prob.shape[0], 1))
            labels[np.argmax(prob, axis=0)] = 1
        else:
            # Binary: threshold the probability at `thresholds`.
            labels = np.where(prob < thresholds, -1, 1)
        return labels

    def get_jacobi(self):
        # Computing a Jacobian is meaningless for metric nodes.
        raise NotImplementedError()

    def value_str(self):
        return f" {self.__class__.__name__}: {self.value:.4f}"
class Accuracy(Metrics):
    """
    Running accuracy node: (TP + TN) / TOTAL, accumulated over compute() calls.
    """

    def __init__(self, *parents, **kargs):
        super().__init__(*parents, **kargs)

    def init(self):
        # Running counters, reset via Metrics.reset().
        self.correct_num = 0
        self.total_num = 0

    def compute(self):
        """
        Accumulate accuracy; parent 0 holds predicted probabilities and
        parent 1 the ground-truth labels.
        """
        pred = Metrics.prob_to_label(self.parents[0].value)
        gt = self.parents[1].value
        assert len(pred) == len(gt)
        if pred.shape[0] > 1:
            # Multi-class one-hot: the element-wise product counts matches.
            self.correct_num += np.sum(np.multiply(pred, gt))
            self.total_num += pred.shape[1]
        else:
            self.correct_num += np.sum(pred == gt)
            self.total_num += len(pred)
        self.value = 0
        if self.total_num != 0:
            self.value = self.correct_num / self.total_num
class Precision(Metrics):
    """
    Running precision node: TP / (TP + FP), accumulated over compute() calls.
    """

    def __init__(self, *parents, **kargs):
        # Consistent with the sibling metric nodes.
        super().__init__(*parents, **kargs)

    def init(self):
        self.true_pos_num = 0
        self.pred_pos_num = 0

    def compute(self):
        """
        Accumulate Precision: TP / (TP + FP).
        """
        assert self.parents[0].value.shape[1] == 1
        pred = Metrics.prob_to_label(self.parents[0].value)
        gt = self.parents[1].value
        self.pred_pos_num += np.sum(pred == 1)
        # BUG FIX: `pred == gt and pred == 1` applies Python's `and` to numpy
        # arrays, which raises ValueError for arrays with more than one
        # element; use the element-wise `&` operator instead.
        self.true_pos_num += np.sum((pred == gt) & (pred == 1))
        self.value = 0
        if self.pred_pos_num != 0:
            self.value = self.true_pos_num / self.pred_pos_num
class Recall(Metrics):
    """
    Running recall node: TP / (TP + FN), accumulated over compute() calls.
    """

    def __init__(self, *parents, **kargs):
        super().__init__(*parents, **kargs)

    def init(self):
        self.gt_pos_num = 0
        self.true_pos_num = 0

    def compute(self):
        """
        Accumulate Recall: TP / (TP + FN).
        """
        assert self.parents[0].value.shape[1] == 1
        pred = Metrics.prob_to_label(self.parents[0].value)
        gt = self.parents[1].value
        self.gt_pos_num += np.sum(gt == 1)
        # BUG FIX: `pred == gt and pred == 1` applies Python's `and` to numpy
        # arrays (ValueError for more than one element); use element-wise `&`.
        self.true_pos_num += np.sum((pred == gt) & (pred == 1))
        self.value = 0
        if self.gt_pos_num != 0:
            self.value = self.true_pos_num / self.gt_pos_num
class ROC(Metrics):
    """
    ROC curve node: accumulates TPR/FPR over a sweep of 99 thresholds.
    """

    def __init__(self, *parents, **kargs):
        super().__init__(*parents, **kargs)

    def init(self):
        # 100 slots are allocated although only 99 thresholds (0.01..0.99)
        # are ever filled; the last slot stays zero (kept for compatibility).
        self.count = 100
        self.gt_pos_num = 0
        self.gt_neg_num = 0
        self.true_pos_num = np.array([0] * self.count)
        self.false_pos_num = np.array([0] * self.count)
        self.tpr = np.array([0] * self.count)
        self.fpr = np.array([0] * self.count)

    def compute(self):
        prob = self.parents[0].value
        gt = self.parents[1].value
        self.gt_pos_num += np.sum(gt == 1)
        self.gt_neg_num += np.sum(gt == -1)
        # 99 thresholds from 0.01 to 0.99 with step 0.01.
        thresholds = list(np.arange(0.01, 1.00, 0.01))
        # Classify with each threshold and compare against the labels.
        for index in range(0, len(thresholds)):
            pred = Metrics.prob_to_label(prob, thresholds[index])
            # BUG FIX: Python's `and` on numpy arrays raises ValueError for
            # multi-element arrays; use element-wise `&` instead.
            self.true_pos_num[index] += np.sum((pred == gt) & (pred == 1))
            self.false_pos_num[index] += np.sum((pred != gt) & (pred == 1))
        # Derive TPR and FPR once both classes have been observed.
        if self.gt_pos_num != 0 and self.gt_neg_num != 0:
            self.tpr = self.true_pos_num / self.gt_pos_num
            self.fpr = self.false_pos_num / self.gt_neg_num

    def value_str(self):
        return ''

    def show(self):
        """Plot the accumulated ROC curve with matplotlib (TkAgg backend)."""
        import matplotlib
        import matplotlib.pyplot as plt
        matplotlib.use('TkAgg')
        plt.ylim(0, 1)
        plt.xlim(0, 1)
        plt.plot(self.fpr, self.tpr)
        plt.show()
class ROC_AUC(Metrics):
    """
    ROC AUC node: rank statistic over (positive, negative) prediction pairs.
    """

    def __init__(self, *parents, **kargs):
        super().__init__(*parents, **kargs)

    def init(self):
        self.gt_pos_preds = []
        self.gt_neg_preds = []
        # Number of (positive, negative) pairs seen so far.
        self.total = 0

    def compute(self):
        prob = self.parents[0].value
        gt = self.parents[1].value
        # For simplicity, assume a single-element batch.
        if gt[0, 0] == 1:
            self.gt_pos_preds.append(prob)
        else:
            self.gt_neg_preds.append(prob)
        self.total = len(self.gt_pos_preds) * len(self.gt_neg_preds)

    def value_str(self):
        count = 0
        # Count the (positive, negative) pairs where the positive sample
        # received the higher predicted probability.
        for gt_pos_pred in self.gt_pos_preds:
            for gt_neg_pred in self.gt_neg_preds:
                if gt_pos_pred > gt_neg_pred:
                    count += 1
        # ROBUSTNESS FIX: guard against division by zero before samples of
        # both classes have been observed (total == 0).
        self.value = count / self.total if self.total else 0.0
        return f"{self.__class__.__name__}: {self.value:.4f} "
class F1Score(Metrics):
    """
    F1 score node: (2 * precision * recall) / (precision + recall).
    """

    def __init__(self, *parents, **kargs):
        super().__init__(*parents, **kargs)

    def init(self):
        # CONSISTENCY FIX: the counters were previously assigned in __init__
        # instead of init(), so Metrics.reset() (which re-runs init()) never
        # cleared them; initialise them here like the other metric nodes.
        self.true_pos_num = 0
        self.pred_pos_num = 0
        self.gt_pos_num = 0

    def compute(self):
        """
        Accumulate counts and compute F1 = (2 * pre * recall) / (pre + recall).
        """
        assert self.parents[0].value.shape[1] == 1
        pred = Metrics.prob_to_label(self.parents[0].value)
        gt = self.parents[1].value
        self.gt_pos_num += np.sum(gt)
        self.pred_pos_num += np.sum(pred)
        self.true_pos_num += np.multiply(pred, gt).sum()
        pre_score = 0
        recall_score = 0
        if self.pred_pos_num != 0:
            pre_score = self.true_pos_num / self.pred_pos_num
        if self.gt_pos_num != 0:
            recall_score = self.true_pos_num / self.gt_pos_num
        # (Removed a duplicated `self.value = 0` assignment.)
        self.value = 0
        if pre_score + recall_score != 0:
            self.value = (2 * np.multiply(pre_score, recall_score)
                          / (pre_score + recall_score))
|
#!/usr/bin/env python3
#
# pushbullet-notify
#
# (c) 2016 Daniel Jankowski
import os
import json
import argparse
import urllib.parse
import urllib.request
CONFIG_PATH = '~/.pushbullet-notify.conf'
def send_push(title, message, access_token, iden):
    """Send a 'note' push to every Pushbullet device identifier in *iden*.

    :param title: push title.
    :param message: push body text.
    :param access_token: Pushbullet API access token.
    :param iden: iterable of device identifiers to notify.
    """
    for device in iden:
        # One POST per target device.
        payload = {
            'device_iden': device,
            'type': 'note',
            'title': title,
            'body': message
        }
        req = urllib.request.Request('https://api.pushbullet.com/v2/pushes',
                                     urllib.parse.urlencode(payload).encode('utf-8'))
        req.add_header("Access-Token", access_token)
        # Response body is read but intentionally discarded.
        urllib.request.urlopen(req).read()
def send_request(url, access_token):
    """GET the given Pushbullet API path and print each device's identifier.

    :param url: API path under /v2/, e.g. 'devices'.
    :param access_token: Pushbullet API access token.
    """
    req = urllib.request.Request('https://api.pushbullet.com/v2/' + url)
    req.add_header("Access-Token", access_token)
    # Fetch and decode the JSON response.
    response = json.loads(urllib.request.urlopen(req).read().decode('utf-8'))
    # Print only the fields needed to pick a device identifier.
    for device in response['devices']:
        print('{nickname}({type}) - {iden}'.format(nickname=device['nickname'],
            type=device['type'], iden=device['iden']))
def main():
    """Parse CLI args, load the JSON config, then list devices or send a push.

    Returns 0 on usage/configuration errors (historical behaviour — the
    return value is not wired into sys.exit), None on success paths.
    """
    # Argument parser
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', action='store_true', help='Get device identifier')
    parser.add_argument('-t', type=str, help='Title of the message')
    parser.add_argument('-m', type=str, help='Body of the message')
    parser.add_argument('-c', '--config', type=str, help='Define config file')
    args = parser.parse_args()
    # Use the explicit config path when given, otherwise the default.
    if args.config:
        config_path = args.config
    else:
        config_path = CONFIG_PATH
    # Resolve ~ and normalise the path before checking it exists.
    # (Removed a leftover debug print of the resolved path.)
    config_path = os.path.expanduser(os.path.normpath(config_path))
    if not os.path.isfile(config_path):
        print('Error! Cannot read the config file')
        return 0
    # Read the config file.
    with open(config_path, 'r') as fp:
        config = json.load(fp)
    if args.d:  # List all devices to get the identifier
        send_request('devices', config['application_token'])
        return
    else:  # Send push to device
        if args.t is None:
            print('Error! Missing title')
            return 0
        # An omitted body is sent as an empty message.
        if args.m is None:
            message = ''
        else:
            message = args.m
        send_push(args.t, message, config['application_token'],
                  config['device_iden'])
        return


if __name__ == '__main__':
    main()
|
import pyserial
import time
import paho.mqtt.client as mqtt
import json
import uuid
import smbus
import math
from datetime import datetime
#opening serial port
ser=serial.Serial('/dev/ttyACM0')
#Set the variables for connecting to the iot service
broker = ""
deviceId="arduino" #you can give the address as default also
topic = "iot-2/evt/arduino/fmt/json"
username = "use-token-auth"
password = "superauth" #auth-token
organization = "lscroe" #org_id
deviceType = "accel"
topic = "iot-2/evt/status/fmt/json"
#Creating the client connection
#Set clientID and broker
clientID = "d:" + organization + ":" + deviceType + ":" + deviceId
broker = organization + ".messaging.internetofthings.ibmcloud.com"
mqttc = mqtt.Client(clientID)
#Set authentication values, if connecting to registered service
if username is not "":
mqttc.username_pw_set(username, password=password)
mqttc.connect(host=broker, port=1883, keepalive=60)
#Publishing to IBM Internet of Things Foundation
mqttc.loop_start()
while mqttc.loop() == 0:
analog_in = ser.readline().split()
accel_xout = int(analog_in[0])
accel_yout = int(analog_in[1])
accel_zout = int(analog_in[2])
accel_xout_scaled = 9.8*(accel_xout-350 / 350)
accel_yout_scaled = 9.8*(accel_yout-350 / 350)
accel_zout_scaled = 9.8*(accel_zout-350 / 350)
time_stamp=time.time()
msg = json.JSONEncoder().encode({"d":{"measured_timestamp":time_stamp, "accel_xout_scaled":accel_xout_scaled, "accel_yout_scaled":accel_yout_scaled, "accel_zout_scaled":accel_zout_scaled}})
mqttc.publish(topic, payload=msg, qos=1, retain=False)
print "message published ", time_stamp
time.sleep(0.01)
pass
|
import json
import os
import logging
import platform
import subprocess
import sys
import time
from urllib import quote
from urlparse import urlparse
from assemblyline.al.common.service_utils import get_merged_svc_config
from assemblyline.al.common.transport import ftp, local, http
from assemblyline.common.importing import module_attribute_by_name
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
# Node roles within an assemblyline deployment.
NODETYPE_CORE = 'core'
NODETYPE_RIAK = 'riak'
NODETYPE_WORKER = 'worker'
class PackageFetcher(object):
    """Downloads support/external packages through configurable transports.

    The ``external_packages`` seed config maps a *realm* name to a transport
    type (ftp/sftp/http/local/s3) and its connection arguments.  One
    transport instance is built per realm; fetch() downloads a remote
    relative path to a local path through the realm's transport.
    """

    TYPE_FTP = 'ftp'
    TYPE_SFTP = 'sftp'
    TYPE_HTTP = 'http'
    TYPE_LOCAL = 'local'
    TYPE_S3 = 's3'
    VALID_TYPES = [TYPE_FTP, TYPE_LOCAL, TYPE_S3, TYPE_SFTP, TYPE_HTTP]

    def __init__(self, config, alsi):
        if not config:
            raise Exception("Missing or invalid configuration for external_packages.")
        self._transports = {}
        self.cfg = config
        print("========\n= PackageFetcher endpoints")
        for realm, realm_cfg in self.cfg.iteritems():
            transport_type = realm_cfg['transport']
            if transport_type not in self.VALID_TYPES:
                raise Exception("Invalid package transport type: %s" % transport_type)
            if transport_type == self.TYPE_FTP:
                ftpargs = realm_cfg['args']
                self._transports[realm] = ftp.TransportFTP(
                    base=ftpargs['base'], host=ftpargs['host'],
                    password=ftpargs['password'], user=ftpargs['user'])
                print("[{realm}]\n{type}://{user}:{password}@{host}{base}".format(realm=realm,
                                                                                  type=self.TYPE_FTP,
                                                                                  user=ftpargs['user'],
                                                                                  password=ftpargs['password'],
                                                                                  host=ftpargs['host'],
                                                                                  base=ftpargs['base']))
            elif transport_type == self.TYPE_SFTP:
                # pysftp/paramiko are optional deps: install them on demand.
                try:
                    from assemblyline.al.common.transport import sftp
                except ImportError:
                    alsi.milestone("Installing SFTP transport dependancies")
                    alsi.sudo_apt_install("python-pip")
                    alsi.pip_install_all(['setuptools==24.0.2', 'paramiko==2.0.1', 'pysftp==0.2.9'])
                    from assemblyline.al.common.transport import sftp
                sftpargs = realm_cfg['args']
                self._transports[realm] = sftp.TransportSFTP(**sftpargs)
                out = "[{realm}]\n{type}://".format(realm=realm, type=self.TYPE_SFTP)
                if 'user' in sftpargs and 'password' in sftpargs:
                    out += "{user}:{password}@".format(user=sftpargs['user'], password=sftpargs['password'])
                out += "{host}{base}".format(host=sftpargs['host'], base=sftpargs['base'])
                first_param = True
                for k in ["private_key", "validate_host", "private_key_pass"]:
                    if k in sftpargs:
                        # BUG FIX: first_param was never cleared, so every
                        # optional parameter was prefixed with '?'; only the
                        # first one should be, the rest use '&'.
                        if first_param:
                            out += "?{key}={val}".format(key=k, val=sftpargs[k])
                            first_param = False
                        else:
                            out += "&{key}={val}".format(key=k, val=sftpargs[k])
                print(out)
            elif transport_type == self.TYPE_HTTP:
                httpargs = realm_cfg['args']
                self._transports[realm] = http.TransportHTTP(**httpargs)
                out = "[{realm}]\n{type}://".format(realm=realm, type=self.TYPE_HTTP)
                if 'user' in httpargs and 'password' in httpargs:
                    out += "{user}:{password}@".format(user=httpargs['user'], password=httpargs['password'])
                out += "{host}{base}".format(host=httpargs['host'], base=httpargs['base'])
                first_param = True
                for k in ["pki"]:
                    if k in httpargs:
                        # Same first-vs-subsequent separator fix as the SFTP branch.
                        if first_param:
                            out += "?{key}={val}".format(key=k, val=httpargs[k])
                            first_param = False
                        else:
                            out += "&{key}={val}".format(key=k, val=httpargs[k])
                print(out)
            elif transport_type == self.TYPE_LOCAL:
                base_dir = realm_cfg['args']['base']
                self._transports[realm] = local.TransportLocal(base=base_dir)
                print("[{realm}]\n{type}://{base}".format(realm=realm, type=self.TYPE_LOCAL, base=base_dir))
            elif transport_type == self.TYPE_S3:
                # boto3 is an optional dependency: install it on demand.
                try:
                    from assemblyline.al.common.transport import s3
                except ImportError:
                    alsi.milestone("Installing Amazon S3 Dependencies...")
                    alsi.sudo_apt_install("python-pip")
                    alsi.pip_install_all(["boto3==1.4.4", 'botocore==1.5.62'])
                    from assemblyline.al.common.transport import s3
                # boto logs are very chatty at INFO.
                logging.getLogger('botocore').setLevel(logging.WARNING)
                logging.getLogger('boto3').setLevel(logging.WARNING)
                s3args = realm_cfg['args']
                self._transports[realm] = s3.TransportS3(**s3args)
                print("[{realm}]\n{type}://{accesskey}:{secretkey}"
                      "@{host}/{s3_bucket}".format(realm=realm,
                                                   type=self.TYPE_S3,
                                                   accesskey=s3args['accesskey'],
                                                   secretkey=s3args['secretkey'],
                                                   s3_bucket=s3args['s3_bucket'],
                                                   base=s3args['base'],
                                                   host=s3args.get('host', s3.TransportS3.DEFAULT_HOST)))
            else:
                raise Exception("Transport not implemented: %s" % transport_type)
        print("========")

    def fetch(self, relpath, localpath, realm='assemblyline'):
        """Download *relpath* to *localpath* through *realm*'s transport."""
        return self._transports[realm].download(relpath, localpath)
class SiteInstaller(object):
    """Drives installation and configuration of an assemblyline node."""

    def __init__(self, seed=None, simple=False):
        # The seed may be a dict, a "module.attribute" dotted path, or come
        # from the AL_SEED environment variable; reload_config() resolves it.
        if not seed:
            seed = os.environ.get('AL_SEED', None)
        self.log = logging.getLogger('assemblyline.install')
        self.initial_seed = seed
        self.config = None
        self.seed_module = None
        self.reload_config()
        # Optionally run shell commands through a renamed copy of /bin/sh
        # (seed: system.shell_bypass).
        if self.config['system'].get('shell_bypass', False):
            SiteInstaller.runcmd("sudo ln -s /bin/sh /tmp/notashell", raise_on_error=False)
            self.shell = "/tmp/notashell"
        else:
            self.shell = "sh"
        self.alroot = self.config['system']['root']
        # cheap logging hooks for now
        self.info = self.log.info
        self.error = self.log.error
        self.warn = self.log.warn
        self.exception = self.log.exception
        if not simple:
            # Full mode: pip + external-package helpers and a scratch dir.
            self._pipper = PipInstaller(pypi_index_url=self.config['installation']['pip_index_url'])
            self._package_fetcher = PackageFetcher(self.config['installation']['external_packages'], self)
            self.install_temp = os.path.join(self.alroot, 'var/.installtmp')
            if not os.path.exists(self.install_temp):
                os.makedirs(self.install_temp)
        else:
            # Simple mode: no package-installation helpers available.
            self.install_temp = "/tmp"
            self._package_fetcher = None
            self._pipper = None
    def reload_config(self):
        """(Re)resolve the initial seed into ``self.config``.

        A dict seed is used as-is; a dotted-path seed is imported and each
        service's config is merged into the master list; otherwise the seed
        is loaded from riak.
        """
        self.seed_module = None
        if isinstance(self.initial_seed, dict):
            self.config = self.initial_seed
        elif self.initial_seed:
            self.config = module_attribute_by_name(self.initial_seed)
            self.seed_module = self.initial_seed
            # Merge each service's default config into its master-list entry.
            services_to_register = self.config['services']['master_list']
            for service, svc_detail in services_to_register.iteritems():
                self.config['services']['master_list'][service] = get_merged_svc_config(service, svc_detail, self.log)
        else:
            from assemblyline.al.common import config_riak
            self.config = config_riak.load_seed()
def fatal(self, s):
def red(st):
prefix = '\x1b[' + '31m'
suffix = '\x1b[0m'
return prefix + st + suffix
self.log.error(red(s))
def get_nodetypes_from_seed(self):
types = []
# noinspection PyBroadException
try:
ip = self.get_ipaddress()
hostname = self.get_hostname()
except:
ip = "127.0.0.1"
hostname = "localhost"
if ip in self.config['core']['nodes'] or \
hostname in self.config['core']['nodes'] or \
'localhost' in self.config['core']['nodes'] or \
'127.0.0.1' in self.config['core']['nodes']:
types.append(NODETYPE_CORE)
if ip in self.config['datastore']['riak']['nodes'] or \
hostname in self.config['datastore']['riak']['nodes'] or \
'localhost' in self.config['datastore']['riak']['nodes'] or \
'127.0.0.1' in self.config['datastore']['riak']['nodes']:
types.append(NODETYPE_RIAK)
if ip in self.config['workers']['nodes'] or \
hostname in self.config['workers']['nodes'] or \
'localhost' in self.config['workers']['nodes'] or \
'127.0.0.1' in self.config['workers']['nodes']:
types.append(NODETYPE_WORKER)
return types
    def setup_git_repos(self, root_git_list=None, site_specific_git_list=None, service_git_list=None,
                        git_override=None):
        """Clone or refresh every configured git repository for this node.

        Builds a URL/branch template per *realm* (optionally overridden by
        git_override, or redirected to the internal mirror on non-core
        nodes), then clones/updates root repos, service repos (plus their
        'depends' repos) and site-specific repos.
        """
        # The checkout root is the path prefix above assemblyline/al/install.
        install_dir = os.path.realpath(__file__).split(os.path.join('assemblyline', 'al', 'install'))[0]
        installation = self.config['installation']
        site_spec = self.config['sitespecific']
        services = self.config['services']['master_list']
        internal_repo = None
        # Non-core nodes pull everything from the internal mirror instead.
        if NODETYPE_CORE not in self.get_nodetypes_from_seed():
            internal_repo = self.config['system']['internal_repository']
        if root_git_list is None:
            root_git_list = installation.get('repositories', {}).get('repos', {}).keys()
        if site_specific_git_list is None:
            site_specific_git_list = site_spec.get('repositories', {}).keys()
        if service_git_list is None:
            service_git_list = services.keys()
        # Make al_services an importable package.
        if not os.path.exists(os.path.join(install_dir, "al_services")):
            os.makedirs(os.path.join(install_dir, "al_services"))
        open(os.path.join(install_dir, "al_services", "__init__.py"), 'a').close()
        # Resolve each realm to a URL template ("{repo}" placeholder) + branch.
        realm_urls = {}
        realm_branchs = {}
        for name, realm in installation.get('repositories', {}).get('realms', {}).iteritems():
            if git_override:
                realm_urls[name] = git_override['url']
                realm_branchs[name] = git_override['branch']
            elif internal_repo:
                realm_url = internal_repo['url']
                if not realm_url.endswith("/"):
                    realm_url += "/"
                realm_urls[name] = realm_url + "{repo}"
                realm_branchs[name] = internal_repo['branch']
            else:
                if realm['url'].lower().startswith("http"):
                    # Embed url-quoted credentials into the HTTP(S) URL.
                    if realm['user'] and realm['password']:
                        scheme, url = realm['url'].split('://', 1)
                        realm_url = "%s://%s:%s@%s" % (scheme, realm['user'], quote(realm['password']), url)
                    else:
                        realm_url = realm['url']
                elif realm['url'].lower().startswith("git") or realm['url'].lower().startswith("ssh"):
                    # For ssh remotes, install the realm's key and an
                    # ~/.ssh/config Host block (once) so git can authenticate.
                    if realm['key']:
                        ssh_dir = os.path.expanduser("~/.ssh/")
                        if not os.path.exists(os.path.join(ssh_dir, name)):
                            with open(os.path.join(ssh_dir, name), 'wb') as realm_pub_file:
                                realm_pub_file.write(realm['key'])
                        ssh_config = os.path.join(ssh_dir, 'config')
                        host, url = realm['url'][4:].split(":", 1)
                        if not self.grep_quiet(ssh_config, "HostName %s" % host, sudo=False):
                            config_block = "Host %s\n\tHostName %s\n\tUser git\n\tIdentityFile ~/.ssh/%s" % (name,
                                                                                                            host,
                                                                                                            name)
                            self.runcmd('echo "' + config_block + '" >> ' + ssh_config)
                    realm_url = realm['url']
                elif os.path.exists(realm['url']):
                    # Local git path
                    realm_url = realm['url']
                else:
                    self.fatal("Invalid realm %s:\n%s" % (name, str(realm)))
                    exit(1)
                if not realm_url.endswith("/"):
                    realm_url += "/"
                realm_urls[name] = realm_url + "{repo}.git"
                realm_branchs[name] = realm['branch']
        # Root-level repositories.
        for repo in root_git_list:
            repo_realm = installation.get('repositories', {}).get('repos', {}).get(repo, {}).get('realm', {})
            if repo_realm:
                self._clone_or_seturl(repo, realm_urls[repo_realm], realm_branchs[repo_realm], install_dir)
        # Service repositories (and their optional 'depends' repository).
        for svc in service_git_list:
            service = services.get(svc, {})
            repo = service.get('repo', None)
            # NOTE(review): when internal_repo is set and a service has no
            # 'repo' entry this concatenation would raise TypeError -- confirm
            # all seeded services define 'repo'.
            if internal_repo:
                repo = "al_services/" + repo
            repo_realm = service.get('realm', None)
            if repo and repo_realm:
                self._clone_or_seturl(repo,
                                      realm_urls[repo_realm],
                                      realm_branchs[repo_realm],
                                      os.path.join(install_dir, "al_services"))
            if 'depends' in service:
                depend_repo = service['depends'].get('repo', None)
                if internal_repo:
                    depend_repo = "al_services/" + depend_repo
                depend_realm = service['depends'].get('realm', None)
                if depend_repo and depend_realm:
                    self._clone_or_seturl(depend_repo,
                                          realm_urls[depend_realm],
                                          realm_branchs[depend_realm],
                                          os.path.join(install_dir, "al_services"))
        # Site-specific repositories.
        for repo in site_specific_git_list:
            repo_realm = site_spec.get('repositories', {}).get(repo, {}).get('realm', {})
            if repo_realm:
                self._clone_or_seturl(repo, realm_urls[repo_realm], realm_branchs[repo_realm], install_dir)
def _clone_or_seturl(self, repo, realm_url, branch, location):
if os.path.exists(os.path.join(location, repo)):
cmd = "git remote set-url origin %s" % realm_url.format(repo=repo)
self.runcmd(cmd, shell=True, cwd=os.path.join(location, repo), raise_on_error=False)
cmd = "git checkout %s" % branch
self.runcmd(cmd, shell=True, cwd=os.path.join(location, repo), raise_on_error=False)
cmd = "git pull"
self.runcmd(cmd, shell=True, cwd=os.path.join(location, repo), raise_on_error=False)
else:
cmd = "git clone %s -b %s" % (realm_url.format(repo=repo), branch)
self.runcmd(cmd, shell=True, cwd=location, raise_on_error=False)
    def install_persistent_pip_conf(self):
        """Point pip and easy_install at the configured index, for the
        current user and for root (installs run under sudo)."""
        # only necessary if we have an explicit pip configuration
        pip_url = self.config['installation']['pip_index_url']
        if not pip_url:
            self.milestone("No explicit pip configuration specified")
            return
        self.milestone("Updating pip configuration files to point to %s" % pip_url)
        pip_dir = os.path.expanduser('~/.pip/')
        if not os.path.exists(pip_dir):
            os.makedirs(pip_dir)
        with open(os.path.join(pip_dir, 'pip.conf'), 'wb') as piprc:
            piprc.write("[global]\n")
            piprc.write("index-url=%s\n" % pip_url)
        # easy_install reads ~/.pydistutils.cfg rather than pip.conf.
        with open(os.path.expanduser("~/.pydistutils.cfg"), 'wb') as pydistutils:
            pydistutils.write("[easy_install]\n")
            pydistutils.write("index-url=%s\n" % pip_url)
        # Mirror the same configuration for root.
        self.runcmd("sudo mkdir /root/.pip/", raise_on_error=False)
        self.runcmd("sudo cp %s /root/.pip/pip.conf" % os.path.join(pip_dir, 'pip.conf'))
        self.runcmd("sudo cp %s /root/.pydistutils.cfg" % os.path.expanduser("~/.pydistutils.cfg"))
        self.milestone("Pip configuration updated!")
        return
def milestone(self, s):
def green(st):
prefix = '\x1b[' + '32m'
suffix = '\x1b[0m'
return prefix + st + suffix
self.log.info(green(s))
def assert_al_in_pythonpath(self):
al_pkg_root = self.config['system']['root'] + '/pkg'
if al_pkg_root not in sys.path and not al_pkg_root + '/' in sys.path:
raise Exception("AL root not found in python path. Have you updated PYTHONPATH in your bashrc ?")
    def fetch_package(self, relpath, localpath='.', realm='assemblyline'):
        """Download a support package via the realm's configured transport."""
        self.info("Fetching '{package}' from realm '{realm}'".format(package=relpath, realm=realm))
        self._package_fetcher.fetch(relpath, localpath, realm)
    def check_log_prerequisites(self):
        """Warn (but do not abort) when running on unsupported hardware or distro."""
        mach = platform.machine()
        if mach != 'x86_64':
            self.log.warn('Be warned: You are installing on a non stardard machine: %s', mach)
        # NOTE(review): platform.linux_distribution() was removed in Python
        # 3.8; this module targets the Python 2 runtime (print statements,
        # iteritems elsewhere in the file).
        (dist, version, name) = platform.linux_distribution()
        if dist != 'Ubuntu' or version != '14.04':
            self.log.warn('Be warned: You are installing on an unsupported linux distribution: %s',
                          '-'.join([dist, version, name]))
    @staticmethod
    def runcmd(cmdline, shell=True, raise_on_error=True, piped_stdio=True, silent=False, cwd=None):
        """Run a command; thin static wrapper over the module-level _runcmd."""
        return _runcmd(cmdline, shell, raise_on_error, piped_stdio, silent=silent, cwd=cwd)
def assert_running_in_python_venv(self):
import sys
if not hasattr(sys, 'real_prefix'):
self.log.error("You are not running with the AL virtualenv. Aborting.")
exit(1)
    def remove_apparmor(self):
        """Stop, disable and uninstall apparmor if it is present."""
        self.milestone('.....Disabling apparmor.')
        if os.path.exists('/lib/apparmor'):
            self.runcmd('sudo service apparmor stop')
            self.runcmd('sudo update-rc.d -f apparmor remove')
            self.runcmd('sudo apt-get -y remove apparmor')
    def pip_install(self, package):
        """Install a single package through the shared PipInstaller."""
        self.milestone('.....pip installing:' + package)
        self._pipper.install(package)

    def pip_install_all(self, packages):
        """Announce each package, then install the whole list in one pip call."""
        for p in packages:
            self.milestone('.....pip installing:' + p)
        self._pipper.install_all(packages)

    def pip_refresh(self):
        """Rebuild the cached pip command line (e.g. after config changes)."""
        self.milestone("Refreshing pip command...")
        # "resfresh" is the actual (typo'd) PipInstaller method name.
        self._pipper.resfresh_pip_install_cmd()

    def pip_upgrade(self, package):
        """Upgrade a single package (milestone text reuses 'installing')."""
        self.milestone('.....pip installing:' + package)
        self._pipper.upgrade(package)

    def pip_upgrade_all(self, packages):
        """Announce each package, then upgrade the whole list in one pip call."""
        for p in packages:
            self.milestone('.....pip installing:' + p)
        self._pipper.upgrade_all(packages)
    @staticmethod
    def grep_quiet(filename, content, sudo=True):
        """Return True if *content* (a grep pattern) matches any line of *filename*.

        NOTE(review): pattern and filename are spliced into a shell command
        with only surrounding double quotes; inputs containing shell
        metacharacters may misbehave.
        """
        if sudo:
            cmdline = "sudo "
        else:
            cmdline = ""
        cmdline += 'grep -q \"' + content + '\" ' + filename
        # grep -q exits 0 when at least one line matches.
        rc, _, _ = _runcmd(cmdline, raise_on_error=False)
        return rc == 0
    def append_line_if_doesnt_exist(self, filename, line):
        """Append *line* to *filename* (via sudo shell) unless a line starting
        with the same text is already present ('^' anchors the grep)."""
        if not self.grep_quiet(filename, "^" + line):
            self.runcmd('sudo ' + self.shell + ' -c \'echo \"' + line + '\" >> ' + filename + "'")
    @staticmethod
    def get_hostname(silent=False):
        """Return this host's name: `hostname -A` first, falling back to `hostname -f`."""
        _, hostname, _ = SiteInstaller.runcmd('hostname -A', silent=silent)
        hostname = hostname.strip()
        if not hostname:
            _, hostname, _ = SiteInstaller.runcmd('hostname -f', silent=silent)
            hostname = hostname.strip()
        return hostname

    @staticmethod
    def get_username(silent=False):
        """Return the current effective username (`whoami`)."""
        _, uname, _ = SiteInstaller.runcmd('whoami', silent=silent)
        return uname.strip()

    @staticmethod
    def get_ipaddress(silent=False):
        """Return the source IP the kernel would use to reach the broadcast address."""
        _, ip, _ = SiteInstaller.runcmd('ip route get to 255.255.255.255 | sed -e "s/.*src //" | head -n 1',
                                        silent=silent)
        return ip.strip()
    def sudo_apt_install(self, packages):
        """apt-get install *packages* (one name or a list) non-interactively."""
        apt_args = ['sudo', 'DEBIAN_FRONTEND=noninteractive', 'apt-get', '-y', '-q', 'install']
        cmd_line = apt_args
        if isinstance(packages, list):
            cmd_line.extend(packages)
            for p in packages:
                self.milestone('.....apt installing:' + p)
        else:
            cmd_line.append(packages)
            self.milestone('.....apt installing:' + packages)
        # shell=False: pass the argv list straight to apt-get.
        (_, _, _) = self.runcmd(cmd_line, shell=False)
def sudo_sed_inline(self, fname, expression_list, check_exist=True, create_backup=True):
if check_exist and not os.path.isfile(fname):
raise Exception("No such file to sed_inline: %s", fname)
cmdline = 'sudo sed -i'
if create_backup:
cmdline += ".bak "
else:
cmdline += " "
for expression in expression_list:
cmdline += " -e '" + expression + "'"
cmdline += ' ' + fname
self.milestone('\tUpdating:' + fname)
self.runcmd(cmdline)
    # if src is a relative path, add the alroot directory as prefix
    def sudo_install_file(self, src, dst, backup=False):
        """install(1) *src* to *dst* with sudo; relative *src* resolves under <alroot>/pkg."""
        if not os.path.isabs(src):
            src = os.path.join(self.config['system']['root'], 'pkg', src)
        self.milestone('\tInstalling file:' + dst)
        self._install_file(src, dst, backup, sudo=True)

    def install_file(self, src, dst, backup=False):
        """install(1) *src* to *dst* without sudo; relative *src* resolves under <alroot>/pkg."""
        if not os.path.isabs(src):
            src = os.path.join(self.config['system']['root'], 'pkg', src)
        self.milestone('\tInstalling file:' + dst)
        self._install_file(src, dst, backup, sudo=False)
def execute_core_preinstall_hook(self):
hook_paths = self.config['installation']['hooks'].get('core_pre', [])
if not hook_paths:
return
if isinstance(hook_paths, str):
raise Exception("install_hooks must be a list")
for hook_path in hook_paths:
import importlib
hook_module = importlib.import_module(hook_path)
if not hasattr(hook_module, 'execute'):
self.warn("Specified hook as no execute method. Aborting hook: %s\n%s" %
(hook_path, str(dir(hook_module))))
return
# grab the hook call it
hook_cb = getattr(hook_module, 'execute')
hook_cb(self)
def execute_ui_preinstall_hook(self):
hook_paths = self.config['installation']['hooks'].get('ui_pre', [])
if not hook_paths:
return
if isinstance(hook_paths, str):
raise Exception("install_hooks must be a list")
import importlib
for hook_path in hook_paths:
hook_module = importlib.import_module(hook_path)
if not hasattr(hook_module, 'execute'):
self.warn("Specified hook as no execute method. Aborting hook: %s\n" % hook_path)
return
# grab the hook method and call it
hook_cb = getattr(hook_module, 'execute')
hook_cb(self)
def execute_riak_preinstall_hook(self):
hook_paths = self.config['installation']['hooks'].get('riak_pre', [])
if not hook_paths:
return
if isinstance(hook_paths, str):
raise Exception("install_hooks must be a list")
for hook_path in hook_paths:
import importlib
hook_module = importlib.import_module(hook_path)
if not hasattr(hook_module, 'execute'):
self.warn("Specified hook as no execute method. Aborting hook: %s\n%s" % hook_path)
return
# grab the hook call it
hook_cb = getattr(hook_module, 'execute')
hook_cb(self)
    def _install_file(self, src, dst, backup, sudo):
        """Copy *src* to *dst* with install(1); -D creates parent directories."""
        cmdprefix = 'install -D '
        if backup:
            # -b with a timestamped -S suffix keeps the previous version around.
            suffix = time.strftime('%FT%T')
            cmdprefix = 'install -bDS .{suffix} '.format(suffix=suffix)
        cmdline = '{cmdprefix} {local} {dst}'.format(cmdprefix=cmdprefix, local=src, dst=dst)
        if sudo:
            # noinspection PyAugmentAssignment
            cmdline = 'sudo ' + cmdline
        self.runcmd(cmdline)
def install_yara_3(self):
# Check if yara-python version already installed
try:
cmd_output = self.runcmd('pip show yara-python')[1]
except:
cmd_output = ''
if "Version: 3.6.3" not in cmd_output:
wd = os.getcwd()
local_yara_support = os.path.join(self.alroot, 'support/yara/')
local_yara_python = os.path.join(local_yara_support, 'yara-python-3.6.3.tar.gz')
self.fetch_package('yara/yara-python-3.6.3.tar.gz', local_yara_python)
os.chdir(local_yara_support)
self.runcmd("tar -zxf yara-python-3.6.3.tar.gz")
os.chdir(os.path.join(local_yara_support, "yara-python-3.6.3"))
self.runcmd("python setup.py build --enable-dotnet")
self.runcmd("sudo python setup.py install")
os.chdir(wd)
    def install_oracle_java8(self):
        """Install Oracle JDK 8u72 from the package store unless already present."""
        self.milestone("Installing Oracle Java 8...")
        self.sudo_apt_install([
            'java-common',
        ])
        # `java -version` reports on stderr; skip install if the exact build exists.
        _, _, stderr = self.runcmd("java -version", raise_on_error=False)
        if "1.8.0_72" not in stderr:
            jdk = "jdk-8u72-linux-x64.tar.gz"
            installer = "oracle-java8-installer_8u72+8u71arm-1-webupd8-0_all.deb"
            defaults = "oracle-java8-set-default_8u72+8u71arm-1-webupd8-0_all.deb"
            jdk_remote = "oracle/%s" % jdk
            installer_remote = "oracle/%s" % installer
            defaults_remote = "oracle/%s" % defaults
            local_jdk = os.path.join('/tmp/jdk_installer/', jdk)
            local_installer = os.path.join('/tmp/jdk_installer/', installer)
            local_defaults = os.path.join('/tmp/jdk_installer/', defaults)
            self.fetch_package(jdk_remote, local_jdk)
            self.fetch_package(installer_remote, local_installer)
            self.fetch_package(defaults_remote, local_defaults)
            # Pre-seed the installer cache so the .deb does not try to download.
            self.runcmd("sudo mkdir /var/cache/oracle-jdk8-installer/")
            self.runcmd("sudo ln -s %s /var/cache/oracle-jdk8-installer/jdk-8u72-linux-x64.tar.gz" % local_jdk)
            # Pre-accept the Oracle license so dpkg runs unattended.
            self.runcmd("echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | "
                        "sudo /usr/bin/debconf-set-selections")
            self.runcmd("sudo dpkg -i %s" % local_installer)
            self.runcmd("sudo dpkg -i %s" % local_defaults)
        else:
            self.info("Oracle Java 8 already installed. Skipping...")
        self.milestone("Oracle Java 8 installation completed!")
    def install_docker(self):
        """Install docker-engine + docker-py and configure the private registry
        (CA cert or insecure-registry, plus registry credentials) if seeded."""
        self.milestone("Installing Docker...")
        self.sudo_apt_install("software-properties-common")
        self.runcmd('sudo add-apt-repository "%s"' % self.config['installation']['docker']['apt_repo_info'])
        self.runcmd("wget -q %s -O- | sudo apt-key add -" % self.config['installation']['docker']['apt_repo_key_url'])
        self.runcmd("sudo apt-get update -m", raise_on_error=False)
        self.sudo_apt_install(['docker-engine'])
        self.pip_install('docker-py')
        # Allow both the install user and the AL system user to talk to docker.
        self.runcmd("sudo gpasswd -a %s docker" % self.get_username())
        self.runcmd("sudo gpasswd -a %s docker" % self.config['system']['user'])
        if 'private_registry' in self.config['installation']['docker']:
            if 'private_registry_key' in self.config['installation']['docker']:
                # Trust the registry's CA both system-wide and for dockerd.
                self.runcmd("sudo mkdir /usr/local/share/ca-certificates/docker-dev-cert", raise_on_error=False)
                self.runcmd('echo "%s" | sudo tee /usr/local/share/ca-certificates/docker-dev-cert/devdockerCA.crt' %
                            self.config['installation']['docker']['private_registry_key'])
                self.runcmd("sudo mkdir -p /etc/docker/certs.d/%s/" %
                            self.config['installation']['docker']['private_registry'], raise_on_error=False)
                self.runcmd('sudo cp /usr/local/share/ca-certificates/docker-dev-cert/devdockerCA.crt '
                            '/etc/docker/certs.d/%s/ca.crt' % self.config['installation']['docker']['private_registry'])
                self.runcmd("sudo update-ca-certificates")
            else:
                # No CA provided: mark the registry as insecure instead.
                self.runcmd('echo \'DOCKER_OPTS="--insecure-registry %s"\' | sudo tee -a /etc/default/docker' %
                            self.config['installation']['docker']['private_registry'])
            if 'private_registry_auth' in self.config['installation']['docker']:
                # Merge the registry credentials into the docker config.json.
                docker_cfg_dir = os.path.join(self.config['system']['root'], '.docker/')
                docker_cfg_path = os.path.join(docker_cfg_dir, 'config.json')
                docker_cfg_temp = "/tmp/docker_cfg"
                registry_uri = "https://%s" % self.config['installation']['docker']['private_registry']
                if not os.path.exists(docker_cfg_dir):
                    self.runcmd("sudo mkdir %s" % docker_cfg_dir)
                if os.path.exists(docker_cfg_path):
                    with open(docker_cfg_path, 'r') as fh:
                        docker_cfg = json.load(fh)
                else:
                    docker_cfg = {}
                if 'auths' not in docker_cfg:
                    docker_cfg["auths"] = {}
                docker_cfg["auths"][registry_uri] = {
                    "auth": self.config['installation']['docker']['private_registry_auth'],
                    "email": "",
                }
                # Just to avoid echo destroying our string..
                with open('/tmp/docker_cfg', 'w') as fh:
                    json.dump(docker_cfg, fh, indent=4)
                self.runcmd('sudo mv %s %s' % (docker_cfg_temp, docker_cfg_path))
        self.runcmd("sudo service docker restart")
        self.milestone("Docker installation completed!")
def symlink(self, src, dst):
if not os.path.isabs(src):
src = os.path.join(self.alroot, 'pkg', src)
if not os.path.isabs(dst):
dst = os.path.join(self.alroot, 'pkg', dst)
if os.path.exists(dst):
# already links
return
self.info("Linking %s --> %s", src, dst)
try:
os.symlink(src, dst)
except OSError, os_err:
if os_err.errno == 17:
pass
else:
raise
    def install_pefile(self):
        """Install the pinned pefile sdist from the package store."""
        # pefile 1.2.10-114 is not provided by pypi anymore therefor the following won't work
        # self.pip_install('pefile==1.2.10-114')
        # until we've tested newer versions of pefile, we will install it from an sdist package
        pefile_pkg = 'pefile-1.2.10-114.tar.gz'
        remote_path = 'python/pip/' + pefile_pkg
        local_path = os.path.join('/tmp/', pefile_pkg)
        self.fetch_package(remote_path, local_path)
        self.runcmd('sudo -H pip install ' + local_path, piped_stdio=False)
def _runcmd(cmdline, shell=True, raise_on_error=True, piped_stdio=True, silent=False, cwd=None):
if not silent:
if not cwd:
print "Running: %s" % cmdline
else:
print "Running: %s (%s)" % (cmdline, cwd)
if piped_stdio:
p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell, cwd=cwd)
else:
p = subprocess.Popen(cmdline, shell=shell, cwd=cwd)
stdout, stderr = p.communicate()
rc = p.returncode
if raise_on_error and rc != 0:
raise Exception("FAILED: return_code:%s\nstdout:\n%s\nstderr:\n%s" % (rc, stdout, stderr))
return rc, stdout, stderr
def assert_windows2008_r2():
    """Raise unless the current platform reports Windows 2008ServerR2."""
    expected = '2008ServerR2'
    found = platform.win32_ver()[0]
    if found != expected:
        raise Exception('Platform Assertion Failure. Found: %s vs %s' % (found, expected))
class PipInstaller(object):
    """Thin wrapper around the pip CLI used by the installer.

    Builds the base ``pip install`` command line once (adding sudo on
    linux, and --index-url/--trusted-host when a custom index is
    configured) and reuses it for install/upgrade calls.
    """

    def __init__(self, pypi_index_url=None):
        self.indexurl = pypi_index_url
        self.pip = 'pip'
        self.pip_install_cmd = self._get_pip_install_cmd()

    def _get_pip_install_cmd(self):
        # On linux, packages are installed system-wide and need sudo.
        if 'linux' in platform.system().lower():
            pip_install_cmd = ['sudo', '-H', self.pip, 'install']
        else:
            pip_install_cmd = [self.pip, 'install']
        if self.indexurl:
            pip_install_cmd.append('--index-url=' + self.indexurl)
            # pip >= 8 refuses unverified indexes unless the host is trusted.
            if self.require_trusted_host():
                host = urlparse(self.indexurl).hostname
                pip_install_cmd.append('--trusted-host=' + host)
        return pip_install_cmd

    def install(self, package):
        """Install a single package (string)."""
        assert(isinstance(package, str))
        _, out, _ = _runcmd(self.pip_install_cmd + [package], shell=False)

    def install_all(self, packages):
        """Install a list of packages in one pip invocation."""
        assert(isinstance(packages, list))
        _, out, _ = _runcmd(self.pip_install_cmd + packages, shell=False)

    # noinspection PyUnresolvedReferences
    def require_trusted_host(self):
        """Return True when the installed pip is version 8 or newer."""
        try:
            rc, out, _ = _runcmd([self.pip, '-V'], raise_on_error=False, shell=False, silent=True)
            if rc == 0:
                # `pip -V` prints e.g. "pip 9.0.1 from /usr/lib/... (python 2.7)".
                pip_ver = out.split(" from")[0].replace("pip ", "")
                # BUG FIX: int(pip_ver[0]) looked only at the first character,
                # so pip 10+ ("1...") was treated as older than 8.  Compare
                # the full major version instead.
                return int(pip_ver.split('.')[0]) >= 8
            else:
                return False
        except OSError:
            return False

    def resfresh_pip_install_cmd(self):
        """Rebuild the cached command line (name kept -- callers use this typo)."""
        self.pip_install_cmd = self._get_pip_install_cmd()

    def upgrade(self, package):
        """Upgrade a single package (string)."""
        assert(isinstance(package, str))
        _, out, _ = _runcmd(self.pip_install_cmd + ['--upgrade', package], shell=False)

    def upgrade_all(self, packages):
        """Upgrade a list of packages in one pip invocation."""
        assert(isinstance(packages, list))
        _, out, _ = _runcmd(self.pip_install_cmd + ['--upgrade'] + packages, shell=False)
|
from types import FunctionType
from typing import (
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Type,
TypeVar,
Union,
cast,
)
from graphql import is_named_type, value_from_ast_untyped
from graphql.execution.values import get_argument_values
from graphql.language import DirectiveLocation
from graphql.type import (
GraphQLArgument,
GraphQLDirective,
GraphQLEnumType,
GraphQLEnumValue,
GraphQLField,
GraphQLInputField,
GraphQLInputObjectType,
GraphQLInterfaceType,
GraphQLList,
GraphQLNamedType,
GraphQLNonNull,
GraphQLObjectType,
GraphQLScalarType,
GraphQLSchema,
GraphQLUnionType,
)
from typing_extensions import Literal, Protocol
# Any schema element a SchemaVisitor may be asked to visit.
VisitableSchemaType = Union[
    GraphQLSchema,
    GraphQLObjectType,
    GraphQLInterfaceType,
    GraphQLInputObjectType,
    GraphQLNamedType,
    GraphQLScalarType,
    GraphQLField,
    GraphQLArgument,
    GraphQLUnionType,
    GraphQLEnumType,
    GraphQLEnumValue,
]
# Type variable constrained to visitable schema elements.
V = TypeVar("V", bound=VisitableSchemaType)
# Name -> schema element mapping (e.g. a type map or field map).
VisitableMap = Dict[str, V]
# Either a VisitableMap or a plain list of schema elements.
IndexedObject = Union[VisitableMap, List[V]]
# Visitor callback; the exact signature depends on the call site.
Callback = Callable[..., Any]
def each(list_or_dict: IndexedObject, callback: Callback):
    """Invoke *callback* on every element.

    Lists call ``callback(value)``; mappings call ``callback(value, key)``.
    """
    if isinstance(list_or_dict, list):
        for item in list_or_dict:
            callback(item)
        return
    for key, item in list_or_dict.items():
        callback(item, key)
def update_each_key(object_map: VisitableMap, callback: Callback):
    """
    The callback can return None to leave the key untouched, False to remove
    the key from the array or object, or a non-null V to replace the value.
    """
    removals: List[str] = []
    # Iterate over a snapshot so the callback may mutate object_map safely.
    for key, value in list(object_map.items()):
        outcome = callback(value, key)
        if outcome is False:
            removals.append(key)
        elif outcome is not None:
            object_map[key] = outcome
    for key in removals:
        object_map.pop(key)
class SchemaVisitor(Protocol):
    """Base visitor over GraphQL schema elements.

    Subclasses override the ``visit_*`` methods they care about.  As used by
    ``visit_schema`` below, a visitor method may return None (leave the
    element unchanged), False (remove it), or a replacement element.
    """

    @classmethod
    def implements_visitor_method(cls, method_name: str):
        # A subclass "implements" a visitor method only when it overrides it
        # with a real function; the inherited SchemaVisitor stubs don't count.
        if not method_name.startswith("visit_"):
            return False
        try:
            method = getattr(cls, method_name)
        except AttributeError:
            return False

        if not isinstance(method, FunctionType):
            return False

        if cls.__name__ == "SchemaVisitor":
            # The SchemaVisitor class implements every visitor method.
            return True

        if method.__qualname__.startswith("SchemaVisitor"):
            # When SchemaVisitor subclass does not really implement the method.
            return False

        return True

    # pylint: disable=unused-argument
    def visit_schema(self, schema: GraphQLSchema) -> None:
        """Visit the root schema object (cannot be replaced)."""

        pass

    def visit_scalar(self, scalar: GraphQLScalarType) -> GraphQLScalarType:
        pass

    def visit_object(self, object_: GraphQLObjectType) -> GraphQLObjectType:
        pass

    def visit_field_definition(
        self,
        field: GraphQLField,
        object_type: Union[GraphQLObjectType, GraphQLInterfaceType],
    ) -> GraphQLField:
        pass

    def visit_argument_definition(
        self,
        argument: GraphQLArgument,
        field: GraphQLField,
        object_type: Union[GraphQLObjectType, GraphQLInterfaceType],
    ) -> GraphQLArgument:
        pass

    def visit_interface(self, interface: GraphQLInterfaceType) -> GraphQLInterfaceType:
        pass

    def visit_union(self, union: GraphQLUnionType) -> GraphQLUnionType:
        pass

    def visit_enum(self, type_: GraphQLEnumType) -> GraphQLEnumType:
        pass

    def visit_enum_value(
        self, value: GraphQLEnumValue, enum_type: GraphQLEnumType
    ) -> GraphQLEnumValue:
        pass

    def visit_input_object(
        self, object_: GraphQLInputObjectType
    ) -> GraphQLInputObjectType:
        pass

    def visit_input_field_definition(
        self, field: GraphQLInputField, object_type: GraphQLInputObjectType
    ) -> GraphQLInputField:
        pass
def visit_schema(
schema: GraphQLSchema,
visitor_selector: Callable[
[VisitableSchemaType, str], List["SchemaDirectiveVisitor"]
],
) -> GraphQLSchema:
"""
Helper function that calls visitor_selector and applies the resulting
visitors to the given type, with arguments [type, ...args].
"""
def call_method(
method_name: str, type_: VisitableSchemaType, *args: Any
) -> Union[VisitableSchemaType, Literal[False]]:
for visitor in visitor_selector(type_, method_name):
new_type = getattr(visitor, method_name)(type_, *args)
if new_type is None:
# Keep going without modifying type.
continue
if method_name == "visit_schema" or isinstance(type_, GraphQLSchema):
raise ValueError(
f"Method {method_name} cannot replace schema with {new_type}"
)
if new_type is False:
# Stop the loop and return False form call_method, which will cause
# the type to be removed from the schema.
del type_
return False
# Update type to the new type returned by the visitor method, so that
# later directives will see the new type, and call_method will return
# the final type.
type_ = new_type
# If there were no directives for this type object, or if all visitor
# methods returned nothing, type will be returned unmodified.
return type_
def visit( # pylint: disable=too-many-return-statements
type_: VisitableSchemaType
) -> Union[VisitableSchemaType, Literal[False]]:
"""
Recursive helper function that calls any appropriate visitor methods for
each object in the schema, then traverses the object's children (if any).
"""
if isinstance(type_, GraphQLSchema):
# Unlike the other types, the root GraphQLSchema object cannot be
# replaced by visitor methods, because that would make life very hard
# for SchemaVisitor subclasses that rely on the original schema object.
call_method("visit_schema", type_)
def _start(named_type, type_name):
if not type_name.startswith("__"):
visit(named_type)
update_each_key(type_.type_map, _start)
return type_
if isinstance(type_, GraphQLObjectType):
# Note that call_method('visit_object', type_) may not actually call any
# methods, if there are no @directive annotations associated with this
# type, or if this SchemaDirectiveVisitor subclass does not override
# the visit_object method.
new_object = cast(GraphQLObjectType, call_method("visit_object", type_))
if new_object:
visit_fields(new_object)
return new_object
if isinstance(type_, GraphQLInterfaceType):
new_interface = cast(
GraphQLInterfaceType, call_method("visit_interface", type_)
)
if new_interface:
visit_fields(new_interface)
return new_interface
if isinstance(type_, GraphQLInputObjectType):
new_input_object = cast(
GraphQLInputObjectType, call_method("visit_input_object", type_)
)
if new_input_object:
update_each_key(
new_input_object.fields,
lambda field, n: call_method(
"visit_input_field_definition", field, new_input_object
),
)
return new_input_object
if isinstance(type_, GraphQLScalarType):
return call_method("visit_scalar", type_)
if isinstance(type_, GraphQLUnionType):
return call_method("visit_union", type_)
if isinstance(type_, GraphQLEnumType):
new_enum = cast(GraphQLEnumType, call_method("visit_enum", type_))
if new_enum:
update_each_key(
new_enum.values,
lambda value, name: call_method("visit_enum_value", value, name),
)
return new_enum
raise ValueError(f"Unexpected schema type: {type_}")
    def visit_fields(type_: Union[GraphQLObjectType, GraphQLInterfaceType]):
        """Visit every field of *type_* (and every argument of each field),
        letting visitor methods replace them in place via update_each_key."""

        def _update_fields(field, _):
            # It would be nice if we could call visit(field) recursively here, but
            # GraphQLField is merely a type, not a value that can be detected using
            # an instanceof check, so we have to visit the fields in this lexical
            # context, so that TypeScript can validate the call to
            # visit_field_definition.
            new_field = call_method("visit_field_definition", field, type_)
            # While any field visitor needs a reference to the field object, some
            # field visitors may also need to know the enclosing (parent) type,
            # perhaps to determine if the parent is a GraphQLObjectType or a
            # GraphQLInterfaceType. To obtain a reference to the parent, a
            # visitor method can have a second parameter, which will be referring
            # to the parent.
            if new_field and new_field.args:
                update_each_key(
                    new_field.args,
                    lambda arg, _: call_method(
                        "visit_argument_definition", arg, new_field, type_
                    ),
                )
            return new_field

        update_each_key(type_.fields, _update_fields)
visit(schema)
# Return the original schema for convenience, even though it cannot have
# been replaced or removed by the code above.
return schema
def directive_location_to_visitor_method_name(loc: DirectiveLocation):
    """Map a DirectiveLocation to its visitor method name.

    For example, the FIELD_DEFINITION location maps to
    "visit_field_definition".
    """
    return f"visit_{loc.name.lower()}"
class SchemaDirectiveVisitor(SchemaVisitor):
    """Base class for visitors bound to a schema directive.

    One instance is created for every occurrence of a handled directive in
    the schema (see ``visit_schema_directives``).  Each instance records the
    directive's name, its arguments, and the schema element it annotates.
    """

    def __init__(self, name, args, visited_type, schema, context):
        # Directive name, without the leading "@".
        self.name = name
        # Directive arguments (coerced if the directive was declared).
        self.args = args
        # The schema element this directive occurrence is attached to.
        self.visited_type = visited_type
        self.schema = schema
        # Arbitrary user context passed through visit_schema_directives.
        self.context = context

    @classmethod
    def get_directive_declaration(cls, directive_name: str, schema: GraphQLSchema):
        """Return the GraphQLDirective declared under *directive_name* in
        *schema* (or None).  Subclasses may override this to supply their
        own declaration."""
        return schema.get_directive(directive_name)

    @classmethod
    def get_declared_directives(
        cls,
        schema: GraphQLSchema,
        directive_visitors: Dict[str, Type["SchemaDirectiveVisitor"]],
    ):
        """Collect directive declarations for all handled directives and
        validate that each visitor class implements every visitor method
        required by its directive's declared locations.

        Raises ValueError when a visitor class is missing a required method.
        """
        declared_directives: Dict[str, GraphQLDirective] = {}

        def _add_directive(decl):
            declared_directives[decl.name] = decl

        each(schema.directives, _add_directive)

        # If the visitor subclass overrides get_directive_declaration, and it
        # returns a non-null GraphQLDirective, use that instead of any directive
        # declared in the schema itself. Reasoning: if a SchemaDirectiveVisitor
        # goes to the trouble of implementing get_directive_declaration, it should
        # be able to rely on that implementation.
        def _get_overriden_directive(visitor_class, directive_name):
            decl = visitor_class.get_directive_declaration(directive_name, schema)
            if decl:
                declared_directives[directive_name] = decl

        each(directive_visitors, _get_overriden_directive)

        def _rest(decl, name):
            if not name in directive_visitors:
                # SchemaDirectiveVisitors.visit_schema_directives might be called
                # multiple times with partial directive_visitors maps, so it's not
                # necessarily an error for directive_visitors to be missing an
                # implementation of a directive that was declared in the schema.
                return

            visitor_class = directive_visitors[name]

            def _location_check(loc):
                visitor_method_name = directive_location_to_visitor_method_name(loc)

                if SchemaVisitor.implements_visitor_method(
                    visitor_method_name
                ) and not visitor_class.implements_visitor_method(visitor_method_name):
                    # While visitor subclasses may implement extra visitor methods,
                    # it's definitely a mistake if the GraphQLDirective declares itself
                    # applicable to certain schema locations, and the visitor subclass
                    # does not implement all the corresponding methods.
                    # FIX: the first fragment previously ended in "must" with no
                    # trailing space, producing "mustimplement" in the message.
                    raise ValueError(
                        f"SchemaDirectiveVisitor for @{name} must "
                        f"implement {visitor_method_name} method"
                    )

            each(decl.locations, _location_check)

        each(declared_directives, _rest)

        return declared_directives

    @classmethod
    def visit_schema_directives(
        cls,
        schema: GraphQLSchema,
        directive_visitors: Dict[str, Type["SchemaDirectiveVisitor"]],
        *,
        context: Optional[Dict[str, Any]] = None,
    ) -> Mapping[str, List["SchemaDirectiveVisitor"]]:
        """Walk *schema*, instantiating the matching visitor class for every
        handled directive occurrence, then heal the schema.

        Returns a mapping of directive name to the visitor instances that
        were created for it.
        """
        declared_directives = cls.get_declared_directives(schema, directive_visitors)

        # Map from directive names to lists of SchemaDirectiveVisitor instances
        # created while visiting the schema.
        created_visitors: Dict[str, List["SchemaDirectiveVisitor"]] = {
            k: [] for k in directive_visitors
        }

        def _visitor_selector(
            type_: VisitableSchemaType, method_name: str
        ) -> List["SchemaDirectiveVisitor"]:
            visitors: List["SchemaDirectiveVisitor"] = []
            directive_nodes = type_.ast_node.directives if type_.ast_node else None
            if directive_nodes is None:
                return visitors

            for directive_node in directive_nodes:
                directive_name = directive_node.name.value
                if directive_name not in directive_visitors:
                    continue

                visitor_class = directive_visitors[directive_name]

                # Avoid creating visitor objects if visitor_class does not override
                # the visitor method named by method_name.
                if not visitor_class.implements_visitor_method(method_name):
                    continue

                decl = declared_directives[directive_name]
                args: Dict[str, Any] = {}

                if decl:
                    # If this directive was explicitly declared, use the declared
                    # argument types (and any default values) to check, coerce, and/or
                    # supply default values for the given arguments.
                    args = get_argument_values(decl, directive_node)
                else:
                    # If this directive was not explicitly declared, just convert the
                    # argument nodes to their corresponding values.
                    for arg in directive_node.arguments:
                        args[arg.name.value] = value_from_ast_untyped(arg.value)

                # As foretold in comments near the top of the visit_schema_directives
                # method, this is where instances of the SchemaDirectiveVisitor class
                # get created and assigned names. While subclasses could override the
                # constructor method, the constructor is marked as protected, so
                # these are the only arguments that will ever be passed.
                visitors.append(
                    visitor_class(directive_name, args, type_, schema, context)
                )

            for visitor in visitors:
                created_visitors[visitor.name].append(visitor)

            return visitors

        visit_schema(schema, _visitor_selector)

        # Automatically update any references to named schema types replaced
        # during the traversal, so implementors don't have to worry about that.
        heal_schema(schema)

        return created_visitors
# Alias for the shape of schema.type_map: type name -> GraphQLNamedType.
NamedTypeMap = Dict[str, GraphQLNamedType]
def heal_schema(schema: GraphQLSchema) -> GraphQLSchema:
    """Repair references to named types after visitors may have replaced or
    renamed them, restoring the invariant schema.get_type(name).name == name.

    Returns the same *schema* object for convenience.
    """

    def heal(type_: VisitableSchemaType):
        # Dispatch on the concrete schema element and re-point any type
        # references it holds at the canonical entries in schema.type_map.
        if isinstance(type_, GraphQLSchema):
            original_type_map: NamedTypeMap = type_.type_map
            actual_named_type_map: NamedTypeMap = {}

            def _heal_original(named_type, type_name):
                if type_name.startswith("__"):
                    return None
                actual_name = named_type.name
                if actual_name.startswith("__"):
                    return None
                if actual_name in actual_named_type_map:
                    raise ValueError(f"Duplicate schema type name {actual_name}")
                actual_named_type_map[actual_name] = named_type
                # Note: we are deliberately leaving named_type in the schema by its
                # original name (which might be different from actual_name), so that
                # references by that name can be healed.
                return None

            # If any of the .name properties of the GraphQLNamedType objects in
            # schema.type_map have changed, the keys of the type map need to
            # be updated accordingly.
            each(original_type_map, _heal_original)

            # Now add back every named type by its actual name.
            def _add_back(named_type, type_name):
                original_type_map[type_name] = named_type

            each(actual_named_type_map, _add_back)

            # Directive declaration argument types can refer to named types.
            def _heal_directive_declaration(decl: GraphQLDirective):
                def _heal_arg(arg, _):
                    arg.type = heal_type(arg.type)

                if decl.args:
                    each(decl.args, _heal_arg)

            each(type_.directives, _heal_directive_declaration)

            # Recurse into every (non-introspection) named type.
            def _heal_type(named_type, type_name):
                if not type_name.startswith("__"):
                    heal(named_type)

            each(original_type_map, _heal_type)

            # Dangling references to renamed types should remain in the schema
            # during healing, but must be removed now, so that the following
            # invariant holds for all names: schema.get_type(name).name === name
            def _remove_dangling_references(_, type_name):
                if (
                    not type_name.startswith("__")
                    and type_name not in actual_named_type_map
                ):
                    return False
                return None

            update_each_key(original_type_map, _remove_dangling_references)
        elif isinstance(type_, GraphQLObjectType):
            heal_fields(type_)
            each(type_.interfaces, heal)
        elif isinstance(type_, GraphQLInterfaceType):
            heal_fields(type_)
        elif isinstance(type_, GraphQLInputObjectType):

            def _heal_field_type(field, _):
                field.type = heal_type(field.type)

            each(type_.fields, _heal_field_type)
        elif isinstance(type_, GraphQLScalarType):
            # Nothing to do.
            pass
        elif isinstance(type_, GraphQLUnionType):
            # NOTE(review): heal_type returns a (possibly replaced) type, but
            # `each` appears to be used for side effects only here, so the
            # return value is presumably discarded — confirm member types are
            # healed through schema.type_map instead.
            each(type_.types, heal_type)
        elif isinstance(type_, GraphQLEnumType):
            # Nothing to do.
            pass
        else:
            raise ValueError(f"Unexpected schema type: {type_}")

    def heal_fields(type_: Union[GraphQLObjectType, GraphQLInterfaceType]):
        # Heal each field's type, and the types of all of its arguments.
        def _heal_arg(arg, _):
            arg.type = heal_type(arg.type)

        def _heal_field(field, _):
            field.type = heal_type(field.type)
            if field.args:
                each(field.args, _heal_arg)

        each(type_.fields, _heal_field)

    def heal_type(type_: GraphQLNamedType) -> GraphQLNamedType:
        # Return the canonical version of *type_*, rebuilding list/non-null
        # wrappers around recursively healed inner types.
        # Unwrap the two known wrapper types
        if isinstance(type_, GraphQLList):
            type_ = GraphQLList(heal_type(type_.of_type))
        elif isinstance(type_, GraphQLNonNull):
            type_ = GraphQLNonNull(heal_type(type_.of_type))
        elif is_named_type(type_):
            # If a type annotation on a field or an argument or a union member is
            # any `GraphQLNamedType` with a `name`, then it must end up identical
            # to `schema.get_type(name)`, since `schema.type_map` is the source
            # of truth for all named schema types.
            named_type = cast(GraphQLNamedType, type_)
            official_type = schema.get_type(named_type.name)
            if official_type and named_type != official_type:
                return official_type
        return type_

    heal(schema)

    # Return the same schema object: healing mutates it in place.
    return schema
|
# -*- coding: utf-8 -*-
import pytest
from django.test import override_settings
from shuup.admin.views.wizard import WizardView
from shuup.testing import factories
from shuup.testing.utils import apply_request_middleware
@pytest.mark.django_db
def test_wizard_pane(rf, admin_user, settings):
    """Smoke-test WizardView with only the payment wizard pane enabled."""
    pane_spec = ["shuup.admin.modules.service_providers.views.PaymentWizardPane"]
    with override_settings(SHUUP_SETUP_WIZARD_PANE_SPEC=pane_spec):
        factories.get_default_shop()
        factories.get_default_tax_class()
        request = apply_request_middleware(rf.get("/"), user=admin_user)
        response = WizardView.as_view()(request)
        assert response.status_code == 200
|
import numpy as np
import scipy.stats as st
import scipy.special as scps
from scipy.optimize import minimize
from functools import partial
import utils
import random
from math import factorial
class MertonPricer():
    """
    European option pricer under the Merton (1976) jump-diffusion model:
    a Gaussian diffusion plus a compound Poisson process with normally
    distributed jump sizes.

    Parameters are estimated from log-return data by maximum likelihood
    (:meth:`fit`); prices are computed by Monte Carlo (:meth:`mcPricer`).
    """

    def __init__(self):
        # Model parameters (populated by fit()):
        self.mu = None    # drift of the diffusion part
        self.sig = None   # diffusion volatility
        self.lam = None   # jump intensity (expected jumps per unit time)
        self.muJ = None   # mean of the normal jump size
        self.sigJ = None  # std of the normal jump size
        # Mean correcting martingale adjustment (log E[exp(X_1)])
        self.mcm = None
        # Akaike information criterion of the fitted model
        self.aic = None

    def Merton_density(self, x, T, mu, sig, lam, muJ, sigJ, nterms=150):
        """Transition density of the log-return over horizon T.

        The density is a Poisson mixture of normals,
            sum_k Pois(k; lam*T) * N(x; mu*T + k*muJ, sig^2*T + k*sigJ^2),
        truncated after `nterms` terms of the series.
        """
        serie = 0
        for k in range(nterms):
            serie += (lam * T)**k * np.exp(-(x - mu * T - k * muJ)**2 /
                                           (2 * (T * sig**2 + k * sigJ**2))) \
                / (factorial(k) * np.sqrt(2 * np.pi * (sig**2 * T + k * sigJ**2)))
        # exp(-lam*T) * (lam*T)^k / k! is the Poisson weight of k jumps.
        return np.exp(-lam * T) * serie

    def cf_mert(self, u, t, mu, sig, lam, muJ, sigJ):
        """Characteristic function E[exp(i*u*X_t)] of the Merton log-return."""
        return np.exp(t * (1j * u * mu - 0.5 * u**2 * sig**2
                           + lam * (np.exp(1j * u * muJ - 0.5 * u**2 * sigJ**2) - 1)))

    def log_likely_Merton(self, x, data, T):
        """Negative log-likelihood of `data` for x = [mu, sig, lam, muJ, sigJ]
        (the quantity minimized by fit())."""
        return (-1) * np.sum(np.log(self.Merton_density(data, T, x[0], x[1], x[2], x[3], x[4])))

    def fit(self, data, T):
        """Estimate (mu, sig, lam, muJ, sigJ) by maximum likelihood, then set
        the martingale correction `mcm` and the model AIC.

        NOTE: scipy's Nelder-Mead method does not support `constraints`;
        they are silently ignored (scipy only warns), so positivity of
        sig/sigJ is not actually enforced.  Kept as-is to preserve the
        original estimation behavior.
        """
        cons = [{'type': 'ineq', 'fun': lambda x: x[1]},
                {'type': 'ineq', 'fun': lambda x: x[4]}]
        a = minimize(self.log_likely_Merton,
                     x0=[data.mean(), data.std(), 2, data.mean(), data.std()],
                     method='Nelder-Mead', args=(data, T), constraints=cons)
        self.mu, self.sig, self.lam, self.muJ, self.sigJ = a["x"]
        # Mean-correcting martingale adjustment log E[e^{X_1}], evaluated via
        # the characteristic function at u = -i.  (A dead closed-form
        # assignment that was immediately overwritten has been removed.)
        self.mcm = np.log(self.cf_mert(u=-1j,
                                       t=1,
                                       mu=self.mu,
                                       sig=self.sig,
                                       lam=self.lam,
                                       muJ=self.muJ,
                                       sigJ=self.sigJ))
        # AIC = 2k + 2 * minimized negative log-likelihood, k = 5 parameters.
        self.aic = 2 * 5 + 2 * a["fun"]

    def mcPricer(self, K, r, S0, payoff, N, T):
        """Monte Carlo price of a European option with strike K, rate r,
        spot S0 and maturity T, using N simulated paths.

        Returns (price, standard_error_of_mean).
        """
        W = st.norm.rvs(0, 1, N)                   # Gaussian part
        P = st.poisson.rvs(self.lam * T, size=N)   # Poisson number of arrivals
        Jumps = np.asarray([st.norm.rvs(self.muJ, self.sigJ, ind).sum() for ind in P])
        # Risk-neutral terminal price: drift r corrected by mcm so the
        # discounted price process is a martingale.
        S_T = S0 * np.exp((r - self.mcm) * T + np.sqrt(T) * self.sig * W + Jumps)
        S_T = S_T.reshape((N, 1))
        # Compute the discounted payoff once (the original evaluated it twice).
        discounted = np.exp(-r * T) * utils.payoff(S=S_T, K=K, payoff=payoff)
        option = np.mean(discounted, axis=0)[0]          # Monte Carlo mean
        option_error = st.sem(discounted, axis=0)[0]     # standard error of mean
        return option.real, option_error
|
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
# Vistrails initialization file
##############################################################################
##############################################################################
# Basic configuration
# Comment this to bypass the logging mechanism
# configuration.nologger = True
# Uncomment this to prevent VisTrails's splash screen from appearing
# configuration.showSplash = False
# Uncomment this to enable VisTrails's python shell by default
# configuration.pythonPrompt = True
# Uncomment this to switch to the non-caching execution model
# configuration.useCache = False
# Uncomment this to start VisTrails with maximized windows
# configuration.maximizeWindows = True
# Uncomment this if you run multiple monitors, to start VisTrails
# with different windows in different monitors
# configuration.multiHeads = True
# Set verbosenessLevel to 1 or 2 to enable dumping of non-critical warnings
# and information messages to stderr.
# configuration.verbosenessLevel = 1  # or 2
# Vistrails initialization file
################################################################################
##############################################################################
# VisTrails packages.
# VisTrails packages are collections of modules that provide user-specified
# functionality to VisTrails. Use addPackage to let VisTrails know which
# packages you want enabled.
# Interpackage dependencies must currently be handled manually by the user.
# For example, the spreadsheet package depends on VTK for some functionality,
# so if you want that functionality, you should add the vtk package before
# the spreadsheet package.
# The vtk package is the main visualization package for VisTrails.
addPackage('vtk')
# pythonCalc is an example package intended simply to demonstrate how to
# create new packages.
addPackage('pythonCalc')
# ImageMagick uses the ImageMagick command-line suite to perform various
# tasks on images (conversion, filtering, etc.).  Disabled by default.
#addPackage('ImageMagick')
# The spreadsheet package enables the Visualization Spreadsheet.
addPackage('spreadsheet')
# The URL package provides an easy way to download files and use them as
# regular files in VisTrails pipelines.
addPackage('URL')
# matplotlib/pylab package for plotting and histograms.
addPackage('pylab')
################################################################################
# Hooks
# Currently, there is only one hook in VisTrails: the startup hook. By adding
# arbitrary callables to the startup hook, it is possible to run user-defined
# code after all packages have been initialized, but before VisTrails runs.
# This is intended to show that it is possible to have user-defined code
# in specific places in VisTrails. If you think you need a hook somewhere that
# we haven't allowed yet, please let us know, and we'll include it in a future
# release.
def testHook():
    """Prints the Module class hierarchy to stdout.

    Intended as an example startup hook (see addStartupHook below); runs
    after all packages are initialized, so the module registry is complete.
    Note: Python 2 print-statement syntax.
    """
    def printTree(n, indent = 0):
        # `str` shadows the builtin here; harmless in this tiny scope.
        def iprint(str):
            print '%s%s' % (" " * indent, str)
        iprint('Class: %s' % n.descriptor.name)
        for c in n.children:
            printTree(c, indent+4)
    # Imported lazily: the registry only exists once VisTrails has started.
    import modules
    import modules.module_registry
    t = modules.module_registry.registry.classTree
    printTree(t)
# Uncomment this line to install the startup hook
# addStartupHook(testHook)
##############################################################################
# If you have an appropriate Qt license, you can install signal inspectors,
# which might make debugging a whole lot easier. To do that, uncomment the
# following lines.
# import qt
# connections = {}
# def connectHandler(*args):
# """This handler writes all signal connections to /tmp/signalslotnames.txt"""
# emitter = args[0].__class__.__name__
# signal = args[1]
# f = signal.find('(')
# if f == -1:
# signal = signal[1:]
# else:
# signal = signal[1:f]
# try:
# receiver = args[2].im_class.__name__
# slot = args[2].im_func.__name__
# except AttributeError:
# receiver = args[2].__self__.__class__.__name__
# slot = args[2].__class__.__name__
# entry = (emitter, signal, receiver, slot)
# print entry
# global connections
# try:
# connections[emitter].add((signal, receiver, slot))
# except:
# connections[emitter] = set(((signal, receiver, slot),))
# signals = {}
# slots = {}
# sig_count = 1
# slot_count = 1
# f = file('/tmp/connections.txt', 'w')
# f.write('digraph {\n')
# for (k, v) in connections.iteritems():
# print k, v
# recs = {}
# for (sig, rec, sl) in v:
# if not signals.has_key(sig):
# signals[sig] = sig_count
# sig_count += 1
# if not slots.has_key(sl):
# slots[sl] = slot_count
# slot_count += 1
# try:
# recs[rec].append( str(signals[sig]) + ':' + str(slots[sl]))
# except:
# recs[rec] = [str(signals[sig]) + ':' + str(slots[sl])]
# for rec, sigslotlist in recs.iteritems():
# f.write('%s -> %s [label = "%s"];\n' % (k, rec, ";".join(sigslotlist)))
# # if not entry in connections:
# # f = file('/tmp/connections.txt', 'a')
# # f.write("%s %s %s\n" % emi)
# # f.close()
# # connections.add(entry)
# f.write('}\n')
# f.close()
# f = file('/tmp/signalslotnames.txt', 'w')
# sigs = [(v, k) for (k, v) in signals.items()]
# sigs.sort()
# sls = [(v, k) for (k, v) in slots.items()]
# sls.sort()
# f.write('signals: \n')
# for (k,v) in sigs:
# f.write('%s: %s\n' % (k, v))
# f.write('slots: \n')
# for (k,v) in sls:
# f.write('%s: %s\n' % (k, v))
# This line hooks connectHandler to Qt's signals. You can use user-defined
# code here.
# qt.enableSignalDebugging(connectCall = connectHandler)
|
from direct.distributed import DistributedObject
class HolidayManager(DistributedObject.DistributedObject):
    """Distributed-object stub for the holiday manager.

    Recovered from decompiled sources (see the decompiler notes below); it
    adds nothing beyond the DistributedObject base class.
    """
    __module__ = __name__
#+++ okay decompyling
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import operator
import uuid
from functools import reduce
from itertools import chain
from django.conf import settings
from django.contrib import messages
from django.contrib.admin.models import ADDITION, CHANGE, LogEntry
from django.contrib.auth.decorators import permission_required
from django.core import signing
from django.core.urlresolvers import reverse
from django.db import models, transaction
from django.forms.formsets import formset_factory
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
from django.views.generic import DetailView, FormView, ListView
from django.views.decorators.http import require_POST
from ..attendees.exporters import BadgeExporter
from ..attendees.models import (Purchase, Ticket, TicketType, SIMCardTicket,
SupportTicket, VenueTicket)
from ..attendees.tasks import render_invoice
from ..attendees.utils import generate_invoice_number
from ..conference.models import current_conference
from .exporters import generate_badge
from .forms import (OnDeskPurchaseForm, EditOnDeskTicketForm,
NewOnDeskTicketForm, BaseOnDeskTicketFormSet, SearchForm, get_users,
get_sponsors)
def ctype(obj):
    """Return the Django ContentType corresponding to *obj*'s model."""
    # Deferred import — presumably so the content-types app need not be
    # loaded at module import time.
    from django.contrib.contenttypes.models import ContentType
    content_type = ContentType.objects.get_for_model(obj)
    return content_type
class CheckinViewMixin(object):
    """Restrict a view to users with the ``accounts.see_checkin_info``
    permission by wrapping ``dispatch``."""

    @method_decorator(permission_required('accounts.see_checkin_info'))
    def dispatch(self, *args, **kwargs):
        parent = super(CheckinViewMixin, self)
        return parent.dispatch(*args, **kwargs)
class SearchFormMixin(object):
    """Add a ``SearchForm`` bound to request.GET to the template context
    under the key ``search_form``."""

    def get_context_data(self, **kwargs):
        ctx = super(SearchFormMixin, self).get_context_data(**kwargs)
        ctx['search_form'] = SearchForm(self.request.GET)
        return ctx
class SearchView(CheckinViewMixin, SearchFormMixin, ListView):
    """Free-text ticket search for the check-in desk.

    Splits the ``query`` GET parameter into whitespace-separated terms and
    requires every term to match (icontains) at least one of
    ``search_fields``.  Renders a list of plain dicts (not model instances)
    describing ticket, purchase and buyer.
    """
    template_name = 'checkin/search.html'
    model = Ticket
    context_object_name = 'results'
    # Lookup paths matched (with __icontains) against each search term.
    search_fields = (
        'user__id',
        'user__username',
        'user__email',
        'user__profile__full_name',
        'user__profile__display_name',
        'id',
        'purchase__id',
        'purchase__company_name',
        'purchase__first_name',
        'purchase__last_name',
        'purchase__email',
        'purchase__invoice_number',
        'purchase__user__id',
        'purchase__user__username',
        'purchase__user__email',
        'purchase__user__profile__full_name',
        'purchase__user__profile__display_name',
        'simcardticket__first_name',
        'simcardticket__last_name',
        'venueticket__first_name',
        'venueticket__last_name',
    )

    def get_context_data(self, **kwargs):
        """Flag in the context whether a search was actually submitted."""
        context = super(SearchView, self).get_context_data(**kwargs)
        context['searched'] = 'query' in self.request.GET
        return context

    def get_queryset(self):
        """Build the filtered queryset: only SIM-card or venue tickets, and
        every search term must match at least one search field (terms are
        AND-ed, fields OR-ed)."""
        queryset = self.model.objects.select_related(
            'user',
            'user__profile',
            'purchase',
            'purchase__user',
            'purchase__user__profile',
            'simcardticket',
            'venueticket',
            'ticket_type',
            'ticket_type__content_type',
        ).filter(
            models.Q(simcardticket__isnull=False) |
            models.Q(venueticket__isnull=False)
        )
        for term in self.search_terms:
            queries = [
                models.Q(**{search_field + '__icontains': term})
                for search_field in self.search_fields
            ]
            queryset = queryset.filter(reduce(operator.or_, queries))
        return queryset

    def get(self, *args, **kwargs):
        """Run the search (if any terms were given) and serialize each match
        into a plain dict with 'ticket', 'purchase' and optional 'buyer'."""
        self.object_list = []
        self.search_terms = self.request.GET.get('query', '').split()
        if self.search_terms:
            tickets = self.get_queryset()
            for ticket in tickets:
                obj = {
                    'ticket': {
                        'id': ticket.id,
                    }
                }
                if ticket.user is None:
                    # Anonymous ticket: take the name from the concrete
                    # (SIM-card/venue) ticket record instead of a user profile.
                    obj['ticket'].update({
                        'full_name': ticket.real_ticket.first_name + ' ' + ticket.real_ticket.last_name,
                        'organisation': getattr(ticket.real_ticket, 'organisation', None)
                    })
                else:
                    obj['ticket'].update({
                        'user_id': ticket.user_id,
                        'username': ticket.user.username,
                        'email': ticket.user.email,
                        'full_name': ticket.user.profile.full_name,
                        'display_name': ticket.user.profile.display_name,
                        'organisation': ticket.user.profile.organisation
                    })
                obj['purchase'] = {
                    'id': ticket.purchase.id,
                    'company_name': ticket.purchase.company_name,
                    'invoice_number': ticket.purchase.invoice_number,
                    'name': ticket.purchase.first_name + ' ' + ticket.purchase.last_name,
                    'email': ticket.purchase.email
                }
                if ticket.purchase.user_id:
                    # The buyer may differ from the ticket holder.
                    obj['buyer'] = {
                        'user_id': ticket.purchase.user_id,
                        'username': ticket.purchase.user.username,
                        'email': ticket.purchase.user.email,
                        'full_name': ticket.purchase.user.profile.full_name,
                        'display_name': ticket.purchase.user.profile.display_name,
                        'organisation': ticket.purchase.user.profile.organisation
                    }
                self.object_list.append(obj)
        context = self.get_context_data(
            search_terms=self.search_terms,
            object_list=self.object_list
        )
        return self.render_to_response(context)


search_view = SearchView.as_view()
class OnDeskPurchaseView(CheckinViewMixin, SearchFormMixin, FormView):
    """Two-step on-desk purchase workflow.

    Step 1 (preview): the raw purchase + ticket forms are validated and the
    serialized form data is rendered back, cryptographically signed.
    Step 2 (post): the signed payload is verified and the purchase and its
    tickets are created inside a manually managed transaction.
    """
    form_class = OnDeskPurchaseForm
    # Salt for django.core.signing when round-tripping the preview payload.
    salt = 'pyconde.checkin.purchase'
    # Workflow stage: 'form' -> 'preview' -> 'post' (drives template choice).
    stage = 'form'
    template_name = 'checkin/ondesk_purchase_form.html'
    template_name_preview = 'checkin/ondesk_purchase_form_preview.html'
    ticket_formset_class = BaseOnDeskTicketFormSet
    ticket_form_class = NewOnDeskTicketForm
    timeout = 15*60  # seconds after which the preview timed out

    @method_decorator(permission_required('accounts.perform_purchase'))
    def dispatch(self, *args, **kwargs):
        return super(OnDeskPurchaseView, self).dispatch(*args, **kwargs)

    def get(self, request, *args, **kwargs):
        """Render the empty purchase form plus one blank ticket form."""
        form_class = self.get_form_class()
        form = self.get_form(form_class)
        formset_class = formset_factory(self.ticket_form_class,
                                        formset=self.ticket_formset_class,
                                        extra=1)
        formset = formset_class()
        return self.render_to_response(self.get_context_data(form=form,
                                                             formset=formset))

    def post(self, request, *args, **kwargs):
        """Dispatch between final submit (signed_data present) and preview
        (raw form data)."""
        if self.request.POST.get('signed_data', None) is not None:
            # Verify existing session
            if not self.verify_session():
                messages.error(request, _('Purchase session timeout or purchase already processed'))
                return HttpResponseRedirect(reverse('checkin_purchase'))
            # We do the actual submit
            return self.form_post()
        else:
            self.start_session()
            # We perform the preview
            form_class = self.get_form_class()
            form = self.get_form(form_class)
            formset_class = formset_factory(self.ticket_form_class,
                                            formset=self.ticket_formset_class,
                                            extra=1)
            formset = formset_class(data=self.request.POST)
            valid = (form.is_valid(), formset.is_valid(),)
            if all(valid):
                return self.form_valid(form, formset)
            else:
                return self.form_invalid(form, formset)

    def form_post(self):
        """Create the purchase and its tickets from the signed preview data.

        On success: commits, clears the session key, queues invoice
        rendering, redirects to the detail page.  On any error inside the
        transaction: rolls back and redirects to the purchase form.
        """
        # Do the actual booking process. We already verified the data in
        # the preview step, and use the data from signed data package.
        self.stage = 'post'
        signed_data = self.request.POST.get('signed_data')
        try:
            # Raises SignatureExpired/BadSignature, handled below.
            data = signing.loads(signed_data, salt=self.salt, max_age=self.timeout)
            with transaction.commit_manually():
                # TODO:
                # set form.email to some value
                try:
                    purchase = Purchase(**data['purchase'])
                    purchase.conference = current_conference()
                    purchase.state = 'new'
                    purchase.payment_method = 'invoice'
                    purchase.save()
                    for td in data['tickets']:
                        # The concrete ticket model class comes from the
                        # ticket type's content type.
                        ticket_type = TicketType.objects.select_related('content_type') \
                            .get(id=td['ticket_type_id'])
                        TicketClass = ticket_type.content_type.model_class()
                        ticket = TicketClass(**td)
                        ticket.purchase = purchase
                        ticket.save()
                    purchase.payment_total = purchase.calculate_payment_total()
                    purchase.save(update_fields=['payment_total'])
                    purchase.invoice_number = generate_invoice_number()
                    purchase.save(update_fields=['invoice_number'])
                    LogEntry.objects.log_action(
                        user_id=self.request.user.pk,
                        content_type_id=ctype(purchase).pk,
                        object_id=purchase.pk,
                        object_repr=force_text(purchase),
                        action_flag=ADDITION,
                        change_message='Checkin: Purchase created'
                    )
                    self.object = purchase
                except Exception as e:
                    print(e)
                    transaction.rollback()
                    # NOTE(review): "occured" is a typo in this user-facing
                    # string; left unchanged in this documentation-only pass.
                    messages.error(self.request, _('An error occured while processing the purchase'))
                    return HttpResponseRedirect(reverse('checkin_purchase'))
                else:
                    # Delete the purchase_key first in case a database error occurs
                    del self.request.session['purchase_key']
                    transaction.commit()
                    messages.success(self.request, _('Purchase successful!'))
                    render_invoice.delay(purchase_id=purchase.id,
                                         send_purchaser=False)
                    return HttpResponseRedirect(self.get_success_url())
        except signing.SignatureExpired:
            messages.error(self.request, _('Session timed out. Please restart the purchase process.'))
        except signing.BadSignature:
            messages.error(self.request, _('Invalid data. Please restart the purchase process.'))
        return HttpResponseRedirect(reverse('checkin_purchase'))

    def form_valid(self, form, formset):
        """Render the preview page with the signed, serialized form data."""
        # We allow users to preview their purchase.
        # We serialize all form data into one json object that is then
        # signed using django.core.signing
        self.stage = 'preview'
        serialized_data = self.serialize(form, formset)
        signed_data = signing.dumps(serialized_data, salt=self.salt, compress=True)
        purchase = form.cleaned_data
        tickets = []
        payment_total = 0.0
        for tform in formset.changed_forms:
            t = tform.cleaned_data
            # Copy for template access
            t['ticket_type'] = t['ticket_type_id']
            t['user'] = t['user_id']
            t['sponsor'] = t['sponsor_id']
            payment_total += t['ticket_type_id'].fee
            tickets.append(t)
        purchase['payment_total'] = payment_total
        ctx = self.get_context_data(signed_data=signed_data,
                                    purchase=purchase,
                                    tickets=tickets)
        return self.render_to_response(ctx)

    def form_invalid(self, form, formset):
        """Re-render the form stage with validation errors."""
        ctx = self.get_context_data(form=form, formset=formset)
        return self.render_to_response(ctx)

    def get_context_data(self, **kwargs):
        ctx = super(OnDeskPurchaseView, self).get_context_data(**kwargs)
        ctx['purchase_key'] = self.request.session.get('purchase_key')
        ctx['stage'] = self.stage
        if self.stage == 'preview':
            pass
        else:
            # Template needs an empty ticket form to clone via JS.
            ctx['empty_form'] = ctx['formset'].empty_form
        return ctx

    def get_success_url(self):
        return reverse('checkin_purchase_detail', kwargs={'pk': self.object.pk})

    def get_template_names(self):
        """Use the preview template during the preview stage."""
        if self.stage == 'preview':
            return [self.template_name_preview]
        return super(OnDeskPurchaseView, self).get_template_names()

    def serialize(self, form, formset):
        """Collect the raw bound-field data of the purchase form and each
        changed ticket form into a plain dict (to be signed)."""
        data = {}
        data['purchase'] = {bf.name: bf.data for bf in form}
        ticket_data = []
        for tf in formset.changed_forms:
            ticket_data.append({bf.name: bf.data for bf in tf})
        data['tickets'] = ticket_data
        return data

    def start_session(self):
        # Start new purchase session
        self.request.session['purchase_key'] = force_text(uuid.uuid4())

    def verify_session(self):
        # A session is only valid if the key exists in the POST data and the
        # session and the key is not None or ''
        purchase_key_session = self.request.session.get('purchase_key', None)
        purchase_key = self.request.POST.get('purchase_key', None)
        return purchase_key and purchase_key_session == purchase_key


purchase_view = OnDeskPurchaseView.as_view()
class OnDeskPurchaseDetailView(CheckinViewMixin, SearchFormMixin, DetailView):
    """Detail page for a purchase, listing all of its concrete tickets
    (venue, SIM-card and support)."""
    model = Purchase
    template_name = 'checkin/ondesk_purchase_detail.html'

    def get_context_data(self, **kwargs):
        """Expose all concrete ticket records of this purchase as a single
        iterable under 'tickets'."""
        ctx = super(OnDeskPurchaseDetailView, self).get_context_data(**kwargs)
        venues = VenueTicket.objects.filter(purchase_id=self.object.id).all()
        sims = SIMCardTicket.objects.filter(purchase_id=self.object.id).all()
        sups = SupportTicket.objects.filter(purchase_id=self.object.id).all()
        ctx['tickets'] = chain(venues, sims, sups)
        return ctx

    def get_queryset(self):
        qs = super(OnDeskPurchaseDetailView, self).get_queryset()
        # NOTE(review): 'ticket_set' looks like a reverse relation, which
        # select_related does not follow — presumably this is silently
        # ignored on the Django version in use; confirm (prefetch_related
        # may have been intended).
        qs = qs.select_related('ticket_set__ticket_type__content_type')
        return qs


purchase_detail_view = OnDeskPurchaseDetailView.as_view()
@permission_required('accounts.see_checkin_info')
def purchase_invoice_view(request, pk):
    """Serve the already-rendered invoice file of a purchase as a download.

    If the invoice has not been exported yet, shows an error message and
    redirects back to the purchase detail page.
    """
    purchase = get_object_or_404(Purchase, pk=pk)
    if not purchase.exported:
        messages.error(request, _('Invoice not yet exported.'))
        url = reverse('checkin_purchase_detail', kwargs={'pk': purchase.pk})
        return HttpResponseRedirect(url)
    # When rendering is disabled the stored artifact is the raw JSON payload.
    ext = '.json' if settings.PURCHASE_INVOICE_DISABLE_RENDERING else '.pdf'
    filename = '%s%s' % (purchase.full_invoice_number, ext)
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="%s"' % filename
    with open(purchase.invoice_filepath, 'rb') as f:
        response.write(f.read())
    return response
@permission_required('accounts.see_checkin_info')
def purchase_badges_view(request, pk):
    """Render badges for every venue ticket belonging to the purchase."""
    purchase = get_object_or_404(Purchase, pk=pk)
    venue_tickets = VenueTicket.objects.filter(purchase_id=purchase.pk)
    return ticket_badge_view(request, venue_tickets)
@require_POST
@permission_required('accounts.see_checkin_info')
@permission_required('accounts.perform_purchase')
def purchase_update_state(request, pk, new_state):
    """Change a purchase's state and record the change in the admin log."""
    purchase = get_object_or_404(Purchase, pk=pk)
    # Map the URL keyword onto the internal purchase state value.
    state_map = {
        'paid': 'payment_received',
        'unpaid': 'invoice_created',
        'cancel': 'canceled',
    }
    state = state_map.get(new_state)
    if state is None:
        messages.warning(request, _('Invalid state.'))
    else:
        old_state = purchase.state
        purchase.state = state
        purchase.save(update_fields=['state'])
        messages.success(request, _('Purchase marked as %(state)s.') % {
            'state': new_state})
        # Keep an audit trail of manual state changes.
        LogEntry.objects.log_action(
            user_id=request.user.pk,
            content_type_id=ctype(purchase).pk,
            object_id=purchase.pk,
            object_repr=force_text(purchase),
            action_flag=CHANGE,
            change_message='Checkin: state changed from %s to %s' % (old_state, state)
        )
    url = reverse('checkin_purchase_detail', kwargs={'pk': purchase.pk})
    return HttpResponseRedirect(url)
class OnDeskTicketUpdateView(CheckinViewMixin, SearchFormMixin, FormView):
    """Edit the name/organisation/user/sponsor fields of a venue ticket."""

    form_class = EditOnDeskTicketForm
    model = VenueTicket
    template_name = 'checkin/ondesk_ticket_form.html'

    def get(self, request, *args, **kwargs):
        # Resolve the ticket up front so form helpers can use self.object.
        self.object = get_object_or_404(self.model, pk=kwargs.get('pk'))
        return super(OnDeskTicketUpdateView, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.object = get_object_or_404(self.model, pk=kwargs.get('pk'))
        return super(OnDeskTicketUpdateView, self).post(request, *args, **kwargs)

    def form_valid(self, form):
        """Persist the edited fields and log the change for auditing."""
        for field, value in form.cleaned_data.items():
            setattr(self.object, field, value)
        # Only write the columns the form manages (materialize the keys so
        # the save() call gets a plain sequence).
        self.object.save(update_fields=list(form.cleaned_data.keys()))
        LogEntry.objects.log_action(
            user_id=self.request.user.pk,
            content_type_id=ctype(self.object).pk,
            object_id=self.object.pk,
            object_repr=force_text(self.object),
            action_flag=CHANGE,
            change_message='Checkin: %s' % ', '.join(
                '%s changed to %s' % (field, form.cleaned_data[field])
                for field in form.changed_data
            )
        )
        # BUG FIX: corrected the misspelling "sucessfully" in the user message.
        messages.success(self.request, _('Ticket successfully updated.'))
        return super(OnDeskTicketUpdateView, self).form_valid(form)

    def get_form_kwargs(self):
        kwargs = super(OnDeskTicketUpdateView, self).get_form_kwargs()
        # The form needs the selectable users/sponsors for its dropdowns.
        kwargs.update({
            'users': get_users(),
            'sponsors': get_sponsors()
        })
        return kwargs

    def get_initial(self):
        # Pre-fill the form with the ticket's current values.
        return {
            'first_name': self.object.first_name,
            'last_name': self.object.last_name,
            'organisation': self.object.organisation,
            'user_id': self.object.user_id,
            'sponsor_id': self.object.sponsor_id,
        }

    def get_success_url(self):
        return reverse('checkin_purchase_detail', kwargs={'pk': self.object.purchase.pk})
# URLconf entry point for the on-desk ticket edit form.
ticket_update_view = OnDeskTicketUpdateView.as_view()
@permission_required('accounts.see_checkin_info')
def ticket_badge_view(request, pk):
    """Generate a badge PDF for one ticket (by pk) or a queryset of tickets."""
    # `pk` may already be a queryset when called from purchase_badges_view.
    if isinstance(pk, models.query.QuerySet):
        tickets = pk
    else:
        tickets = VenueTicket.objects.filter(pk=pk).select_related('purchase')
    tickets = tickets.filter(canceled=False)
    count = tickets.count()
    if count == 0:
        raise Http404
    first = tickets[0]
    if first.purchase.state != 'payment_received':
        messages.error(request, _('Invoice not yet paid.'))
        url = reverse('checkin_purchase_detail', kwargs={'pk': first.purchase_id})
        return HttpResponseRedirect(url)
    exporter = BadgeExporter(tickets, 'https://ep14.org/u{uid}', indent=False)
    pdf = generate_badge(exporter.export())
    if pdf is None:
        msg = ungettext_lazy('Error generating the badge',
                             'Error generating the badges',
                             count)
        messages.error(request, msg)
        url = reverse('checkin_purchase_detail', kwargs={'pk': first.purchase_id})
        return HttpResponseRedirect(url)
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="badge.pdf"'
    response.write(pdf)
    return response
|
"""Read and Write STL Binary and ASCII Files."""
#
# import struct
#
#
# def load(inputfile):
# pass
#
#
# def dump(outputfile):
# struct.pack('<I', bboxe_count * 12)
# for bbox in bboxes:
# minx = bbox.Min.X
# miny = bbox.Min.Y
# minz = bbox.Min.Z
# maxx = bbox.Max.X
# maxy = bbox.Max.Y
# maxz = bbox.Max.Z
# facets = [
# [(0.0, -1.0, 0.0), [(minx, miny, minz),
# (maxx, miny, minz),
# (minx, miny, maxz)]],
# [(0.0, -1.0, 0.0), [(minx, miny, maxz),
# (maxx, miny, minz),
# (maxx, miny, maxz)]],
# [(0.0, 1.0, 0.0), [(minx, maxy, minz),
# (maxx, maxy, minz),
# (minx, maxy, maxz)]],
# [(0.0, 1.0, 0.0), [(minx, maxy, maxz),
# (maxx, maxy, minz),
# (maxx, maxy, maxz)]],
# [(-1.0, 0.0, 0.0), [(minx, miny, minz),
# (minx, miny, maxz),
# (minx, maxy, minz)]],
# [(-1.0, 0.0, 0.0), [(minx, maxy, minz),
# (minx, miny, maxz),
# (minx, maxy, maxz)]],
# [(1.0, 0.0, 0.0), [(maxx, miny, minz),
# (maxx, miny, maxz),
# (maxx, maxy, minz)]],
# [(1.0, 0.0, 0.0), [(maxx, maxy, minz),
# (maxx, miny, maxz),
# (maxx, maxy, maxz)]],
# [(0.0, 0.0, -1.0), [(minx, miny, minz),
# (minx, maxy, minz),
# (maxx, miny, minz)]],
# [(0.0, 0.0, -1.0), [(maxx, miny, minz),
# (minx, maxy, minz),
# (maxx, maxy, minz)]],
# [(0.0, 0.0, 1.0), [(minx, miny, maxz),
# (minx, maxy, maxz),
# (maxx, miny, maxz)]],
# [(0.0, 0.0, 1.0), [(maxx, miny, maxz),
# (minx, maxy, maxz),
# (maxx, maxy, maxz)]],
# ]
# for facet in facets:
# bboxfile.write(struct.pack('<3f', *facet[0]))
#         for vertex in facet[1]:
#             bboxfile.write(struct.pack('<3f', *vertex))
# # attribute byte count (should be 0 per specs)
# bboxfile.write('\0\0')
|
import re
import typing
from docstring import sum_numbers
def test_sum_numbers():
    """The docstring must use reST field-list style with proper indent."""
    doc = f"\n{sum_numbers.__doc__.strip()}"
    # for some lines allow variable content after colon
    expected_patterns = (
        r"Sums numbers",
        r"    :param numbers: \S.*?\n",
        r"    :type numbers: list",
        r"    :raises TypeError: \S.*?\n",
        r"    :return: \S.*?\n",
        r"    :rtype: int",
    )
    for pattern in expected_patterns:
        # prefix a newline so the indentation itself is matched too
        assert re.search(rf"\n{pattern}", doc)
def test_sum_numbers_annotatios():
    """Check the type annotations on sum_numbers.

    NOTE(review): the test name carries a historical typo ("annotatios");
    it is kept because renaming would change the collected test id.
    """
    annotations = sum_numbers.__annotations__
    assert annotations.get("numbers") == typing.List[int]
    assert annotations.get("return") == int
|
from flask import Flask,render_template
from flask_wtf import FlaskForm
from wtforms import StringField,SubmitField
# Flask application instance; SECRET_KEY is required by Flask-WTF to sign
# CSRF tokens.  NOTE(review): hard-coded secret is acceptable for a demo only.
app= Flask(__name__)
app.config['SECRET_KEY']='mysecretkey'
class InfoForm(FlaskForm):
    # Single text input asking for the dog's breed, plus a submit button.
    breed=StringField("What breed are you?")
    submit= SubmitField('Submit')
@app.route('/',methods=['GET','POST'])
def index():
    """Render the home page; after a valid submit, show the entered breed."""
    submitted_breed = False
    form = InfoForm()
    if form.validate_on_submit():
        # Capture the submission, then blank the field for the next render.
        submitted_breed = form.breed.data
        form.breed.data = ' '
    return render_template('00-home.html', form=form, breed=submitted_breed)
# Start the development server when executed directly.
if __name__== '__main__':
    app.run(debug=True)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FullTwoThinningNet(nn.Module):
    """MLP mapping a load vector of length ``n`` to scores over the
    ``max_threshold + 1`` candidate thresholds.

    The input is sorted in the forward pass, so the network only has to
    learn a function of the load distribution, not the ordering.
    NOTE(review): ``hidden_size`` is accepted but unused here.
    """

    def __init__(self, n, max_threshold, inner1_size=None, inner2_size=None, hidden_size=64,
                 device=torch.device("cuda" if torch.cuda.is_available() else "cpu")):
        super(FullTwoThinningNet, self).__init__()
        self.n = n
        self.max_threshold = max_threshold
        self.device = device
        # Default hidden widths shrink geometrically but never drop below
        # the output width (max_threshold + 1).  TODO: 256 / 128?
        if inner1_size is None:
            inner1_size = max(self.max_threshold + 1, self.n // 2)
        self.inner1_size = inner1_size
        if inner2_size is None:
            inner2_size = max(self.max_threshold + 1, self.inner1_size // 2)
        self.inner2_size = inner2_size
        # TODO: maybe try with just two layers
        self.fc = nn.Sequential(
            nn.Linear(self.n, self.inner1_size),
            nn.ReLU(),
            nn.Linear(self.inner1_size, self.inner2_size),
            nn.ReLU(),
            nn.Linear(self.inner2_size, self.max_threshold + 1),
        )
        self.to(self.device).double()

    def forward(self, x):
        sorted_loads = x.double().sort()[0].to(self.device)
        return self.fc(sorted_loads)
class FullTwoThinningOneHotNet(nn.Module):
    """MLP over a one-hot encoding of the sorted load vector.

    Each of the ``n`` loads is one-hot encoded over
    ``max_possible_load + 1`` levels, so the input layer has
    ``n * (max_possible_load + 1)`` units; the output scores the
    ``max_threshold + 1`` candidate thresholds.
    NOTE(review): ``hidden_size`` is accepted but unused here.
    """

    def __init__(self, n, max_threshold, max_possible_load, hidden_size=64, inner1_size=None, inner2_size=None,
                 device=torch.device("cuda" if torch.cuda.is_available() else "cpu")):
        super(FullTwoThinningOneHotNet, self).__init__()
        self.n = n
        self.max_possible_load = max_possible_load
        self.max_threshold = max_threshold
        self.device = device
        # Default hidden widths scale with the one-hot input size but never
        # drop below the output width.  TODO: 256 / 128?
        if inner1_size is None:
            inner1_size = max(self.max_threshold + 1,
                              (self.n // 2) * (self.max_possible_load + 1) // 2)
        self.inner1_size = inner1_size
        if inner2_size is None:
            inner2_size = max(self.max_threshold + 1, self.inner1_size // 4)
        self.inner2_size = inner2_size
        # TODO: maybe try with just two layers
        self.fc = nn.Sequential(
            nn.Linear(self.n * (self.max_possible_load + 1), self.inner1_size),
            nn.ReLU(),
            nn.Linear(self.inner1_size, self.inner2_size),
            nn.ReLU(),
            nn.Linear(self.inner2_size, self.max_threshold + 1),
        )
        self.to(self.device).double()

    def forward(self, x):
        one_hot = F.one_hot(x.sort()[0], num_classes=self.max_possible_load + 1)
        flat = one_hot.view(-1, self.n * (self.max_possible_load + 1))
        return self.fc(flat.double().to(self.device))
class FullTwoThinningRecurrentNet(nn.Module):
    """Vanilla RNN over the sorted, one-hot encoded load sequence.

    Returns the hidden state after the last load; there is no output
    projection here (see FullTwoThinningRecurrentNetFC for one).
    """

    def __init__(self, n, max_threshold, max_possible_load, hidden_size=64,
                 device=torch.device("cuda" if torch.cuda.is_available() else "cpu")):
        super(FullTwoThinningRecurrentNet, self).__init__()
        self.n = n
        self.max_possible_load = max_possible_load
        self.max_threshold = max_threshold
        self.device = device
        self.hidden_size = hidden_size
        self.rnn = nn.RNN(input_size=self.max_possible_load + 1, hidden_size=self.hidden_size, batch_first=True)
        self.to(self.device).double()

    def forward(self, x):
        sequence = F.one_hot(x.sort()[0], num_classes=self.max_possible_load + 1)
        outputs, _ = self.rnn(sequence.double().to(self.device))
        # Keep only the hidden state after the final element.
        return outputs[:, -1, :].squeeze(1)
class FullTwoThinningRecurrentNetFC(nn.Module):
    """Vanilla RNN over the sorted one-hot load sequence, plus a linear
    head projecting the final hidden state onto threshold scores."""

    def __init__(self, n, max_threshold, max_possible_load, hidden_size=64,
                 device=torch.device("cuda" if torch.cuda.is_available() else "cpu")):
        super(FullTwoThinningRecurrentNetFC, self).__init__()
        self.n = n
        self.max_possible_load = max_possible_load
        self.max_threshold = max_threshold
        self.device = device
        self.hidden_size = hidden_size  # self.max_threshold + 1
        # TODO: one extra layer converting one-hot to embedding (same for all loads)
        self.rnn = nn.RNN(input_size=self.max_possible_load + 1, hidden_size=self.hidden_size, batch_first=True)
        self.lin = nn.Linear(self.hidden_size, self.max_threshold + 1)
        self.to(self.device).double()

    def forward(self, x):
        encoded = F.one_hot(x.sort()[0], num_classes=self.max_possible_load + 1)
        outputs, _ = self.rnn(encoded.double().to(self.device))
        final_state = outputs[:, -1, :].squeeze(1)
        return self.lin(final_state)
class FullTwoThinningGRUNetFC(nn.Module):
    """GRU over the sorted one-hot load sequence, plus a linear head
    projecting the final hidden state onto threshold scores."""

    def __init__(self, n, max_threshold, max_possible_load, hidden_size=64,
                 device=torch.device("cuda" if torch.cuda.is_available() else "cpu")):
        super(FullTwoThinningGRUNetFC, self).__init__()
        self.n = n
        self.max_possible_load = max_possible_load
        self.max_threshold = max_threshold
        self.device = device
        self.hidden_size = hidden_size  # self.max_threshold + 1
        # TODO: one extra layer converting one-hot to embedding (same for all loads)
        self.rnn = nn.GRU(input_size=self.max_possible_load + 1, hidden_size=self.hidden_size, batch_first=True)
        self.lin = nn.Linear(self.hidden_size, self.max_threshold + 1)
        self.to(self.device).double()

    def forward(self, x):
        encoded = F.one_hot(x.sort()[0], num_classes=self.max_possible_load + 1)
        outputs, _ = self.rnn(encoded.double().to(self.device))
        final_state = outputs[:, -1, :].squeeze(1)
        return self.lin(final_state)
class FullTwoThinningClippedRecurrentNetFC(nn.Module):
    """Like FullTwoThinningRecurrentNetFC, but clips loads above
    ``max_possible_load`` so the one-hot encoding stays in range."""

    def __init__(self, n, max_threshold, max_possible_load, hidden_size=64,
                 device=torch.device("cuda" if torch.cuda.is_available() else "cpu")):
        super(FullTwoThinningClippedRecurrentNetFC, self).__init__()
        self.n = n
        self.max_possible_load = max_possible_load
        self.max_threshold = max_threshold
        self.device = device
        self.hidden_size = hidden_size  # self.max_threshold + 1
        # TODO: one extra layer converting one-hot to embedding (same for all loads)
        self.rnn = nn.RNN(input_size=self.max_possible_load + 1, hidden_size=self.hidden_size, batch_first=True)
        self.lin = nn.Linear(self.hidden_size, self.max_threshold + 1)
        # TODO: add softmax, pass previous threshold as argument
        self.to(self.device).double()

    def forward(self, x):
        # Loads above max_possible_load are clipped so one_hot stays in range.
        clipped = x.minimum(torch.tensor(self.max_possible_load))
        encoded = F.one_hot(clipped.sort()[0], num_classes=self.max_possible_load + 1)
        outputs, _ = self.rnn(encoded.double().to(self.device))
        return self.lin(outputs[:, -1, :].squeeze(1))
class GeneralNet(nn.Module):
    """Multi-layer RNN over the clipped, sorted, one-hot load sequence,
    followed by a configurable stack of linear layers."""

    def __init__(self, n, max_threshold, max_possible_load, hidden_size=128, rnn_num_layers=3, num_lin_layers=2,
                 device=torch.device("cuda" if torch.cuda.is_available() else "cpu")):
        super(GeneralNet, self).__init__()
        self.n = n
        self.max_possible_load = max_possible_load
        self.max_threshold = max_threshold
        self.device = device
        self.hidden_size = hidden_size  # self.max_threshold + 1
        self.rnn = nn.RNN(input_size=self.max_possible_load + 1, num_layers=rnn_num_layers,
                          hidden_size=self.hidden_size, batch_first=True)
        self.relu = nn.ReLU()
        # Hidden linear layers (each followed by the shared, stateless ReLU),
        # then a final projection onto the threshold scores.
        layers = []
        for _ in range(num_lin_layers - 1):
            layers.append(nn.Linear(self.hidden_size, self.hidden_size))
            layers.append(self.relu)
        layers.append(nn.Linear(self.hidden_size, self.max_threshold + 1))
        self.linear_block = nn.Sequential(*layers)
        self.to(self.device).double()

    def forward(self, x):
        clipped = x.minimum(torch.tensor(self.max_possible_load))
        encoded = F.one_hot(clipped.sort()[0], num_classes=self.max_possible_load + 1)
        last_hidden = self.rnn(encoded.double().to(self.device))[0][:, -1, :].squeeze(1)
        return self.linear_block(last_hidden)
|
# Asset paths for the pick-a-toon / main menu screens.
MenuBackground = 'phase_3/maps/loading_bg_clouds.jpg'
GameLogo = 'phase_3/maps/toontown-logo.png'
# NOTE(review): relies on the Panda3D global `loader` being injected into
# builtins before this module is imported -- confirm the load order.
GuiModel = loader.loadModel('phase_3/models/gui/pick_a_toon_gui')
# Sound effects for sliding the settings panorama in and out.
Settings_Open = 'phase_3/audio/sfx/panorama_settings_slide_in_01.ogg'
Settings_Close = 'phase_3/audio/sfx/panorama_settings_slide_out_01.ogg'
"""
Utilities for working with DynamoDB.
- :func:`.marshall`
- :func:`.unmarshall`
This module contains some helpers that make working with the
Amazon DynamoDB API a little less painful. Data is encoded as
`AttributeValue`_ structures in the JSON payloads and this module
defines functions that will handle the transcoding for you for
the vast majority of types that we use.
.. _AttributeValue: http://docs.aws.amazon.com/amazondynamodb/latest/
APIReference/API_AttributeValue.html
"""
import base64
import datetime
import uuid
import sys
# True when running under Python 3 (used to branch str/bytes handling).
PYTHON3 = True if sys.version_info > (3, 0, 0) else False
# Byte values that commonly appear in text; anything outside this set
# suggests the value holds binary data (see is_binary()).
TEXT_CHARS = bytearray({7, 8, 9, 10, 12, 13, 27} |
                       set(range(0x20, 0x100)) - {0x7f})
if PYTHON3:  # pragma: nocover
    # Python 3 has no `unicode` type; alias it so Python 2 checks still parse.
    unicode = str
def is_binary(value):
    """
    Check to see if a string contains binary data in Python2

    :param str|bytes value: The value to check
    :rtype: bool

    """
    # Anything left after deleting the "texty" byte values means the
    # value is (probably) binary.
    remainder = value.translate(None, TEXT_CHARS)
    return bool(remainder)
def marshall(values):
    """
    Marshall a `dict` into something DynamoDB likes.

    :param dict values: The values to marshall
    :rtype: dict
    :raises ValueError: if an unsupported type is encountered

    Returns the nested attribute-value structure that is required for
    writing the values to DynamoDB, transcoding each value recursively.

    """
    return {key: _marshall_value(item) for key, item in values.items()}
def unmarshall(values):
    """
    Transform a response payload from DynamoDB to a native dict

    :param dict values: The response payload from DynamoDB
    :rtype: dict
    :raises ValueError: if an unsupported type code is encountered

    """
    return {key: _unmarshall_dict(item) for key, item in values.items()}
def _encode_binary_set(value):
"""Base64 encode binary values in list of values.
:param set value: The list of binary values
:rtype: list
"""
return sorted([base64.b64encode(v).decode('ascii') for v in value])
def _marshall_value(value):
    """
    Recursively transform `value` into an AttributeValue `dict`

    :param mixed value: The value to encode
    :rtype: dict
    :raises ValueError: for unsupported types

    Return the value as dict indicating the data type and transform or
    recursively process the value if required.

    NOTE: branch order matters here -- e.g. ``bool`` must be tested before
    ``int`` (bool is an int subclass) and the Python 2 ``str`` sniffing
    must come before the generic numeric checks.
    """
    if PYTHON3 and isinstance(value, bytes):
        # Empty values cannot be stored; encode them as NULL instead.
        if not value:
            return {'NULL': True}
        return {'B': base64.b64encode(value).decode('ascii')}
    elif PYTHON3 and isinstance(value, str):
        if not value:
            return {'NULL': True}
        return {'S': value}
    elif not PYTHON3 and isinstance(value, str):
        # Python 2 str may hold either text or raw bytes; sniff which.
        if is_binary(value):
            return {'B': base64.b64encode(value).decode('ascii')}
        return {'S': value}
    elif not PYTHON3 and isinstance(value, unicode):
        return {'S': value.encode('utf-8')}
    elif isinstance(value, dict):
        return {'M': marshall(value)}
    elif isinstance(value, bool):
        # Must precede the (int, float) check: bool subclasses int.
        return {'BOOL': value}
    elif isinstance(value, (int, float)):
        return {'N': str(value)}
    elif isinstance(value, datetime.datetime):
        return {'S': value.isoformat()}
    elif isinstance(value, uuid.UUID):
        return {'S': str(value)}
    elif isinstance(value, list):
        return {'L': [_marshall_value(v) for v in value]}
    elif isinstance(value, set):
        # Sets map onto DynamoDB's typed set attributes (BS/SS/NS), so all
        # members must share a single type.
        if PYTHON3 and all([isinstance(v, bytes) for v in value]):
            return {'BS': _encode_binary_set(value)}
        elif PYTHON3 and all([isinstance(v, str) for v in value]):
            return {'SS': sorted(list(value))}
        elif all([isinstance(v, (int, float)) for v in value]):
            return {'NS': sorted([str(v) for v in value])}
        elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \
                all([is_binary(v) for v in value]):
            return {'BS': _encode_binary_set(value)}
        elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \
                all([is_binary(v) is False for v in value]):
            return {'SS': sorted(list(value))}
        else:
            raise ValueError('Can not mix types in a set')
    elif value is None:
        return {'NULL': True}
    raise ValueError('Unsupported type: %s' % type(value))
def _to_number(value):
"""
Convert the string containing a number to a number
:param str value: The value to convert
:rtype: float|int
"""
return float(value) if '.' in value else int(value)
def _unmarshall_dict(value):
"""Unmarshall a single dict value from a row that was returned from
DynamoDB, returning the value as a normal Python dict.
:param dict value: The value to unmarshall
:rtype: mixed
:raises ValueError: if an unsupported type code is encountered
"""
key = list(value.keys()).pop()
if key == 'B':
return base64.b64decode(value[key].encode('ascii'))
elif key == 'BS':
return set([base64.b64decode(v.encode('ascii'))
for v in value[key]])
elif key == 'BOOL':
return value[key]
elif key == 'L':
return [_unmarshall_dict(v) for v in value[key]]
elif key == 'M':
return unmarshall(value[key])
elif key == 'NULL':
return None
elif key == 'N':
return _to_number(value[key])
elif key == 'NS':
return set([_to_number(v) for v in value[key]])
elif key == 'S':
return value[key]
elif key == 'SS':
return set([v for v in value[key]])
raise ValueError('Unsupported value type: %s' % key)
|
# -*- coding: utf-8 -*-
"""
MQTTクライアントの基底オブジェクトを提供する。
paho-mqttパッケージを使用しているため、利用する場合は
インストールが必要となる。
"""
import os
import json
import yaml
import paho.mqtt.client as mqtt
class BaseClient:
    """
    Base MQTT client.

    This class bundles both publisher and subscriber functionality, so
    creating a subclass specialised for one of the two roles is
    recommended.
    """

    def __init__(self,
                 host, port=None, keepalive=None,
                 client_id=None, protocol=mqtt.MQTTv311,
                 user=None, password=None,
                 debug=False):
        """
        (common) Initialise instance variables.

        The underlying client object is created on instantiation, but no
        connection is made yet.

        Args:
            host: broker host name (required)
            port: broker port (default: None, i.e. the library default)
            client_id: client id (omit for brokers that do not use one)
            protocol: protocol version (default: mqtt.MQTTv311)
            keepalive: interval between keepalive probes, in seconds
            user: user name (omit when the broker needs no authentication)
            password: password (omit when the broker needs no authentication)
            debug: debug mode (default: False)
        """
        self.debug = debug
        if host is None:
            raise Exception('cannot connect mqtt host')
        self.host = host
        # Coerce numeric options that may arrive as strings (e.g. config values).
        if port is not None:
            port = int(port)
        self.port = port
        if keepalive is not None:
            keepalive = int(keepalive)
        self.keepalive = keepalive
        # Instantiate the MQTT client
        self.client = self.get_client(client_id, protocol)
        # Apply the authentication credentials (no-op when not supplied)
        self._set_auth(user, password)

    def get_client(self, client_id, protocol):
        """
        (common) Create the underlying MQTT broker client object.

        Only the options that were actually provided are forwarded so
        paho-mqtt's own defaults apply otherwise.

        Args:
            client_id: client id (omit for brokers that do not use one)
            protocol: protocol version (or None for the library default)
        Returns:
            client: the MQTT broker client
        """
        if client_id is None and protocol is None:
            client = mqtt.Client()
            if self.debug:
                print('client instantiate')
        elif client_id is None and protocol is not None:
            client = mqtt.Client(protocol=protocol)
            if self.debug:
                print('client instantiate with protocol')
        elif client_id is not None and protocol is None:
            client = mqtt.Client(client_id)
            if self.debug:
                print('client instantiate client_id=' + client_id)
        else:
            client = mqtt.Client(client_id, protocol=protocol)
            if self.debug:
                print('client instantiate client_id=' + client_id + ' with protocol')
        return client

    def _set_auth(self, user, password):
        """
        (common) Store authentication credentials on the client object.

        Args:
            user: user name (omit when the broker needs no authentication)
            password: password (omit when the broker needs no authentication)
        """
        if user is not None and password is not None:
            self.client.username_pw_set(user, password)
            if self.debug:
                print('set user=' + user + ', password=' + password)

    def _connect(self):
        """
        (common) Connect to the MQTT broker using the stored settings.
        """
        if self.port is None and self.keepalive is None:
            self.client.connect(self.host)
        elif self.port is None and self.keepalive is not None:
            self.client.connect(self.host, keepalive=self.keepalive)
        elif self.port is not None and self.keepalive is None:
            self.client.connect(self.host, port=self.port)
        else:
            # BUG FIX: removed a stray, unconditional debugging statement
            # `print(type(self.port))` -- every other message honours self.debug.
            self.client.connect(self.host, port=self.port, keepalive=self.keepalive)
        if self.debug:
            print('connect to mqtt broker')

    def subscribe(self, topic):
        """
        (subscriber) Start listening as a subscriber.

        Registers the callback functions, connects to the MQTT broker and
        starts the client's listening loop thread.  Call disconnect() to
        stop receiving.

        Args:
            topic: name of the topic to subscribe to
        """
        self.topic = topic
        self.client.on_connect = self._on_connect
        self.client.on_message = self._on_message
        if self.debug:
            print('set callback functions')
        self._connect()
        self.client.loop_start()
        if self.debug:
            print('loop thread started')
        if self.debug:
            print('start loop for subscriber')

    def unsubscribe(self):
        """(subscriber) Stop the listening loop thread."""
        self.client.loop_stop()
        if self.debug:
            # BUG FIX: corrected the misspelling "stoped" in the log message.
            print('loop thread stopped')

    def _on_connect(self, client, userdata, flags, response_code):
        """
        (subscriber) Connection callback: subscribe to the stored topic.

        Args:
            client, userdata, flags, response_code: paho-mqtt callback args
        """
        if self.debug:
            print('on connect code=' + str(response_code))
        client.subscribe(self.topic)
        if self.debug:
            print('subscribe topic=' + self.topic)

    def _on_message(self, client, userdata, msg):
        """
        (subscriber) Message callback: decode the payload as JSON and
        dispatch it to the on_message template method.

        Args:
            client, userdata, msg: paho-mqtt callback args
        """
        body = msg.payload.decode()
        self.payload = json.loads(body)
        if self.debug:
            print('msg.payload: ' + body)
        self.on_message(self.payload)

    def on_message(self, payload):
        """
        (subscriber) Template method invoked for every received message.

        Does nothing here; subscriber subclasses should override it and
        store the payload into their instance state as needed.

        Args:
            payload: the received message (a decoded JSON object)
        """
        if self.debug:
            print('no operation in on_message template method')

    def publish(self, topic, msg_dict):
        """
        (publisher) Connect and publish *msg_dict* as JSON on *topic*.

        Raises:
            Exception: when topic or message is missing
        """
        if topic is None:
            raise Exception('no topic')
        if msg_dict is None:
            raise Exception('no message')
        self._connect()
        self.client.publish(topic, json.dumps(msg_dict))
        if self.debug:
            print('publish topic: ' + topic + ', message: ' + str(msg_dict))

    def disconnect(self):
        """(common) Disconnect from the MQTT broker."""
        self.client.disconnect()
        if self.debug:
            print('disconnect mqtt broker')
class BrokerConfig:
    """Loads broker settings from a YAML file and exposes lookup helpers.

    The YAML file maps broker names (e.g. "ibm", "mosquitto") to stages
    (e.g. "product", "test"); the special "template" stage only documents
    the expected layout and is skipped when loading.
    """

    # Stage name reserved for documentation examples; never loaded.
    TEMPLATE = 'template'

    def __init__(self, config_path='mqtt/brokers.yml', debug=False):
        """
        Read the configuration file and store it in self.config.

        Args:
            config_path: path to the configuration file
            debug: debug mode
        Raises:
            Exception: when the path is missing or the file does not exist
        """
        self.debug = debug
        # Validate the configuration file location.
        if config_path is None:
            raise Exception('no config path')
        path = os.path.expanduser(config_path)
        if not os.path.exists(path):
            raise Exception('no config file: ' + config_path)
        # BUG FIX: use yaml.safe_load -- yaml.load without an explicit Loader
        # is deprecated and can construct arbitrary Python objects from
        # untrusted input.
        with open(path, 'r') as f:
            config = yaml.safe_load(f)
        # Broker name list, e.g. ibm, mosquitto
        self.brokers = list(config.keys())
        self.config = {}
        for broker in self.brokers:
            # Stages defined for this broker: template, product, test, ...
            stages = list(config[broker].keys())
            stage_config = {}
            for stage in stages:
                # "template" is only a layout example -- skip it.
                if stage == self.TEMPLATE:
                    continue
                stage_config[stage] = (config[broker])[stage]
            self.config[broker] = stage_config
        if self.debug:
            print(self.config)

    def get_brokers(self):
        """Return the list of known broker names."""
        return list(self.config.keys())

    def get_stages(self, broker):
        """Return the stages configured for *broker* (raises if unknown)."""
        brokers = self.get_brokers()
        if broker not in brokers:
            raise Exception('no broker:' + broker)
        return list((self.config[broker]).keys())

    def get_keys(self, broker, stage):
        """Return the setting keys for *broker*/*stage* (raises if unknown)."""
        stages = self.get_stages(broker)
        if stage not in stages:
            raise Exception('no stage:' + stage + ' in ' + broker)
        return list(((self.config[broker])[stage]).keys())

    def get_value(self, broker, stage, key):
        """Return a single setting value (raises if any level is unknown)."""
        keys = self.get_keys(broker, stage)
        if key not in keys:
            raise Exception('no key:' + key + ' in ' + stage + ' in ' + broker)
        return ((self.config[broker])[stage])[key]
|
# Type of the value: every value is an instance of some class
x = 7
print('x is {}'.format(x))
print(type(x))
# String Type: str methods like upper() return a new string
x = 'seven'.upper()
print('x is {}'.format(x))
print(type(x))
## F-strings: interpolate variables directly into the literal
a = 8
b = 9
x = f'seven {a} {b}'
print('x is {}'.format(x))
print(type(x))
### OR the equivalent str.format() call
x = 'seven {} {}'.format(8,9)
print('x is {}'.format(x))
print(type(x))
# Numeric Type
x = 7
print('x is {}'.format(x))
print(type(x))
y = 7 % 3  # Modulo: remainder of the division (original comment was wrong)
print(y)
z = 7 // 3  # Floor division: quotient rounded down (original comment was wrong)
print(z)
# Decimal avoids binary floating-point error: 0.1+0.1+0.1-0.3 is exactly 0
from decimal import *
a = Decimal('.10')
b = Decimal('.30')
x = a + a + a -b
print('x is {}'.format(x))
print(type(x))
# Boolean Type
x = True
print('x is {}'.format(x))
print(type(x))
if x:
    print("True")
else:
    print("False")
# Sequence Type: list
x = [1,2,3,4,5]
x[2]  # NOTE(review): the result of this indexing is discarded
for i in x :
    print('i is {}'.format(i))
# Lists are mutable: replace the first element
x = [1,2,3,4,5]
x[0] = 45
for i in x :
    print('i is {}'.format(i))
x = range(5,10, 2) # range(start, stop, step)
for i in x :
    print('i is {}'.format(i))
# Mapping type: dict
x = { 'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5 }
for k, v in x.items():
    print('k: {}, v: {}'.format(k, v))
# Dicts are mutable too
x = { 'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5 }
x['three'] = 42
for k, v in x.items():
    print('k: {}, v: {}'.format(k, v))
# type() and id()  # type is a class
x = (1,'two', 3, 4.0, 5)
y = (2, 4.0, 'three', [4, "four"])
print('x is {}'.format(x))
print(type(x[1]))
if x is y:  # `is` tests identity, not equality
    print("yes")
else:
    print("No")
if isinstance(y, tuple):
    print("tuple")
elif isinstance(y, list):
    print("list")
else:
    print("No")
print(id(x))
print(id(y))
from sys import exit, argv
from cs50 import SQL
import csv
# Validate the command line: exactly one CSV path is expected.
if len(argv) != 2:
    print("usage: import.py characters.csv")
    exit(1)

name = []

# Open a connection to the SQLite database students.db.
db = SQL("sqlite:///students.db")

# Read the CSV given on the command line and copy each row into the
# students table, splitting the full name into its parts.
with open(argv[1], "r") as csvfile:
    for row in csv.DictReader(csvfile):
        name = row["name"].split()
        house = row["house"]
        birth = row["birth"]
        if len(name) == 2:
            first, last = name
            db.execute("INSERT INTO students (first, middle, last, house, birth) values (?, ?, ?, ?, ?)",
                       first, None, last, house, birth)
        elif len(name) == 3:
            first, middle, last = name
            db.execute("INSERT INTO students (first, middle, last, house, birth) values (?, ?, ?, ?, ?)",
                       first, middle, last, house, birth)
exit(0)
# -*- coding: utf-8 -*-
from simmate.database.base_data_types import table_column, DatabaseTable
from django.apps import apps as django_apps
from prefect import Client
from prefect.utilities.graphql import with_args
import plotly.graph_objects as plotly_go
class EvolutionarySearch(DatabaseTable):
"""
This database table holds all of the information related to an evolutionary
search and also has convenient methods to analyze the data.
Loading Results
---------------
Typically, you'll load your search through a search id or a composition:
.. code-block:: python
from simmate.shortcuts import SearchResults
# if you know the id
search_results = SearchResults.objects.get(id=123)
# if you know the composition
search_results = SearchResults.objects.get(id="Ca2 N1")
Alternatively, you can find these out by looking at a table of all the
evolutionary searches that have been run:
.. code-block:: python
all_searches = SearchResults.objects.to_dataframe()
Viewing Results
---------------
The first thing you may want to check is the best structure found. To access
this and write it to a file:
.. code-block:: python
# loads the best structure and converts it to a pymatgen structure object
structure = search_results.best_individual.to_toolkit()
# writes it to a cif file
structure.to("cif", "best_structure.cif")
To view convergence of the search, you can use the convenient plotting methods.
Note: this will open up the plot in your default browser, so this command
won't work properly through an ssh terminal.
.. code-block:: python
search_results.view_convergence_plot()
If you are benchmarking Simmate to see if it found a particular structure,
you can use:
.. code-block:: python
from simmate.toolkit import Structure
structure = Structure.from_file("example123.cif")
search_results.view_correctness_plot(structure)
Beyond plots, you can also access a table of all calculated structures:
.. code-block:: python
dataframe = search_results.individuals_completed.to_dataframe()
"""
    class Meta:
        # Register this table under the "workflows" Django app.
        app_label = "workflows"

    # Target composition of the search, stored as a formula string
    # (e.g. "Ca2 N1").
    # consider formula_full or chemical_system by making a composition-based mixin
    composition = table_column.CharField(max_length=50) # !!! change to formula_full?

    # Name of the datatable model that stores the individuals; resolved at
    # runtime through Django's app registry (see individuals_datatable).
    individuals_datatable_str = table_column.CharField(max_length=200)

    # List of import paths for workflows used at any point. While all workflows
    # populate the individuals_datatable, they might do this in different ways.
    # For example, one may start with a ML prediction, another runs a series
    # of VASP relaxations, and another simply copies a structure from AFLOW.
    workflows = table_column.JSONField()

    # Import path that grabs the fitness value
    # I assume energy for now
    # fitness_function = table_column.CharField(max_length=200)

    # Stopping criteria: cap on the number of structures calculated, and how
    # long the best individual must survive as best before stopping.
    max_structures = table_column.IntegerField()
    limit_best_survival = table_column.IntegerField()

    # relationships
    # sources / individuals
    # stop_conditions

    # get_stats:
    #   Total structure counts
    #   makeup of random, heredity, mutation, seeds, COPEX
@property
def individuals_datatable(self):
# NOTE: this table just gives the class back and doesn't filter down
# to the relevent individuals for this search. For that, use the "individuals"
# property
# we assume the table is registered in the local_calcs app
return django_apps.get_model(
app_label="workflows",
model_name=self.individuals_datatable_str,
)
@property
def individuals(self):
# note we don't call "all()" on this queryset yet becuase this property
# it often used as a "base" queryset (and additional filters are added)
return self.individuals_datatable.objects.filter(formula_full=self.composition)
@property
def individuals_completed(self):
# If there is an energy_per_atom, we can treat the calculation as completed
return self.individuals.filter(energy_per_atom__isnull=False)
@property
def best_individual(self):
best = self.individuals_completed.order_by("energy_per_atom").first()
return best
def get_convergence_plot(self):
# Grab the calculation's structure and convert it to a dataframe
structures_dataframe = self.individuals_completed.to_dataframe()
# There's only one plot here, no subplot. So we make the scatter
# object and just pass it directly to a Figure object
scatter = plotly_go.Scatter(
x=structures_dataframe["updated_at"],
y=structures_dataframe["energy_per_atom"],
mode="markers",
)
figure = plotly_go.Figure(data=scatter)
figure.update_layout(
xaxis_title="Date Completed",
yaxis_title="Energy (eV/atom)",
)
# we return the figure object for the user
return figure
def view_convergence_plot(self):
figure = self.get_convergence_plot()
figure.show(renderer="browser")
def get_correctness_plot(self, structure_known):
    """Plot each individual's fingerprint distance from a known structure.

    Every completed individual is featurized with a partial CrystalNN site
    fingerprint; the y-axis is the Euclidean distance between that
    fingerprint and the fingerprint of `structure_known`, plotted against
    completion date and colored by energy_per_atom. Returns the plotly
    Figure.
    """
    # --------------------------------------------------------
    # This code is from simmate.toolkit.validators.fingerprint.pcrystalnn
    # OPTIMIZE: There should be a convenience method to make this featurizer
    # since I use it so much
    import numpy
    from simmate.toolkit import Composition
    from matminer.featurizers.site import CrystalNNFingerprint
    from matminer.featurizers.structure.sites import PartialsSiteStatsFingerprint
    sitefingerprint_method = CrystalNNFingerprint.from_preset(
        "ops", distance_cutoffs=None, x_diff_weight=3
    )
    featurizer = PartialsSiteStatsFingerprint(
        sitefingerprint_method,
        stats=["mean", "std_dev", "minimum", "maximum"],
    )
    # Restrict the featurizer to the elements present in this search's
    # composition so all fingerprints share the same dimensionality.
    featurizer.elements_ = numpy.array(
        [element.symbol for element in Composition(self.composition).elements]
    )
    # --------------------------------------------------------
    # Grab the calculation's structure and convert it to a dataframe
    structures_dataframe = self.individuals_completed.to_dataframe()
    # because we are using the database model, we first want to convert to
    # pymatgen structures objects and add a column to the dataframe for these
    #
    # structures_dataframe["structure"] = [
    #     structure.to_toolkit() for structure in ionic_step_structures
    # ]
    #
    # BUG: the read_frame query creates a new query, so it may be a different
    # length from ionic_step_structures. For this reason, we can't iterate
    # through the queryset like in the commented out code above. Instead,
    # we need to iterate through the dataframe rows.
    # See https://github.com/chrisdev/django-pandas/issues/138 for issue
    from simmate.toolkit import Structure
    structures_dataframe["structure"] = [
        Structure.from_str(s.structure_string, fmt="POSCAR")
        for _, s in structures_dataframe.iterrows()
    ]
    from tqdm import tqdm
    # Featurize every structure (tqdm shows progress; this step can be slow).
    structures_dataframe["fingerprint"] = [
        numpy.array(featurizer.featurize(s.structure))
        for _, s in tqdm(structures_dataframe.iterrows())
    ]
    fingerprint_known = numpy.array(featurizer.featurize(structure_known))
    # Euclidean (L2) distance of each fingerprint from the reference one.
    structures_dataframe["fingerprint_distance"] = [
        numpy.linalg.norm(fingerprint_known - s.fingerprint)
        for _, s in tqdm(structures_dataframe.iterrows())
    ]
    # There's only one plot here, no subplot. So we make the scatter
    # object and just pass it directly to a Figure object
    scatter = plotly_go.Scatter(
        x=structures_dataframe["updated_at"],
        y=structures_dataframe["fingerprint_distance"],
        mode="markers",
        marker_color=structures_dataframe["energy_per_atom"],
    )
    figure = plotly_go.Figure(data=scatter)
    figure.update_layout(
        xaxis_title="Date Completed",
        yaxis_title="Distance from Known Structure",
    )
    # we return the figure object for the user
    return figure
def view_correctness_plot(self, structure_known):
    """Render the correctness plot in the default web browser."""
    self.get_correctness_plot(structure_known).show(renderer="browser")
class StructureSource(DatabaseTable):
    """One structure-generation source (e.g. random sampling, a mutation)
    attached to an evolutionary search, plus the Prefect runs it spawned.
    """
    class Meta:
        app_label = "workflows"
    # Name of the creator/transformation this source represents.
    name = table_column.CharField(max_length=50)
    # Whether the source runs continuously (steady-state) ...
    is_steadystate = table_column.BooleanField()
    # ... or only a single time.
    is_singleshot = table_column.BooleanField()
    # Keyword arguments used to configure this source.
    settings = table_column.JSONField(default=dict)
    # timestamping for when this was added to the database
    # This also gives a feel for how long the steady-state was running
    created_at = table_column.DateTimeField(auto_now_add=True)
    updated_at = table_column.DateTimeField(auto_now=True)
    # This list limits to ids that are submitted or running
    prefect_flow_run_ids = table_column.JSONField(default=list)
    # The evolutionary search this source belongs to.
    search = table_column.ForeignKey(
        EvolutionarySearch,
        on_delete=table_column.CASCADE,
        related_name="sources",
    )
    def update_flow_run_ids(self):
        """Ask Prefect which stored run ids are still active, persist the
        pruned list, and return it.
        """
        # Using our list of current run ids, we query prefect to see which of
        # these still are running or in the queue.
        # OPTIMIZE: This may be a really bad way to query Prefect...
        query = {
            "query": {
                with_args(
                    "flow_run",
                    {
                        "where": {
                            "state": {"_in": ["Running", "Scheduled"]},
                            "id": {"_in": self.prefect_flow_run_ids},
                        },
                    },
                ): ["id"]
            }
        }
        client = Client()
        result = client.graphql(query)
        # graphql gives a weird format, so I reparse it into just a list of ids
        result = [run["id"] for run in result["data"]["flow_run"]]
        # we now have our new list of IDs! Let's update it to the database
        self.prefect_flow_run_ids = result
        self.save()
        # in case we need the list of ids, we return it too
        return result
    @property
    def nprefect_flow_runs(self):
        """Number of Prefect runs currently running or scheduled."""
        # update our ids before we report how many there are.
        runs = self.update_flow_run_ids()
        # now the currently running ones is just the length of ids!
        return len(runs)
|
from all_imports import *
# Search configuration: the hashtag to watch and how many tweets to pull
# per pass through the cursor.
hashtag = '#100daysofcodechallenge'
tweetnumber = 10
# NOTE(review): `api` comes from the star import above. This `.items()`
# iterator is created once at import time and is single-use -- after one
# full pass it is exhausted; verify callers do not expect it to refill.
tweets = tweepy.Cursor(api.search, hashtag).items(tweetnumber)
def searchBot():
    """Retweet, like, and reply to recent tweets matching the hashtag.

    For each tweet found: retweet it, favorite it, reply to the first
    mentioned user, and (if the text contains "day") follow that user.
    Sleeps between tweets to respect rate limits; Tweepy errors are logged
    and skipped so one bad tweet does not kill the bot.
    """
    # BUG FIX: build a fresh cursor per call. The module-level `tweets`
    # iterator is exhausted after a single pass, which made every later
    # invocation of searchBot() a silent no-op.
    for tweet in tweepy.Cursor(api.search, hashtag).items(tweetnumber):
        try:
            tweet.retweet()
            api.create_favorite(tweet.id)
            # BUG FIX: guard against tweets with no user mentions --
            # indexing [0] raised an IndexError that the tweepy-only
            # except clause below did not catch, crashing the bot.
            mentions = tweet.entities.get('user_mentions') or []
            if mentions:
                first_mention = mentions[0]
                api.update_status('@' + first_mention['screen_name'] + ' ☑️Keep Going!!! ' + first_mention['name'], tweet.id)
                if 'day' in tweet.text.lower():
                    api.create_friendship(first_mention['id'])
            time.sleep(15)
        except tweepy.TweepError as e:
            print(e.reason)
            time.sleep(15)
# Run forever: process one batch of tweets, then pause before the next pass.
while True:
    searchBot()
    time.sleep(500)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-03 13:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Introduce versioned UN/LOCODE reference data.

    Adds LocCountry, LocSubdivison (model name misspelled upstream; renaming
    would need a follow-up migration) and LocVersion tables, drops the old
    Locode.locversion field, and attaches a LocVersion foreign key to the
    existing reference tables. Auto-generated by Django 1.9.1.
    """
    dependencies = [
        ('unlocode', '0008_locchangetags'),
    ]
    operations = [
        migrations.CreateModel(
            name='LocCountry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('alpha2code', models.CharField(max_length=2)),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='LocSubdivison',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('alpha2code', models.CharField(max_length=2)),
                ('shortcode', models.CharField(max_length=5)),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='LocVersion',
            fields=[
                ('version', models.CharField(max_length=6, primary_key=True, serialize=False)),
            ],
        ),
        migrations.RemoveField(
            model_name='locode',
            name='locversion',
        ),
        migrations.AlterField(
            model_name='locode',
            name='locchangeindicator',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.PROTECT, to='unlocode.LocChangeIndicator', to_field='changecode'),
        ),
        migrations.AlterField(
            model_name='locode',
            name='locstatus',
            field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.PROTECT, to='unlocode.LocStatus', to_field='statuscode'),
        ),
        migrations.AddField(
            model_name='locsubdivison',
            name='version',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='unlocode.LocVersion'),
        ),
        migrations.AddField(
            model_name='loccountry',
            name='version',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='unlocode.LocVersion'),
        ),
        # 'init' is a one-off default so existing rows get a version;
        # preserve_default=False keeps it out of the model state.
        migrations.AddField(
            model_name='locfunction',
            name='version',
            field=models.ForeignKey(default='init', on_delete=django.db.models.deletion.CASCADE, to='unlocode.LocVersion'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='locode',
            name='version',
            field=models.ForeignKey(default='init', on_delete=django.db.models.deletion.CASCADE, to='unlocode.LocVersion'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='locstatus',
            name='version',
            field=models.ForeignKey(default='init', on_delete=django.db.models.deletion.CASCADE, to='unlocode.LocVersion'),
            preserve_default=False,
        ),
    ]
|
from itertools import permutations
def is_valid(permutation):
    """Check the floor-puzzle constraints for one floor assignment.

    `permutation` lists the residents from the ground floor (index 0) to
    the top floor (index 4): 'L' Loes, 'M' Marja, 'N' Niels, 'E' Erik,
    'J' Joep. Returns True when all five constraints hold.
    """
    # Loes and Niels do not live on the top floor.
    if permutation[4] in ('L', 'N'):
        return False
    # Marja and Niels do not live on the ground floor.
    if permutation[0] in ('M', 'N'):
        return False
    erik = permutation.index('E')
    marja = permutation.index('M')
    # Erik lives at least one floor above Marja.
    if erik <= marja:
        return False
    joep = permutation.index('J')
    niels = permutation.index('N')
    # Joep is not directly above or below Niels.
    if abs(joep - niels) == 1:
        return False
    # Niels is not directly above or below Marja.
    if abs(niels - marja) == 1:
        return False
    return True
def solve():
    """Print and return all floor assignments satisfying the puzzle.

    Returns:
        list: every valid permutation, ground floor first. Returning the
        solutions is new but backward compatible (previously None).
    """
    people = ('L', 'M', 'N', 'E', 'J')
    solutions = []
    # permutations() is already iterable -- wrapping it in list() only
    # materialized 120 tuples for no benefit.
    for permutation in permutations(people):
        if is_valid(permutation):
            print('Valid:', permutation)
            solutions.append(permutation)
    return solutions
# Script entry point.
if __name__ == '__main__':
    solve()
|
import pickle
import numpy as np
from collections import Counter
# load pubmed paper data that is also in S2
with open('data/pubmed_s2_data.pickle', 'rb') as f:
    pubmed_s2_data = pickle.load(f)
with open('data/pubmed_s2_id_maps.pickle', 'rb') as f:
    s2id_to_pmid, pmid_to_s2id = pickle.load(f)
# import mesh
# note there are multiple tree paths for each one
term2mesh = {}
mesh2term = {}
with open('data/mtrees2018.bin', 'r') as f:
    for line in f:
        term, meshid = line.strip().split(';')
        meshid = tuple(meshid.split('.'))
        if term not in term2mesh:
            term2mesh[term] = []
        term2mesh[term].append(meshid)
        mesh2term[meshid] = term
# for each term, figure out if any ancestor on any of its tree paths has
# relevant key words: disease, vaccination, disorder, pathological or neoplasms
disease_match = {}
for term, meshids in term2mesh.items():
    # cumulative prefixes of each tree position = the term's ancestors
    meshids_cum = [[meshid[:i] for i in range(1, len(meshid) + 1)]
                   for meshid in meshids]
    tree_paths = [[mesh2term[j].lower() for j in js]
                  for js in meshids_cum]
    has_terms = [['disease' in i or
                  'vaccination' in i or
                  'disorder' in i or
                  'pathological' in i or
                  'neoplasms' in i
                  for i in tree_path]
                 for tree_path in tree_paths]
    # BUG FIX: this was np.any(np.any(has_terms)). `has_terms` is a ragged
    # list-of-lists, so np.any built an object array and tested the
    # truthiness of each *sublist* (always True when non-empty) instead of
    # the boolean flags -- and NumPy >= 1.24 rejects ragged input outright.
    # Plain any() implements the intended "any keyword on any path" check.
    disease_match[term] = any(any(tree_path) for tree_path in has_terms)
# get all the most common important mesh terms in pubmed_s2_data
counter = Counter()
pmid_to_mesh = {}
has_disease_count = 0
for pmid, trial_result in pubmed_s2_data.items():
    if trial_result['meshlist'] is not None:
        # find disease mesh terms first
        disease_mesh = np.array(
            [i for i in trial_result['meshlist']
             if i[0] in disease_match
             and disease_match[i[0]]]
        )
        if len(disease_mesh) > 0:
            # if there are important (majority-topic 'Y') terms, just get those
            disease_mesh_y = disease_mesh[disease_mesh[:, 2] == 'Y', 0]
            if len(disease_mesh_y) > 0:
                disease_mesh = disease_mesh_y
            else:  # otherwise, take all disease terms
                disease_mesh = disease_mesh[:, 0]
            counter.update(disease_mesh)
            has_disease_count += 1
            pmid_to_mesh[pmid] = disease_mesh
# persist the pmid -> disease-mesh-term mapping
with open('data/pubmed_id_mesh_map.pickle', 'wb') as f:
    pickle.dump(pmid_to_mesh, f)
|
# Time: O(n)
# Space: O(n)
# 838 weekly contest 85 5/19/2018
# There are N dominoes in a line, and we place each domino vertically upright.
#
# In the beginning,
# we simultaneously push some of the dominoes either to the left or
# to the right.
#
# After each second,
# each domino that is falling to the left pushes the adjacent domino
# on the left.
#
# Similarly, the dominoes falling to the right push their adjacent dominoes
# standing on the right.
#
# When a vertical domino has dominoes falling on it from both sides,
# it stays still due to the balance of the forces.
#
# For the purposes of this question,
# we will consider that a falling domino expends no additional force to a
# falling or already fallen domino.
#
# Given a string "S" representing the initial state. S[i] = 'L',
# if the i-th domino has been pushed to the left; S[i] = 'R',
# if the i-th domino has been pushed to the right; S[i] = '.',
# if the i-th domino has not been pushed.
#
# Return a string representing the final state.
#
# Example 1:
#
# Input: ".L.R...LR..L.."
# Output: "LL.RR.LLRRLL.."
# Example 2:
#
# Input: "RR.L"
# Output: "RR.L"
# Explanation: The first domino expends no additional force
# on the second domino.
# Note:
# - 0 <= N <= 10^5
# - String dominoes contains only 'L', 'R' and '.'
# Compatibility shim: `xrange` only exists on Python 2; alias it to `range`
# on Python 3 so the same code runs on both.
try:
    xrange  # Python 2
except NameError:
    xrange = range  # Python 3

class Solution(object):
    def pushDominoes(self, dominoes):
        """
        :type dominoes: str
        :rtype: str

        Two-pass net-force solution: a rightward sweep accumulates the
        decaying push from each 'R', a leftward sweep subtracts the push
        from each 'L'; the sign of the net force decides each domino.
        """
        size = len(dominoes)
        net_force = [0] * size
        push = 0
        # Left-to-right: positive force from 'R' dominoes.
        for idx in xrange(size):
            ch = dominoes[idx]
            if ch == 'R':
                push = size
            elif ch == 'L':
                push = 0
            else:
                push = max(push - 1, 0)
            net_force[idx] += push
        push = 0
        # Right-to-left: negative force from 'L' dominoes.
        for idx in reversed(xrange(size)):
            ch = dominoes[idx]
            if ch == 'L':
                push = size
            elif ch == 'R':
                push = 0
            else:
                push = max(push - 1, 0)
            net_force[idx] -= push
        return "".join(
            'R' if f > 0 else 'L' if f < 0 else '.' for f in net_force
        )

    def pushDominoes_ming(self, dominoes):
        """Segment-scanning solution: resolve each maximal run of '.'
        according to the pushed dominoes bounding it."""
        chars = list(dominoes)
        size = len(chars)
        pos = 0
        left_start = 0  # start of the run that a future 'L' would topple
        while pos < size:
            # Skip over still-standing dominoes.
            while pos < size and chars[pos] == '.':
                pos += 1
            if pos < size:
                if chars[pos] == 'L':
                    # Everything from left_start through this 'L' falls left.
                    chars[left_start:pos + 1] = 'L' * (pos + 1 - left_start)
                    pos += 1
                    left_start = pos
                elif chars[pos] == 'R':
                    nxt = pos + 1
                    while nxt < size and chars[nxt] == '.':
                        nxt += 1
                    if nxt == size or chars[nxt] == 'R':
                        # R....$ or R....R : the whole run falls right.
                        chars[pos:nxt] = 'R' * (nxt - pos)
                        pos = nxt
                    elif chars[nxt] == 'L':
                        # R....L : halves fall inward; odd middle stands.
                        half = (nxt + 1 - pos) // 2
                        chars[pos:pos + half] = 'R' * half
                        chars[nxt + 1 - half:nxt + 1] = 'L' * half
                        pos = nxt + 1
                        left_start = pos
        return ''.join(chars)
# Quick smoke tests run when the script is executed.
print(Solution().pushDominoes(".L.R...LR..L..")) # "LL.RR.LLRRLL.."
print(Solution().pushDominoes("RR.L")) # "RR.L"
|
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
class PostPagination(PageNumberPagination):
    """Page-number pagination for posts: ?page=<n>&size=<m>, default 50."""
    page_size = 50  # default size if no size parameter passed
    page_size_query_param = 'size'
    page_query_param = 'page'

    def get_paginated_response(self, data):
        # Expose results under "posts" instead of DRF's default "results".
        body = {
            'next': self.get_next_link(),
            'previous': self.get_previous_link(),
            'count': self.page.paginator.count,
            'posts': data,
        }
        return Response(body)
class CommentPagination(PageNumberPagination):
    """Page-number pagination for comments: ?page=<n>&size=<m>, default 5."""
    page_size = 5  # default size if no size parameter passed
    page_size_query_param = 'size'
    page_query_param = 'page'

    def get_paginated_response(self, data):
        # Expose results under "comments" instead of DRF's default "results".
        body = {
            'next': self.get_next_link(),
            'previous': self.get_previous_link(),
            'count': self.page.paginator.count,
            'comments': data,
        }
        return Response(body)
# (c) 2019 Francis Taylor
# This code is licensed under the MIT license (see LICENSE for details)
# This code is provided as a guide for learning. Please do not copy it blindly.
import math
# cos(x) - x^3 = 0
# x_(n+1) = x_n - f(x_n)/[dy/dx]
# abs(x_n) < 1.0e-12
# Initial guesses: x_old and x_new must differ so the convergence test in
# the loop below does not fire before the first Newton step.
x_old = 0
x_new = 1
def newton(x_old):
    """Perform one Newton-Raphson step for f(x) = cos(x) - x**3.

    Returns x_new = x_old - f(x_old) / f'(x_old), where
    f'(x) = -sin(x) - 3*x**2.
    """
    fx = math.cos(x_old) - math.pow(x_old, 3)
    dfx = -1 * math.sin(x_old) - 3 * math.pow(x_old, 2)
    return x_old - fx / dfx
i = 0
# Iterate Newton steps until two successive estimates agree to 1e-12,
# printing each intermediate estimate along the way.
while True:
    if abs(x_new - x_old) < 1.0e-12: # checks if the values have converged
        print('Solution found: {}'.format(x_new))
        break
    else:
        x_old = x_new
        x_new = newton(x_old)
        print('Iteration {}: {}'.format(i, x_new))
        i += 1
|
from django.contrib import admin
from django.urls import path,include
from .views import ReportFormView
# Route the app root to the report form view.
urlpatterns = [
    path('', ReportFormView.as_view(), name='report'),
]
|
from django.conf import settings
from django.contrib import auth
from django.contrib.sites.models import Site
from django.shortcuts import redirect
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponseForbidden
from raygun4py.middleware.django import Provider
class HerokuDemoSetupMiddleware(object):
    """
    Forces user to setup demo instance during the initial state.
    There's a chance that user will try to open newly created instance by
    typing an url in the browser window. That's why we have to ensure
    that setup view is called as the first view.
    """
    def process_request(self, request):
        # The default Site domain ("example.com") marks an instance that
        # has not been set up yet.
        current_domain = Site.objects.get(pk=1).domain
        needs_setup = (
            settings.HEROKU_DEMO
            and request.path != '/heroku-setup/'
            and current_domain == 'example.com'
        )
        if needs_setup:
            return redirect('pontoon.heroku_setup')
class RaygunExceptionMiddleware(Provider):
    """Reports unhandled exceptions to Raygun, skipping expected ones."""
    def process_exception(self, request, exception):
        # 404s and permission denials are not failures -- do not report
        # them (returning None lets Django continue normally).
        if isinstance(exception, (Http404, PermissionDenied)):
            return None
        return super(RaygunExceptionMiddleware, self).process_exception(
            request, exception
        )
class BlockedIpMiddleware(object):
    """Rejects requests from client IPs listed in settings.BLOCKED_IPS."""
    def process_request(self, request):
        forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
        if forwarded is not None:
            # If comma-separated list of IPs, take just the last one
            # http://stackoverflow.com/a/18517550
            ip = forwarded.split(',')[-1]
        else:
            ip = request.META['REMOTE_ADDR']
        ip = ip.strip()
        # Block client IP addresses via settings variable BLOCKED_IPS
        if ip in settings.BLOCKED_IPS:
            return HttpResponseForbidden('<h1>Forbidden</h1>')
        return None
class AutomaticLoginUserMiddleware(object):
    """
    This middleware automatically logs in the user specified for AUTO_LOGIN.
    """
    def process_request(self, request):
        # Only act when the feature flag is on and nobody is logged in yet.
        if not settings.AUTO_LOGIN or request.user.is_authenticated():
            return
        user = auth.authenticate(
            username=settings.AUTO_LOGIN_USERNAME,
            password=settings.AUTO_LOGIN_PASSWORD,
        )
        if user:
            request.user = user
            auth.login(request, user)
|
# Generated by Django 2.1.1 on 2018-09-16 07:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Drop the Test model and unused comment/post fields, and make
    Post.user nullable with SET_NULL so posts survive user deletion.
    Auto-generated by Django 2.1.1.
    """
    dependencies = [
        ('lw2', '0016_auto_20180916_0310'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Test',
        ),
        migrations.RemoveField(
            model_name='comment',
            name='af',
        ),
        migrations.RemoveField(
            model_name='post',
            name='af',
        ),
        migrations.RemoveField(
            model_name='post',
            name='html_body',
        ),
        migrations.RemoveField(
            model_name='post',
            name='meta',
        ),
        migrations.AlterField(
            model_name='post',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='posts', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from contextlib import contextmanager
import inspect
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.distribute as distribute_util
import oneflow.python.framework.input_blob_def as input_blob_util
import oneflow.python.framework.hob as hob
import oneflow.python.lib.core.enable_if as enable_if
import oneflow.python.framework.placement_util as placement_util
import oneflow.python.framework.placement_context as placement_ctx
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow.python.framework.runtime_mode as runtime_mode
import oneflow.python.framework.push_util as push_util
import oneflow.python.framework.session_context as session_ctx
import oneflow.python.framework.scope_util as scope_util
import oneflow.python.framework.typing as oft
import oneflow.python.framework.typing_util as oft_util
import oneflow.python.lib.core.func_inspect_util as func_inspect_util
import oneflow.python.ops as ops
import typing
import oneflow
import oneflow._oneflow_internal
import inspect
def Compile(session, function_desc, config_proto):
    """Lazily compile the job function described by `function_desc`.

    Traces the function inside an InterpretScope, stashing the job once
    right after tracing and again (under a "_after_complete" suffix) after
    CurJobBuildAndInferCtx_Complete() finalizes the graph.
    """
    with InterpretScope(session, function_desc, config_proto):
        _CompileJob(session, function_desc)
        session.StashJob(function_desc.job_func.__name__)
        oneflow._oneflow_internal.CurJobBuildAndInferCtx_Complete()
        session.StashJob(
            function_desc.job_func.__name__,
            function_desc.job_func.__name__ + "_after_complete",
        )
def EagerRun(session, function_desc, config_proto, args):
    """Eagerly interpret a global function with `args` and return its
    (remote-blob) results, refreshing the session's interface-op info."""
    with InterpretScope(session, function_desc, config_proto):
        ret = _InterpretGlobalFunction(function_desc, args)
        oneflow._oneflow_internal.CurJobBuildAndInferCtx_Complete()
        session_ctx.GetDefaultSession().UpdateInfo4InterfaceOp()
    return ret
@contextmanager
def InterpretScope(session, function_desc, config_proto):
    """Context manager establishing all state needed to build one job:
    job conf, placement, distribute strategy, runtime mode, and scope."""
    job_conf = function_desc.job_config_proto
    job_conf.set_job_name(function_desc.job_func.__name__)
    # Placement: fall back to the default machine/device ids when the
    # function does not specify a placement scope.
    placement_scope = function_desc.function_attribute.default_placement_scope
    if placement_scope is None:
        tag_and_dev_ids = placement_util.GetDefaultMachineDeviceIds(session.resource)
        hierarchy = None
    else:
        assert isinstance(placement_scope, placement_ctx.EmptyPlacementScope)
        tag_and_dev_ids = (
            placement_scope.device_tag,
            placement_scope.machine_device_ids,
        )
        hierarchy = placement_scope.hierarchy
    # Distribute strategy: consistent by default; mirrored toggles a flag
    # on the initial scope below.
    distribute_strategy = function_desc.function_attribute.default_distribute_strategy
    if distribute_strategy is None:
        distribute_strategy = distribute_util.DistributeConsistentStrategy()
    is_mirrored = isinstance(
        distribute_strategy, distribute_util.DistributeMirroredStrategy
    )
    assert isinstance(hierarchy, (list, tuple)) or hierarchy is None
    if hierarchy is not None:
        hierarchy = oneflow._oneflow_internal.Size(tuple(hierarchy))
    scope = scope_util.MakeInitialScope(
        job_conf, *tag_and_dev_ids, hierarchy, is_mirrored
    )
    # Nest all contexts: job-build ctx, distribute strategy, global runtime
    # mode, and the initial scope -- then hand control to the caller.
    with _JobBuildAndInferCtx(job_conf.job_name()), distribute_strategy:
        c_api_util.CurJobBuildAndInferCtx_SetJobConf(job_conf)
        with runtime_mode.ModeScope(runtime_mode.GLOBAL_MODE):
            with scope_util.ScopeContext(scope):
                yield
def _CompileJob(session, function_desc):
    """Trace the job function once to build its graph (lazy compilation).

    Input blob defs come either from the signature annotations or from the
    argument defaults; the traced outputs are stored on the function as
    `__oneflow_output_remote_blobs__`.
    """
    func = function_desc.job_func
    parameters = func.__oneflow_function_signature__.parameters
    if len(parameters) == 0:
        func.__oneflow_input_blob_defs__ = ()
    elif all(p.annotation is inspect._empty for _, p in parameters.items()):
        # No annotations at all: fall back to blob defs in the arg defaults.
        func.__oneflow_input_blob_defs__ = _GetArgDefault(func)
    elif all(p.annotation is not inspect._empty for _, p in parameters.items()):
        # Fully annotated: build blob defs from the annotations.
        func.__oneflow_input_blob_defs__ = _MakeInputBlobDefFromParameterSignature(
            parameters
        )
    else:
        # Partially annotated signatures are ambiguous and rejected.
        raise NotImplementedError(
            "All parameters of global function should be annotated"
        )
    inputs = _RecursiveMakeInputBlobs(func.__oneflow_input_blob_defs__)
    ret = func(*inputs)
    # Validate the traced return value against the declared return type.
    return_annotation = func.__oneflow_function_signature__.return_annotation
    oft_util.CheckReturnByAnnotation(func.__name__, ret, return_annotation)
    func.__oneflow_output_remote_blobs__ = _RecursiveMakeRetRemoteBlobs(
        ret, allow_cpu_return_op=function_desc.function_attribute.allow_cpu_return_op
    )
def _InterpretGlobalFunction(function_desc, args):
    """Eagerly run the job function on `args` and return its return ops.

    Mirrors _CompileJob's input-blob-def resolution (annotations vs.
    argument defaults) but feeds real eager inputs instead of tracing.
    """
    func = function_desc.job_func
    parameters = func.__oneflow_function_signature__.parameters
    if len(parameters) == 0:
        func.__oneflow_input_blob_defs__ = ()
    elif all(p.annotation is inspect._empty for _, p in parameters.items()):
        # No annotations at all: fall back to blob defs in the arg defaults.
        func.__oneflow_input_blob_defs__ = _GetArgDefault(func)
    elif all(p.annotation is not inspect._empty for _, p in parameters.items()):
        # Fully annotated: build blob defs from the annotations.
        func.__oneflow_input_blob_defs__ = _MakeInputBlobDefFromParameterSignature(
            parameters
        )
    else:
        # Partially annotated signatures are ambiguous and rejected.
        raise NotImplementedError(
            "All parameters of global function should be annotated"
        )
    inputs = push_util.MakeEagerInputBlobs(func.__oneflow_input_blob_defs__, args)
    ret = func(*inputs)
    # Validate the return value against the declared return type.
    return_annotation = func.__oneflow_function_signature__.return_annotation
    oft_util.CheckReturnByAnnotation(func.__name__, ret, return_annotation)
    return _RecursiveMakeRetRemoteBlobs(
        ret, allow_cpu_return_op=function_desc.function_attribute.allow_cpu_return_op
    )
@contextmanager
def _JobBuildAndInferCtx(job_name):
    """Open a job-build-and-infer context for `job_name`; always close it,
    even if the body raises."""
    c_api_util.JobBuildAndInferCtx_Open(job_name)
    try:
        yield
    finally:
        oneflow._oneflow_internal.JobBuildAndInferCtx_Close()
def _GetArgDefault(func):
    """Return input blob defs taken from `func`'s argument defaults.

    Prefers a cached copy stored on the function; otherwise clones the
    blob defs found in the defaults so tracing never mutates them.
    """
    if hasattr(func, "__oneflow_arg_default__"):
        return func.__oneflow_arg_default__
    return _CloneArgBlobDef(func_inspect_util.GetArgDefaults(func))
def _CloneArgBlobDef(args):
    """Deep-copy a (possibly nested) structure of ArgBlobDef objects,
    preserving the tuple/list/dict shape.

    Raises NotImplementedError for any other type.
    """
    if isinstance(args, input_blob_util.ArgBlobDef):
        return args.Clone()
    if isinstance(args, (tuple, list)):
        return type(args)(_CloneArgBlobDef(x) for x in args)
    if isinstance(args, dict):
        # BUG FIX: iterating a dict yields keys only, so the original
        # `for k, v in args` raised on every dict input. Use .items(),
        # matching the sibling _RecursiveMakeInputBlobs helper.
        return {k: _CloneArgBlobDef(v) for k, v in args.items()}
    raise NotImplementedError(
        "oneflow.global_function only accepts nested input blob defs"
    )
def _RecursiveMakeInputBlobs(input_blob_def):
    """Create an input op for every ArgBlobDef in a nested structure,
    preserving the tuple/list/dict shape; reject anything else."""
    if isinstance(input_blob_def, input_blob_util.ArgBlobDef):
        return ops.InputOpByArgBlobDef(input_blob_def)
    if isinstance(input_blob_def, (tuple, list)):
        return type(input_blob_def)(_RecursiveMakeInputBlobs(x) for x in input_blob_def)
    if isinstance(input_blob_def, dict):
        return {k: _RecursiveMakeInputBlobs(v) for k, v in input_blob_def.items()}
    raise NotImplementedError(
        "oneflow.global_function accepts "
        + "ArgBlobDefs or list/tuple/dict nested ArgBlobDefs as argument"
    )
def _MakeInputBlobDefFromParameterSignature(parameters):
    """Build one input blob def per annotated signature parameter."""
    return tuple(
        _RecusiveMakeInputBlobDef(param.annotation)
        for _, param in parameters.items()
    )
def _RecusiveMakeInputBlobDef(cls):
    """Convert a typing annotation into an input blob def, recursing into
    typing.Tuple; only oneflow.typing placeholders are supported."""
    if oft.OriginFrom(cls, oft.OneflowNumpyDef):
        return cls.NewInputBlobDef()
    elif oft.OriginFrom(cls, typing.Tuple):
        return tuple(_RecusiveMakeInputBlobDef(a) for a in cls.__args__)
    else:
        raise NotImplementedError(
            ("\nannotation %s" % cls)
            + "not supported"
            + "\nonly support oneflow.typing.Numpy.Placeholder, "
            "oneflow.typing.ListNumpy.Placeholder"
        )
def _RecursiveMakeRetRemoteBlobs(remote_blobs, **kwarg):
    """Wrap every returned blob in a return op, preserving nesting.

    None passes through unchanged; tuple/list/dict structures keep their
    shape; anything else raises NotImplementedError.
    """
    if remote_blobs is None:
        return None
    if isinstance(remote_blobs, oneflow._oneflow_internal.BlobDesc):
        return ops.ReturnRemoteBlob(remote_blobs, **kwarg)
    if isinstance(remote_blobs, (tuple, list)):
        return type(remote_blobs)(
            _RecursiveMakeRetRemoteBlobs(x, **kwarg) for x in remote_blobs
        )
    if isinstance(remote_blobs, dict):
        return {
            k: _RecursiveMakeRetRemoteBlobs(v, **kwarg) for k, v in remote_blobs.items()
        }
    raise NotImplementedError(
        "oneflow.global_function returns "
        + "RemoteBlob or list/tuple/dict nested RemoteBlob only"
    )
|
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Union
from typing import List
import numpy as np
from nncf.common.quantization.structs import QuantizerConfig
from nncf.common.quantization.structs import QuantizationMode
from nncf.experimental.onnx.statistics.collectors import ONNXMinMaxStatisticCollector
class QuantizerLayerParameters:
    """
    Class handles Quantizer/Dequantizer layer attributes.
    """
    def __init__(self, scale: List[float], zero_point: List[int], mode: QuantizationMode):
        # Scale factor(s) applied during (de)quantization.
        self.scale = scale
        # Integer offset(s) mapping real zero onto the quantized grid.
        self.zero_point = zero_point
        # Symmetric or asymmetric quantization.
        self.mode = mode
def calculate_scale_level(max_val: Union[float, np.ndarray], min_val: Union[float, np.ndarray],
                          num_bits: int,
                          mode: QuantizationMode) -> Union[float, np.ndarray]:
    """
    Calculates Quantizer/Dequantizer layer scale level.
    """
    if mode != QuantizationMode.SYMMETRIC:
        # Asymmetric: spread the full [min, max] range over 2**num_bits steps.
        return (max_val - min_val) / 2 ** num_bits
    # Symmetric: scale by the largest magnitude over half the signed range.
    largest_magnitude = np.maximum(np.abs(max_val), np.abs(min_val))
    return largest_magnitude / ((2 ** num_bits - 1) / 2)
def calculate_weight_quantizer_parameters(weight_tensor: np.ndarray, quantizer_config: QuantizerConfig) -> \
        QuantizerLayerParameters:
    """
    Calculates Quantizer/Dequantizer layer attributes for weight quantizer such as scale, zero_points and
    quantization mode: symmetric, asymmetric.
    """
    per_channel = quantizer_config.per_channel
    num_bits = quantizer_config.num_bits
    mode = quantizer_config.mode
    if per_channel:
        # Reduce over every axis except the first (per-output-channel stats).
        axes = tuple(range(len(weight_tensor.shape))[1:])
    else:
        axes = None
    input_high = np.amax(weight_tensor, axis=axes)
    input_low = np.amin(weight_tensor, axis=axes)
    scales = calculate_scale_level(input_high, input_low, num_bits, mode)
    # BUG FIX: `np.int` was just an alias of the builtin int and was removed
    # in NumPy 1.24; use the builtin directly (identical behavior).
    zero_points = np.zeros_like(scales, dtype=int)
    return QuantizerLayerParameters(scales.tolist(), zero_points.tolist(), mode)
def calculate_activation_quantizer_parameters(layer_statistics: ONNXMinMaxStatisticCollector,
                                              quantizer_config: QuantizerConfig) -> QuantizerLayerParameters:
    """
    Calculates Quantizer/Dequantizer layer attributes for activation quantizer such as scale, zero_points and
    quantization mode: symmetric, asymmetric.
    """
    num_bits = quantizer_config.num_bits
    statistics = layer_statistics.get_statistics()
    input_low = statistics.min_values
    input_high = statistics.max_values
    # NOTE(review): this comparison assumes scalar min/max statistics; if
    # min_values is an ndarray the truth value is ambiguous and this line
    # raises -- confirm the collector reduces to scalars.
    if input_low < 0:
        # Negative activations present: quantize symmetrically around zero.
        mode = QuantizationMode.SYMMETRIC
    else:
        mode = QuantizationMode.ASYMMETRIC
    scales = calculate_scale_level(input_high, input_low, num_bits, mode)
    # BUG FIX: `np.int` was just an alias of the builtin int and was removed
    # in NumPy 1.24; use the builtin directly (identical behavior).
    zero_points = np.zeros_like(scales, dtype=int)
    return QuantizerLayerParameters(scales.tolist(), zero_points.tolist(), mode)
|
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from test.utils import TestbedTestCase
import os
import json
import main
VALID_PNG_IMAGE_DATA = '''data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAZAAAADMCAIAAADS5mKjAAAAGXRFWHRTb2Z0d2FyZQBBZG9
iZSBJbWFnZVJlYWR5ccllPAAAAyhpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek
5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuNi1jMTQwIDc5LjE2MDQ1M
SwgMjAxNy8wNS8wNi0wMTowODoyMSAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50
YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczp4bXB
NTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdX
JjZVJlZiMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENDIDIwMTggKE1hY2ludG9zaCkiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6N
zM4MUM4RUY1NTIwMTFFODlGQUVCQTY4MkI0NkFFMjQiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6NzM4MUM4RjA1NTIwMTFFODlGQUVCQTY4MkI0NkFF
MjQiPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDo3MzgxQzhFRDU1MjAxMUU4OUZBRUJBNjgyQjQ2QUUyNCIgc3RSZWY
6ZG9jdW1lbnRJRD0ieG1wLmRpZDo3MzgxQzhFRTU1MjAxMUU4OUZBRUJBNjgyQjQ2QUUyNCIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3
g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/Ph77jDMAABezSURBVHja7N17VE1Z4Afwub2VQiqVpCISJYlKKIznT2SMCYtBwpIhj2XGZJoxzGDQGIvlm
dfyNoMwnoUSlbrSS6WnpK7opffbb//yW41Jkx5nn3vOud/PH3fdzJp979l7n+/de999zxG9e/fuMwAAPpBDFQAAXyigCqAZtbW1RUVFb9++zc3NLSgoKCkp
KS4uLi0tJY/keXl5eUVFBXksKyurrKysqqqqqakhj9XV1eR/VKqnWI88UVVV7dChg4qKCnkkzzU0NNTU1NTV1cnzzp07a2lpkcf3f6LaAYEFzSEZlJGRkZm
Z+erVK4lEklWPPCF/kickgNh5Gzo6Orq6unp6et27d9fX1yeP3bp1I0969uxJ/h3NBCKsYckgkkTp6elJSUmJiYnkSXJyclxcHGup1DaGhob9+vUzMTHp3b
u3mZkZeUJSjAzW0JoILBAUMn1LSUmJj49/+vRpbGwsyabnz58L4LjI/NHc3NzCwqJ///7kSZ8+fXr06CEvL48WR2ABn5SVlZGhU3R0dGRkpFgsJo9VVVWyc
OAGBgbW1tY2NjZWVlYkyMgQTCQSoT8gsIBb6urqUlNT3ydUeHj4w4cPKyoqUC3GxsZ2dnYN+aWtrY06QWCB1CQkJJBsCgwMvHPnzqtXr1AhzSOxNXr06BEj
RpAUwxI+AgvYkJSUFBISEhwcfPv27ZcvX6JC2obMHMeMGTN8+HASXjo6OqgQBBYwJicnhyRUQEDArVu3hLFezimDBw8eN24cyS8SXmpqaqgQBBa0RWxs7OX
Ll69duxYWFobaYIGiouLYsWP/p17Pnj1RIQgs+LTHjx+TnDp9+nRqaipqQ1psbW1dXV1JcvXp0we1gcCCfyGtQIZRfn5+Z86cyczMRIVwh4WFBUkuZ2dnS0
tL1AYCS6bV1NQ8ePDg0qVLJKfevHmDCuEyIyOj2bNnT506dejQoagNBJZsCQ4OPl2vqKgItcEvurq6M2fOnDVrFpILgSVw6enpJKT279+PHQkCYGJisnjxY
pJchoaGqA0ElnCUlJT4+fkdPHiQDKxQG8IzatQod3f3KVOmdOzYEbWBwOKxhw8fHjp06MSJE3V1dagNYZOTk/v6668XLlw4fPhw1AYCi08KCwtPnTq1c+dO
bE2QQUZGRitWrJgzZw5+vci8d8CoqKioefPmoV8BMXPmzEePHuGkYBACixk1NTUXL160tbXFWQqNDBgw4Pjx4xUVFThN2g9TwvbKy8vz9fXdtm1bfn4+agP
+i4qKysqVK5cuXYqvFDEllI6EhIT58+ejC0GrODs7h4SE4PTBlJA9MTExkydPxrkHbWZjY+Pv749TCYFFV2ho6JgxY3C+ASPMzc39/PxwWiGwmHfr1i2sqQ
MNenp6Bw8eLC8vx1mGwGLAhQsXjI2NcV4BVYqKilu3bi0rK8MZh8Bqo9u3b+NySMAmBQUFHx+fqqoqnH3Y1tAK9+/f9/T0jIqKQlU00NLS6tatW+fOnbW1t
bXqqaurd6inrKxMHpWUlOTl5d/fmF4kElXXq6lXUa+8HhlE5Ofnv3nzJjc3t6CggDxmZWXV1taihhuoqqr+/vvvixYtkpOTQ218CIHVGAkpElUksGTz8Eni
9KpnYmJiZGRkaGioo6PTtWtXklPkkZxIjL8iCTUSW4WFhe9TLDs7+3m9lJSU1NRU8p9ktit26dJl586d+OEEAqtp6enp33zzzfXr12XqqEk2WVlZmZmZ9ev
Xz9TUlPxJgokj762ysvLly5cvXrxIS0tLricWi8mfMtVABgYGe/fudXZ2xhmKwPp/xcXFXl5ee/bskYW5xrBhwwYNGmRubv4+nnh3h77MzMyEhIT4+HgyFo
6IiCBPZKGLWltbHzx4cPDgwQgsmQ4scvi7d+9euXKlUOtBJBINGTLEzs6O9HVLS8s+ffrQmNZJkUQiiYuLi4mJIeFFJvLkTwF3VxcXl127dsnyj3tkOrDI7
M/d3V14XVxZWXncuHEODg4kpMh0T0tLS0YatLa2NiUl5cmTJ2TmGBwcHB4eLsjDXL169c8//yybFwuU0cBKSkpasGBBSEiIkA5qxIgRY8eOHTlyJBlSCWwY
1TavX78myRUaGhoUFCSwa72SgfP+/fsXL16MwBK+9evXb968WRjHYm5uPnHiREdHx2HDhnFnsZyDcnNzSWb5+/tfvnw5OztbGAfVv3//s2fPDhgwQIYaUqZ
2nZFPWn19fb43maKi4vTp048dO/b8+XPsJGyturq66OhoHx8fMhQVxins6ekpOxfbkpXAIh+wM2bM4HW/1NLSWrZs2fXr10tKSpA7jJBIJKdOnZo1a5aKig
qv+4aqqurFixcRWAJx4MABMufnaV/U1dVdvXr1w4cPydAAEUMJGaEEBASQeub1F3COjo6CH3QLPLCSk5OtrKx4Op5as2ZNWFgY0oRlZMLo7e3N3+TavHkzA
ouX+LiyrqamtnTp0sDAwJqaGmSHdInF4nXr1vFuYy3Rt2/fJ0+eILB4Iz4+nndXWXBxcbly5Qp+ps81tbW15PNj4cKFioqK/OpRa9euRWDxwPbt23nUqyws
LPbv35+bm4to4LiysrLz58/z63qzRkZGsbGxCCyOkkgkdnZ2vOhJCgoKZOon1HG7sGVkZGzatIlHU8UtW7YgsDjn7NmzvOg9VlZWJ06cKC0txZnPa3V1df7
+/mQiz4teZ2Nj8+LFCwQWJ5CTnxd7rNzc3DCkEp6srKyNGzd26dKF+z3w+PHjCCwpE4vFnTp14nIv0dTU9PHxyc/Px7kt7AGXn58f929T8uWXX1ZWViKwpG
PXrl1c7hyWlpYXLlzAhk+ZEhkZOXPmTC53S21tbf6O9PkaWOXl5VxePsDdfWWcRCJZt24dl6/IfuDAAQQWS5KTk/X09LjZDzw8PNLS0nDGAlFUVOTj48PZJ
YsFCxYgsKjz8/PjYNuTz9Kff/65oKAAZyk0Ultbe+zYMQMDAw72WzMzs+zsbAQWLT/99BPXmlwkEm3YsAFXUIBPunTpkrm5OQc/a+/du4fAYlhpaemkSZO4
FlVkVEWG/TgVoeUCAgKsra25Fls7duxAYDEmMTFRW1ubU1Hl7e1dXFyM0w/ahgxquBZbM2bM4P5P7j/jRdNyql2/++67wsJCnHLQfnfv3uXUBY4HDRrE8XV
YrgfWqVOnuNOc8+fPz8nJwWkGjK9tcedbbzKVSUpKQmC1xdatWznSihMmTCDTUpxaQI+vry93rtQcFBSEwGodjtzCyNTUNDg4GKcTsKCysnL9+vUcyayTJ0
8isFrabOPGjePC172HDh3CWQQsk0gk06dP50JmcfBqy5wLrOLiYi58e+Lh4YGtVSBFISEhvXr1kvqJsGrVKgRWc58tZAom3RaytbV9+vQpThjgAjLGl5eXx
y94uBhY6enp0r2okIqKyp9//omTBLg255D6eu60adMQWP+SkZGhqakpxSZxd3fHVUCBs8RisYmJiXS/KOfChZI4EVhZWVlSvIO8gYEBLgUDvPDbb79JMbMc
HBzKy8tlPbByc3ONjIyk1Qbr1q3DaQA8kp6ePnjwYGmdL/b29tK9YKno/0JLegoKCuzs7JKSkth/aV1d3atXr9rY2HwGwDe7du1auXKlVF7ayclJir+Wk+Y
VEd++fTty5EippNXy5cuzs7ORVsBTnp6eKSkpFhYW7L90YGCgNK+bIq2hHRlYSmVkq6SkFBAQgJkFCIO3t7dUcsPFxUW21rCkspd9ypQpuHwVCEx4eLiOjg
77Z5Obm5usBNaCBQvYr98jR46gc4MgVVdXz549m/1zauPGjcIPrB9//JHlajUxMeHyFTMAGHH48GH2M+vYsWNCDixfX1+WK3TJkiW4MyDIiLi4OPanh/7+/
qwdIKvbGu7duzd69Gg2q/LEiRNz5szBl0ogO8rLy11dXa9evcraK4pEIjKD6d27t6C2NaSnp0+cOJG1l9PU1IyKikJagazp0KHDlStXfv31VzZ3GkybNo0E
pXACq6KiwsXFpbKykp2Xc3R0zMzMHDhwILovyCYvL68bN26w9nJkKsrON2ksBRY5mJiYGHZea9myZYGBgaqqqui1IMsmTJjw7NkzXV1ddl7u3LlzmzZtYmM
4R9v27dtZa6R9+/Zh5RWgQWlp6ciRI1k7Aa9evcrvRfegoCAnJyd2KuvmzZvjx4/HRytAI+7u7uxseiAzm6SkpO7du/NySlhYWMjOfjY5ObmIiAikFUCTfH
19v//+exZeqKysbNGiRXxdw1qyZEl2djbtOtLW1k5JScEvmQGasXnzZh8fHxZe6MaNG/Su20VxSrhnz57ly5ezkFZisdjQ0BA9EuCTDh06xM4Flx88eODg4
MCbwIqKiho0aBDtSunUqVN0dHTPnj3REQFa6OTJk3PnzqX9KmZmZuTcVFJS4seU0MPDg3aNaGhoPHnyBGkF0Cpz5sw5e/Ys7VdJTEz08vJivFgqgbVx48bQ
0FCq1aGgoEBewtjYGP0PoLVcXV2PHj1K+1V8fHxu377N9SlhSEgIjblrI5GRkSxMOQEEjIVVZiMjo6dPnzK4i1t+w4YNzL5FFxcXiURCtRbCwsLwnSBAOw0
dOlRFReXOnTv0XqKwsLCuru7zzz9nrERm96GycBsiNq9lASB4a9eupX3OisViLu50f/bsmZmZGdUjP3z4sJubGz4bARg0ffr0ixcv0iufwRvtMLno/u2331
KtVi8vL6QVAONOnz5NdY0lMDCQqR8GMTbCIglNcprqh8Bff/2FvgVAw+vXry0sLMgjpfI1NTWTk5PJIydGWDU1NevXr6dXm8bGxseOHUOvAqBER0fn8uXL9
MrPz8/fsWMHV6aEu3btSkxMpHe058+f79ixI3oVAD12dnbkRKZX/pYtW8ggS/qBlZWVRWNLa4NDhw5hEwMAC1asWPHVV19RzSzpB9a2bduqqqooHaGbm5u7
uzt6EgA7Dhw4YGBgQKnwo0ePhoeHt6eE9i66k5lgv379KB0eqbj4+Hh1dXV0IwDWBAYGjho1ilLhLi4uly5dktoIi+rlj319fZFWACxzcnKit8jj5+f34ME
D6YywoqOjraysKB3YmjVrGPlaAQBaq66ubsiQIZGRkTQKnzBhQpvv6NOuwJozZ86pU6doHFLfvn1jYmIYv5gOALRQaGjosGHDKBV+586dtt1Tue1TQhIolN
Lq/UwTaQUgRfb29itXrqRU+N69e9v2P7Y9sPbt20fpYFxdXZ2dndFjAKTrhx9+aP/e9CZduHAhIiKCvcCKj4/fv38/pWr65Zdf0FcApK5r165//PEHpcLbN
uJpY2AdOHCAXlr17t0bfQWAC+bOncvk1aw+cPTo0dTUVDYCKysra/fu3TSOwcDAwNPTE70EgDvo/UzY19eXjcA6fvw4pXvteHt74zeDAJzi5OQ0a9YsGiXv
3bu3sLCwVf9Lq7c1lJWVmZqa0rg9qrW19ePHj9E/ALiG3l37yCBr4cKFFEdYFy9epHQzZ6q/oAaANrOyslqxYgWNklt7Yb9Wj7DI+DAoKIjx9z106NBHjx6
hZwBwU1xcnIWFBY2SQ0JC7O3tqYywwsLCaKQVsWrVKvQJAM4aMGDA/PnzaZR85swZWlPCEydO0HjHxsbGX3zxBfoEAJdRuonhkSNH8vPzmQ+svLw8UjSNd+
zp6Ykf4gBwnLW19ZQpUxgvtrS09ObNm8wH1rVr1yoqKhh/u506dZo3bx56AwD3LVmyhEax586dYz6wWjXVbDkPD4/OnTujKwBw36RJk2gsvV+5ciUtLY3Jw
IqPj2/5sK1VZs6ciX4AwBeLFy+mUezff//NZGBRujEsCWxLS0t0AgC+mDFjhqKiorRmhS0NrPPnz9M4eKxeAfBLt27dFi1axHixISEh0dHRzARWZGRkbGws
429RW1ubjLDQAwD4hdI93u/evctMYF27do3G+3N1dcVPnQF4x9HRsVevXowX25JlrBYFFqUFLBcXF7Q9AO/Iy8vTuN8qGWGlp6e3N7DEYnFUVBTjb87U1NT
JyQltD8BHU6dOpVFscHBwewOL0m6GGTNmkJxGwwPwka2tLY1b/H1y9enTgXX9+nUaBzx+/Hi0OgB/TZ48mfEyyfCo+Uv6fSKwUlJSQkNDGX9b+vr6Q4cORZ
MD8BeNa70XFRU1vwD1icAKCQmhcajOzs4qKipocgD+srOzMzAwYLzY+/fvtz2w/P39aRzquHHj0N4AvKasrExjVtj8bqzmAqusrOzOnTuMvyElJaXhw4ejv
QH4buzYsYyXGRwcnJWV1ZbAIpNJiUTC+BsaOXKkjo4OGhuA72xsbBgvs66uTiwWtyWwPrknos2BhZYGEABDQ0MHBwcagywOBRbmgwCCQeN0joiIaHVgFRcX
09jQoKamhuvJAAiGnZ0d42WS5MnLy2tdYEVHR7f8yvAtZ29v37VrVzQzgDDY2tqKRCJmy6yurv6veyr/Z2A9fPiQxuFhAQtASPT09AYPHsx4sf91bSy5Nkw
j24PSvRgBQFpoBNZ/bVlvOrDKy8vv3btH49gGDhyIBgYQkpbft7nlyJSwyXt0NR1Yz549o7GAZWlpaWxsjAYGEJJ+/foxXmZmZmZ2dnZLAyshIYHGgfXv3x
+tCyAwZCCira3NeLFNLmM1HVhhYWE0DszW1hatCyAwKioqZmZmjBebmpra0sCKi4ujcWCGhoZoXQDhobHu3uQ+0CYC682bN0FBQZSGjmhaAOExNTVlvMzk5
OQWBVZWVlZtbS3jL6+pqamrq4umBRAeGpdLjo2NzczM/HRgxcTE0DgkOzs7NTU1NC2A8Ojp6dEolsz2Ph1YGRkZNF67R48eaFcAQTIyMqIxK/x4u0ITgRUe
Hk7jkGhs1gAALhCJRN27d2e82JSUlE8HVlpaGo1D6t27N9oVQKgGDBjAeJkfZ5Hcx/PB+Ph4GsdDI4ABgCNMTEwYL/Pj9fTGgfX69WsaB6OmpkZmuWhUAKG
isUidk5PT6BeFjQMrKSmJxsHo6Oh07twZjQogVDS2hUskkpcvXzYXWJQWsGgMFwGAO2j8nJB49epVc4H18U4tRvTq1QstCiBgmpqaXbp0oTHIai6wKO0a7d
atG1oUQMA6depEY1zS6Ac6/wqsmpqaJq9B037YNQogeDR+e9fopqpyjaaLjZa4MMICACme5o32LfwrsMjw6t27dzSOBLd6BhA8fX19xsts7ltCGpdFbpjfo
jkBhI3GT6CfP39eVVXVdGC9ePGCxmEoKyvjXoQAgkfjW8K8vLzi4uKmAys9PZ3SzJbGkQAAp9DYilVdXZ2Tk8PqlFBTU1NRURHNCYARVhsUFBQ0HViN9mgx
BQtYALKA0s/vyKyw6cCitAkLV0YGkAVkaCIvL894sU2vYVVUVFBadNfS0kJbAghehw4dNDU1GS/2wwsl/xNYJSUlH84VuT+zBQBOUVVVpbHjMjc3t4nAIhP
FmpoaGoehoaGBtgSQBTRO9qbXsN6+fUsvd9GQALKAxjdsTX9LWFhYyKNjAAAZCazS0tImAuvDpXhmqauroyEBZEHHjh0ZL/PDyR8bIywaxwAAMh1YH65sMQ
trWAAIrDYjY6nKysrGgYVFdwBoJzU1NcbLLC4ubiKwSkpKEFgAwLXAKi0tra6ubhxYRUVFlI5BSUkJDQmAwGqbqqqqJkZYlL4llJOTU1ZWRkMCyAJK06kmR
lhlZWU0XklRUVFBQQENCSALKG0JaEgn6lNCFRUVBBaAjCDnO41iG25YL/fxPzFLQ0OD0jEAgIxMCRsu665AO7Bqa2uzs7O7dOlC71tIAOACfX19SvvPG9aw
FBr+Li8vp/FKJK1wn3oAYGSEJUc7sAAAmBph/RNYH/4kGgCAuyOs2trahq1ZAACc0nBt0X8Ci9LlRgEA2qmurq5xYDX8EwAAp5CA+ldgkeHVu3fvUC8AwIP
AAgDgzZQQAAAjLAAABBYAyIyGFXYEFgBghAUAwJDGG0cBALgPgQUACCwAAAQWACCwAAAQWAAACCwAQGABACCwAAAQWACAwAIAQGABACCwAACBBQDACaL3V8
aqqqpKSEggz+XkEGEAwCEVFRXGxsba2tr/BBYAAPf9rwADAMxpA4XYbUkLAAAAAElFTkSuQmCC'''
VALID_JPG_IMAGE_DATA = '''data:image/jpeg;base64,/9j/4AAQSkZJRgABAAEBLAEsAAD/4QBEVGhpcyBpcyBhbiB1bmtub3duIEFQUCBtYXJrZX
IuIENvbXBsaWFudCBkZWNvZGVycyBtdXN0IGlnbm9yZSBpdC4K/+IARFRoaXMgaXMgYW4gdW5rbm93biBBUFAgbWFya2VyLiBDb21wbGlhbnQgZGVjb2Rlc
nMgbXVzdCBpZ25vcmUgaXQuCv/jAERUaGlzIGlzIGFuIHVua25vd24gQVBQIG1hcmtlci4gQ29tcGxpYW50IGRlY29kZXJzIG11c3QgaWdub3JlIGl0Lgr/
5ABEVGhpcyBpcyBhbiB1bmtub3duIEFQUCBtYXJrZXIuIENvbXBsaWFudCBkZWNvZGVycyBtdXN0IGlnbm9yZSBpdC4K/+UARFRoaXMgaXMgYW4gdW5rbm9
3biBBUFAgbWFya2VyLiBDb21wbGlhbnQgZGVjb2RlcnMgbXVzdCBpZ25vcmUgaXQuCv/mAERUaGlzIGlzIGFuIHVua25vd24gQVBQIG1hcmtlci4gQ29tcG
xpYW50IGRlY29kZXJzIG11c3QgaWdub3JlIGl0Lgr/5wBEVGhpcyBpcyBhbiB1bmtub3duIEFQUCBtYXJrZXIuIENvbXBsaWFudCBkZWNvZGVycyBtdXN0I
Glnbm9yZSBpdC4K/+gARFRoaXMgaXMgYW4gdW5rbm93biBBUFAgbWFya2VyLiBDb21wbGlhbnQgZGVjb2RlcnMgbXVzdCBpZ25vcmUgaXQuCv/pAERUaGlz
IGlzIGFuIHVua25vd24gQVBQIG1hcmtlci4gQ29tcGxpYW50IGRlY29kZXJzIG11c3QgaWdub3JlIGl0Lgr/6gBEVGhpcyBpcyBhbiB1bmtub3duIEFQUCB
tYXJrZXIuIENvbXBsaWFudCBkZWNvZGVycyBtdXN0IGlnbm9yZSBpdC4K/+sARFRoaXMgaXMgYW4gdW5rbm93biBBUFAgbWFya2VyLiBDb21wbGlhbnQgZG
Vjb2RlcnMgbXVzdCBpZ25vcmUgaXQuCv/sAERUaGlzIGlzIGFuIHVua25vd24gQVBQIG1hcmtlci4gQ29tcGxpYW50IGRlY29kZXJzIG11c3QgaWdub3JlI
Gl0Lgr/7QBEVGhpcyBpcyBhbiB1bmtub3duIEFQUCBtYXJrZXIuIENvbXBsaWFudCBkZWNvZGVycyBtdXN0IGlnbm9yZSBpdC4K/+4ARFRoaXMgaXMgYW4g
dW5rbm93biBBUFAgbWFya2VyLiBDb21wbGlhbnQgZGVjb2RlcnMgbXVzdCBpZ25vcmUgaXQuCv/vAERUaGlzIGlzIGFuIHVua25vd24gQVBQIG1hcmtlci4
gQ29tcGxpYW50IGRlY29kZXJzIG11c3QgaWdub3JlIGl0Lgr/wAARCAEAAQADABEAAREBAhEC/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCwwZEhMPFB0aHx
4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/9sAQwEJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyM
jIyMjIyMjIyMjIyMjIy/9sAQwIJCQkMCwwYDQ0YMiEcITIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIy/8QBogAA
AQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoLEAACAQMDAgQDBQUEBAAAAX0BAgMABBEFEiExQQYTUWEHInEUMoGRoQgjQrHBFVLR8CQzYnKCCQoWFxgZGiU
mJygpKjQ1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0t
PU1dbX2Nna4eLj5OXm5+jp6vHy8/T19vf4+foBAAMBAQEBAQEBAQEAAAAAAAABAgMEBQYHCAkKCxEAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhc
RMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaX
mJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMAAAERAhEAPwDwPZX4afiEQ2UG8Q2UHRENlBv
ENlBvENlBvENlBvENlBvENlBvENlBvENlBvENlB0RDZQbxDZQbxDZQbxDZQbxDZQbRDZQbxDZQbxDZQdEQ2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2U
G8Q2UHRENlBvENlBvENlBvEsbKD+I4hsoN4hsoOiIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDoiGyg3iGyg3iGyg3iGyg3iGyg2iGyg3iGyg
3iGyg6IhsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoOiIbKDeIbKDeIbKDeJY2UH8RxDZQbxDZQdEQ2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG
8Q2UG8Q2UHRENlBvENlBvENlBvENlBvENlBtENlBvENlBvENlB0RDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQdEQ2UG8Q2UG8Q2UG8SxsoP4j
iGyg3iGyg6IhsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoOiIbKDeIbKDeIbKDeIbKDeIbKDaIbKDeIbKDeIbKDoiGyg3iGyg3iGyg3iGyg3iGy
g3iGyg3iGyg3iGyg3iGyg6IhsoN4hsoN4hsoN4ljZQfxHENlBvENlB0RDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQdEQ2UG8Q2UG8Q2UG8Q2U
G8Q2UG0Q2UG8Q2UG8Q2UHRENlBvENlBvENlBvENlBvENlBvENlBvENlBvENlBvENlB0RDZQbxDZQbxDZQbxLGyg/iOIbKDeIbKDoiGyg3iGyg3iGyg3iGyg
3iGyg3iGyg3iGyg3iGyg3iGyg6IhsoN4hsoN4hsoN4hsoN4hsoNohsoN4hsoN4hsoOiIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDoiGyg3iG
yg3iGyg3iWNlB/EcQ2UG8Q2UHRENlBvENlBvENlBvENlBvENlBvENlBvENlBvENlBvENlB0RDZQbxDZQbxDZQbxDZQbxDZQbRDZQbxDZQbxDZQdEQ2UG8Q2
UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UHRENlBvENlBvENlBvEsbKD+I4hsoN4hsoOiIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDoiGy
g3iGyg3iGyg3iGyg3iGyg2iGyg3iGyg3iGyg6IhsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoOiIbKDeIbKDeIbKDeJY2UH8RxDZQbxDZQdEQ2U
G8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UHRENlBvENlBvENlBvENlBvENlBtENlBvENlBvENlB0RDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxD
ZQbxDZQdEQ2UG8Q2UG8Q2UG8SxsoP4jiGyg3iGyg6IhsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoOiIbKDeIbKDeIbKDeIbKDeIbKDaIbKDeIb
KDeIbKDoiGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg6IhsoN4hsoN4hsoN4ljZQfxHENlBvENlB0RDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZ
QbxDZQbxDZQdEQ2UG8Q2UG8Q2UG8Q2UG8Q2UG0Q2UG8Q2UG8Q2UHRENlBvENlBvENlBvENlBvENlBvENlBvENlBvENlBvENlB0RDZQbxDZQbxDZQbxLGyg/
iOIbKDeIbKDoiGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg6IhsoN4hsoN4hsoN4hsoN4hsoNohsoN4hsoN4hsoOiIbKDeIbKDeIbKDeIbKDeI
bKDeIbKDeIbKDeIbKDeIbKDoiGyg3iGyg3iGyg3iWNlB/EcQ2UG8Q2UHRENlBvENlBvENlBvENlBvENlBvENlBvENlBvENlBvENlB0RDZQbxDZQbxDZQbxD
ZQbxDZQbRDZQbxDZQbxDZQdEQ2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UHRENlBvENlBvENlBvEsbKD+I4hsoN4hsoOiIbKDeIbKDeIbKDeIb
KDeIbKDeIbKDeIbKDeIbKDeIbKDoiGyg3iGyg3iGyg3iGyg3iGyg2iGyg3iGyg3iGyg6IhsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoOiIbKDe
IbKDeIbKDeJY2UH8RxDZQbxDZQdEQ2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UHRENlBvENlBvENlBvENlBvENlBtENlBvENlBvENlB0RDZQbx
DZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQdEQ2UG8Q2UG8Q2UG8SxsoP4jiGyg3iGyg6IhsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoOiI
bKDeIbKDeIbKDeIbKDeIbKDaIbKDeIbKDeIbKDoiGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg6IhsoN4hsoN4hsoN4k+yg/iOIbKDeIbKDoiG
yg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg6IhsoN4hsoN4hsoN4hsoN4hsoNohsoN4hsoN4hsoOiIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDe
IbKDeIbKDoiGyg3iGyg3iGyg3iWNlB/EcQ2UG8Q2UHRENlBvENlBvENlBvENlBvENlBvENlBvENlBvENlBvENlB0RDZQbxDZQbxDZQbxDZQbxDZQbRDZQbx
DZQbxDZQdEQ2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UHRENlBvENlBvENlBvEsbKD+I4hsoN4hsoOiIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeI
bKDeIbKDeIbKDoiGyg3iGyg3iGyg3iGyg3iGyg2iGyg3iGyg3iGyg6IhsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoOiIbKDeIbKDeIbKDeJY2U
H8RxDZQbxDZQdEQ2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UHRENlBvENlBvENlBvENlBvENlBtENlBvENlBvENlB0RDZQbxDZQbxDZQbxDZQb
xDZQbxDZQbxDZQbxDZQbxDZQdEQ2UG8Q2UG8Q2UG8SxsoP4jiGyg3iGyg6IhsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoOiIbKDeIbKDeIbKDe
IbKDeIbKDaIbKDeIbKDeIbKDoiGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg6IhsoN4hsoN4hsoN4ljZQfxHENlBvENlB0RDZQbxDZQbxDZQbx
DZQbxDZQbxDZQbxDZQbxDZQbxDZQdEQ2UG8Q2UG8Q2UG8Q2UG8Q2UG0Q2UG8Q2UG8Q2UHRENlBvENlBvENlBvENlBvENlBvENlBvENlBvENlBvENlB0RDZQ
bxDZQbxDZQbxLGyg/iOIbKDeIbKDoiGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg6IhsoN4hsoN4hsoN4hsoN4hsoNohsoN4hsoN4hsoOiIbKD
eIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDoiGyg3iGyg3iGyg3iWNlB/EcQ2UG8Q2UHRENlBvENlBvENlBvENlBvENlBvENlBvENlBvENlBvENlB0
RDZQbxDZQbxDZQbxDZQbxDZQbRDZQbxDZQbxDZQdEQ2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UHRENlBvENlBvENlBvEsbKD+I4hsoN4hsoOi
IbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDoiGyg3iGyg3iGyg3iGyg3iGyg2iGyg3iGyg3iGyg6IhsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hso
N4hsoN4hsoOiIbKDeIbKDeIbKDeJY2UH8RxDZQbxDZQdEQ2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UHRENlBvENlBvENlBvENlBvENlBtENlB
vENlBvENlB0RDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQdEQ2UG8Q2UG8Q2UG8SxsoP4jiGyg3iGyg6IhsoN4hsoN4hsoN4hsoN4hsoN4hsoN
4hsoN4hsoN4hsoOiIbKDeIbKDeIbKDeIbKDeIbKDaIbKDeIbKDeIbKDoiGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg6IhsoN4hsoN4hsoN4lj
ZQfxHENlBvENlB0RDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQdEQ2UG8Q2UG8Q2UG8Q2UG8Q2UG0Q2UG8Q2UG8Q2UHRENlBvENlBvENlBvENl
BvENlBvENlBvENlBvENlBvENlB0RDZQbxDZQbxDZQbxLGyg/iOIbKDeIbKDoiGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg3iGyg6IhsoN4hsoN4hso
N4hsoN4hsoNohsoN4hsoN4hsoOiIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDoiGyg3iGyg3iGyg3iWNlB/EcQ2UG8Q2UHRENlBvENlBvENlB
vENlBvENlBvENlBvENlBvENlBvENlB0RDZQbxDZQbxDZQbxDZQbxDZQbRDZQbxDZQbxDZQdEQ2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UHREN
lBvENlBvENlBvEsbKD+I4hsoN4hsoOiIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDeIbKDoiGyg3iGyg3iGyg3iGyg3iGyg2iGyg3iGyg3iGyg6Ihs
oN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoN4hsoOiIbKDeIbKDeIbKDeJY2UH8RxDZQbxDZQdEQ2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2UG8Q2U
HRENlBvENlBvENlBvENlBvENlBtENlBvENlBvENlB0RDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQbxDZQdEQ2UG8Q2UG8Q2UG8T/9kK'''
class MainTests(TestbedTestCase):
    """HTTP-level tests for the Flask app exposed by the ``main`` module.

    Each test drives the app through the Werkzeug test client and checks
    both the HTTP status line and the JSON envelope
    (``success``/``code``/``message``/``result``) returned by the handlers.
    """

    def setUp(self):
        # Python 3 zero-argument super(); the two-argument form was redundant.
        super().setUp()
        self.app = main.app
        self.app.config['TESTING'] = True
        self.client = self.app.test_client()

    def _load_fixture(self, filename):
        """Load a JSON request fixture stored next to this test module."""
        json_path = os.path.join(os.path.dirname(__file__), filename)
        # Context manager guarantees the handle is closed; the previous
        # code leaked the file object returned by open().
        with open(json_path) as fixture:
            return json.load(fixture)

    def test_request_to_unknown_url(self):
        response = self.client.get('/unknown')
        self.assertEqual('404 NOT FOUND', response.status)
        data = json.loads(response.data)
        self.assertFalse(data.get('success'))
        self.assertEqual(404, data.get('code'))
        # We don't really care what the message says,
        # as long as it was a 404 status

    def test_clusteranalysis_request_with_incorrect_http_method(self):
        response = self.client.get('/clusteranalysis')
        self.assertEqual('405 METHOD NOT ALLOWED', response.status)
        data = json.loads(response.data)
        self.assertFalse(data.get('success'))
        self.assertEqual(405, data.get('code'))
        # We don't really care what the message says,
        # as long as it was a 405 status

    def test_clusteranalysis_request_with_missing_json_header(self):
        # Sending form data (no application/json content type) must be rejected.
        response = self.client.post('/clusteranalysis',
                                    data=dict())
        self.assertEqual('400 BAD REQUEST', response.status)
        data = json.loads(response.data)
        self.assertFalse(data.get('success'))
        self.assertEqual(400, data.get('code'))
        self.assertEqual('The request content type must be application/json',
                         data.get('message'))

    def test_clusteranalysis_request_with_empty_payload(self):
        response = self.client.post('/clusteranalysis',
                                    json={})
        self.assertEqual('400 BAD REQUEST', response.status)
        data = json.loads(response.data)
        self.assertFalse(data.get('success'))
        self.assertEqual(400, data.get('code'))
        self.assertEqual('The request is missing the image parameter',
                         data.get('message'))

    def test_clusteranalysis_request_with_invalid_image_data(self):
        response = self.client.post('/clusteranalysis',
                                    json={'image': 'invaliddata'})
        self.assertEqual('400 BAD REQUEST', response.status)
        data = json.loads(response.data)
        self.assertFalse(data.get('success'))
        self.assertEqual(400, data.get('code'))
        self.assertTrue(
            data.get('message').startswith('Unable to process image data:'))

    def test_clusteranalysis_request_with_non_png_image_data(self):
        response = self.client.post('/clusteranalysis',
                                    json={'image': VALID_JPG_IMAGE_DATA})
        self.assertEqual('400 BAD REQUEST', response.status)
        data = json.loads(response.data)
        self.assertFalse(data.get('success'))
        self.assertEqual(400, data.get('code'))
        self.assertEqual('Only png images are accepted', data.get('message'))

    def test_clusteranalysis_request_with_invalid_directions(self):
        response = self.client.post('/clusteranalysis',
                                    json={
                                        'image': VALID_PNG_IMAGE_DATA,
                                        'direction': 'invalid'
                                    })
        self.assertEqual('400 BAD REQUEST', response.status)
        data = json.loads(response.data)
        self.assertFalse(data.get('success'))
        self.assertEqual(400, data.get('code'))
        self.assertEqual("Invalid read order 'invalid'", data.get('message'))

    def test_invalid_request_with_plain_image(self):
        payload = self._load_fixture('invalid_request.json')
        response = self.client.post('/clusteranalysis', json=payload)
        self.assertEqual('400 BAD REQUEST', response.status)
        data = json.loads(response.data)
        self.assertEqual(400, data.get('code'))
        # This is specific to the particular error in the payload but does
        # test that the correct message is returned to the user.
        self.assertEqual(
            "No pixels within the threshold range, please try a higher contrast image.",
            data.get('message'))

    def test_valid_clusteranalysis_requests(self):
        response = self.client.post('/clusteranalysis',
                                    json={'image': VALID_PNG_IMAGE_DATA})
        self.assertEqual('200 OK', response.status)
        data = json.loads(response.data)
        self.assertTrue(data.get('success'))
        self.assertEqual(200, data.get('code'))
        self.assertIsInstance(data.get('result'), dict)
        self.assertIn('direction', data.get('result'))
        self.assertEqual('rtl', data.get('result').get('direction'))
        self.assertIn('clusters', data.get('result'))
        self.assertIsInstance(data.get('result').get('clusters'), list)
        for cluster in data.get('result').get('clusters'):
            self.assertIsInstance(cluster, dict)
            self.assertIn('bounds', cluster)
            self.assertIsInstance(cluster.get('bounds'), dict)
            self.assertIn('x', cluster.get('bounds'))
            self.assertIn('y', cluster.get('bounds'))
            self.assertIn('height', cluster.get('bounds'))
            self.assertIn('width', cluster.get('bounds'))

    def test_scipy_error_is_handled(self):
        """
        Asserts that a request with an image that causes
        an error is handled (see Issue #13).
        """
        payload = self._load_fixture('scipy_error_request.json')
        response = self.client.post('/clusteranalysis', json=payload)
        self.assertEqual('400 BAD REQUEST', response.status)
        data = json.loads(response.data)
        self.assertEqual(400, data.get('code'))
        self.assertEqual(
            main.QHULL_ERROR_MESSAGE,
            data.get('message'))

    def test_warmup_request_responds_200(self):
        """
        Asserts that a request to /_ah/warmup is handled.
        """
        response = self.client.get('/_ah/warmup')
        self.assertEqual('200 OK', response.status)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 21 11:37:29 2022
@author: debangshu
"""
import requests
from bs4 import BeautifulSoup
def format_title(title):
    """Turn a display title into a JustWatch URL slug.

    Lower-cases the title and joins its space-separated words with
    hyphens, e.g. ``"The Dark Knight"`` -> ``"the-dark-knight"``.
    """
    # split(' ') (not split()) preserves runs of spaces as empty tokens,
    # matching the original slugging behaviour.
    return '-'.join(title.lower().split(' '))
def find_movie(title):
    """Look up which streaming platforms carry a movie on JustWatch India.

    :param title: human-readable movie title; it is slugged via format_title().
    :return: list of platform names (the icons' ``alt`` text), ``False`` when
        the page lists no streaming offers, or the string
        ``"Unable to fetch info"`` when the page cannot be fetched or parsed.
    """
    try:
        title = format_title(title)
        url = "https://www.justwatch.com/in/movie/" + title
        # Timeout so a stalled connection cannot hang the caller forever.
        r = requests.get(url, allow_redirects=True, timeout=10)
        soup = BeautifulSoup(r.content, 'html.parser')
        platforms_list = soup.find(
            "div",
            {"class": "price-comparison__grid__row price-comparison__grid__row--stream"})
        # Read each provider name straight from the icon's alt attribute.
        # The previous string-slicing approach was broken: str.find() returns
        # -1 (truthy) on a miss, so the class filter never excluded anything,
        # and platform[10:] assumed 'alt' was always the first attribute.
        streams = []
        for icon in platforms_list.find_all(
                "img", class_="price-comparison__grid__row__icon"):
            alt = icon.get("alt")
            if alt:
                streams.append(alt)
        # No streaming services found
        if len(streams) == 0:
            return False
        return streams
    except Exception:
        # Covers network errors and an unexpected page layout
        # (platforms_list is None); a bare except also trapped
        # KeyboardInterrupt/SystemExit, which we must not swallow.
        return "Unable to fetch info"
def find_tvseries(title):
    """Look up which streaming platforms carry a TV series on JustWatch India.

    :param title: human-readable series title; it is slugged via format_title().
    :return: list of platform names (the icons' ``alt`` text), ``False`` when
        the page lists no streaming offers, or the string
        ``"Unable to fetch info"`` when the page cannot be fetched or parsed.
    """
    try:
        title = format_title(title)
        url = "https://www.justwatch.com/in/tv-show/" + title
        # Timeout so a stalled connection cannot hang the caller forever.
        r = requests.get(url, allow_redirects=True, timeout=10)
        soup = BeautifulSoup(r.content, 'html.parser')
        platforms_list = soup.find(
            "div",
            {"class": "price-comparison__grid__row price-comparison__grid__row--stream"})
        # Read each provider name straight from the icon's alt attribute.
        # The previous string-slicing approach was broken: str.find() returns
        # -1 (truthy) on a miss, so the class filter never excluded anything,
        # and platform[10:] assumed 'alt' was always the first attribute.
        streams = []
        for icon in platforms_list.find_all(
                "img", class_="price-comparison__grid__row__icon"):
            alt = icon.get("alt")
            if alt:
                streams.append(alt)
        # No streaming services found
        if len(streams) == 0:
            return False
        return streams
    except Exception:
        # Covers network errors and an unexpected page layout
        # (platforms_list is None); a bare except also trapped
        # KeyboardInterrupt/SystemExit, which we must not swallow.
        return "Unable to fetch info"
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 9
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from isi_sdk_8_2_2.api_client import ApiClient
class PerformanceApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_performance_dataset(self, performance_dataset, **kwargs): # noqa: E501
"""create_performance_dataset # noqa: E501
Create a new dataset. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_performance_dataset(performance_dataset, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PerformanceDatasetCreateParams performance_dataset: (required)
:param bool force: For use by support only.
:return: CreatePerformanceDatasetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_performance_dataset_with_http_info(performance_dataset, **kwargs) # noqa: E501
else:
(data) = self.create_performance_dataset_with_http_info(performance_dataset, **kwargs) # noqa: E501
return data
def create_performance_dataset_with_http_info(self, performance_dataset, **kwargs): # noqa: E501
"""create_performance_dataset # noqa: E501
Create a new dataset. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_performance_dataset_with_http_info(performance_dataset, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PerformanceDatasetCreateParams performance_dataset: (required)
:param bool force: For use by support only.
:return: CreatePerformanceDatasetResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['performance_dataset', 'force'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_performance_dataset" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'performance_dataset' is set
if ('performance_dataset' not in params or
params['performance_dataset'] is None):
raise ValueError("Missing the required parameter `performance_dataset` when calling `create_performance_dataset`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'force' in params:
query_params.append(('force', params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'performance_dataset' in params:
body_params = params['performance_dataset']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/9/performance/datasets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CreatePerformanceDatasetResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_performance_dataset(self, performance_dataset_id, **kwargs): # noqa: E501
"""delete_performance_dataset # noqa: E501
Delete the performance dataset. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_performance_dataset(performance_dataset_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str performance_dataset_id: Delete the performance dataset. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_performance_dataset_with_http_info(performance_dataset_id, **kwargs) # noqa: E501
else:
(data) = self.delete_performance_dataset_with_http_info(performance_dataset_id, **kwargs) # noqa: E501
return data
def delete_performance_dataset_with_http_info(self, performance_dataset_id, **kwargs): # noqa: E501
"""delete_performance_dataset # noqa: E501
Delete the performance dataset. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_performance_dataset_with_http_info(performance_dataset_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str performance_dataset_id: Delete the performance dataset. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['performance_dataset_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_performance_dataset" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'performance_dataset_id' is set
if ('performance_dataset_id' not in params or
params['performance_dataset_id'] is None):
raise ValueError("Missing the required parameter `performance_dataset_id` when calling `delete_performance_dataset`") # noqa: E501
collection_formats = {}
path_params = {}
if 'performance_dataset_id' in params:
path_params['PerformanceDatasetId'] = params['performance_dataset_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/9/performance/datasets/{PerformanceDatasetId}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_performance_dataset(self, performance_dataset_id, **kwargs): # noqa: E501
"""get_performance_dataset # noqa: E501
Retrieve the performance dataset. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_performance_dataset(performance_dataset_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str performance_dataset_id: Retrieve the performance dataset. (required)
:return: PerformanceDatasets
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_performance_dataset_with_http_info(performance_dataset_id, **kwargs) # noqa: E501
else:
(data) = self.get_performance_dataset_with_http_info(performance_dataset_id, **kwargs) # noqa: E501
return data
def get_performance_dataset_with_http_info(self, performance_dataset_id, **kwargs): # noqa: E501
"""get_performance_dataset # noqa: E501
Retrieve the performance dataset. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_performance_dataset_with_http_info(performance_dataset_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str performance_dataset_id: Retrieve the performance dataset. (required)
:return: PerformanceDatasets
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['performance_dataset_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_performance_dataset" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'performance_dataset_id' is set
if ('performance_dataset_id' not in params or
params['performance_dataset_id'] is None):
raise ValueError("Missing the required parameter `performance_dataset_id` when calling `get_performance_dataset`") # noqa: E501
collection_formats = {}
path_params = {}
if 'performance_dataset_id' in params:
path_params['PerformanceDatasetId'] = params['performance_dataset_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/9/performance/datasets/{PerformanceDatasetId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PerformanceDatasets', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_performance_metric(self, performance_metric_id, **kwargs): # noqa: E501
"""get_performance_metric # noqa: E501
View a single performance metric. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_performance_metric(performance_metric_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str performance_metric_id: View a single performance metric. (required)
:return: PerformanceMetrics
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_performance_metric_with_http_info(performance_metric_id, **kwargs) # noqa: E501
else:
(data) = self.get_performance_metric_with_http_info(performance_metric_id, **kwargs) # noqa: E501
return data
def get_performance_metric_with_http_info(self, performance_metric_id, **kwargs): # noqa: E501
"""get_performance_metric # noqa: E501
View a single performance metric. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_performance_metric_with_http_info(performance_metric_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str performance_metric_id: View a single performance metric. (required)
:return: PerformanceMetrics
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['performance_metric_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_performance_metric" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'performance_metric_id' is set
if ('performance_metric_id' not in params or
params['performance_metric_id'] is None):
raise ValueError("Missing the required parameter `performance_metric_id` when calling `get_performance_metric`") # noqa: E501
collection_formats = {}
path_params = {}
if 'performance_metric_id' in params:
path_params['PerformanceMetricId'] = params['performance_metric_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/9/performance/metrics/{PerformanceMetricId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PerformanceMetrics', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_performance_metrics(self, **kwargs): # noqa: E501
"""get_performance_metrics # noqa: E501
List all metrics. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_performance_metrics(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str sort: The field that will be used for sorting.
:param str dir: The direction of the sort.
:return: PerformanceMetricsExtended
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_performance_metrics_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_performance_metrics_with_http_info(**kwargs) # noqa: E501
return data
def get_performance_metrics_with_http_info(self, **kwargs): # noqa: E501
"""get_performance_metrics # noqa: E501
List all metrics. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_performance_metrics_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str sort: The field that will be used for sorting.
:param str dir: The direction of the sort.
:return: PerformanceMetricsExtended
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['sort', 'dir'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_performance_metrics" % key
)
params[key] = val
del params['kwargs']
if ('sort' in params and
len(params['sort']) > 255):
raise ValueError("Invalid value for parameter `sort` when calling `get_performance_metrics`, length must be less than or equal to `255`") # noqa: E501
if ('sort' in params and
len(params['sort']) < 0):
raise ValueError("Invalid value for parameter `sort` when calling `get_performance_metrics`, length must be greater than or equal to `0`") # noqa: E501
if ('dir' in params and
len(params['dir']) < 0):
raise ValueError("Invalid value for parameter `dir` when calling `get_performance_metrics`, length must be greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
if 'dir' in params:
query_params.append(('dir', params['dir'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/9/performance/metrics', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PerformanceMetricsExtended', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_performance_settings(self, **kwargs): # noqa: E501
"""get_performance_settings # noqa: E501
List all performance settings. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_performance_settings(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: PerformanceSettings
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_performance_settings_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_performance_settings_with_http_info(**kwargs) # noqa: E501
return data
def get_performance_settings_with_http_info(self, **kwargs): # noqa: E501
"""get_performance_settings # noqa: E501
List all performance settings. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_performance_settings_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: PerformanceSettings
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_performance_settings" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/9/performance/settings', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PerformanceSettings', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_performance_datasets(self, **kwargs): # noqa: E501
"""list_performance_datasets # noqa: E501
List all datasets. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_performance_datasets(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str sort: The field that will be used for sorting.
:param int limit: Return no more than this many results at once (see resume).
:param str dir: The direction of the sort.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: PerformanceDatasetsExtended
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_performance_datasets_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.list_performance_datasets_with_http_info(**kwargs) # noqa: E501
return data
def list_performance_datasets_with_http_info(self, **kwargs): # noqa: E501
"""list_performance_datasets # noqa: E501
List all datasets. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_performance_datasets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str sort: The field that will be used for sorting.
:param int limit: Return no more than this many results at once (see resume).
:param str dir: The direction of the sort.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: PerformanceDatasetsExtended
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['sort', 'limit', 'dir', 'resume'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_performance_datasets" % key
)
params[key] = val
del params['kwargs']
if ('sort' in params and
len(params['sort']) > 255):
raise ValueError("Invalid value for parameter `sort` when calling `list_performance_datasets`, length must be less than or equal to `255`") # noqa: E501
if ('sort' in params and
len(params['sort']) < 0):
raise ValueError("Invalid value for parameter `sort` when calling `list_performance_datasets`, length must be greater than or equal to `0`") # noqa: E501
if 'limit' in params and params['limit'] > 4294967295: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `list_performance_datasets`, must be a value less than or equal to `4294967295`") # noqa: E501
if 'limit' in params and params['limit'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `list_performance_datasets`, must be a value greater than or equal to `1`") # noqa: E501
if ('dir' in params and
len(params['dir']) < 0):
raise ValueError("Invalid value for parameter `dir` when calling `list_performance_datasets`, length must be greater than or equal to `0`") # noqa: E501
if ('resume' in params and
len(params['resume']) > 8192):
raise ValueError("Invalid value for parameter `resume` when calling `list_performance_datasets`, length must be less than or equal to `8192`") # noqa: E501
if ('resume' in params and
len(params['resume']) < 0):
raise ValueError("Invalid value for parameter `resume` when calling `list_performance_datasets`, length must be greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'dir' in params:
query_params.append(('dir', params['dir'])) # noqa: E501
if 'resume' in params:
query_params.append(('resume', params['resume'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/9/performance/datasets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PerformanceDatasetsExtended', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_performance_dataset(self, performance_dataset, performance_dataset_id, **kwargs): # noqa: E501
"""update_performance_dataset # noqa: E501
Modify the name of the performance dataset. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_performance_dataset(performance_dataset, performance_dataset_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PerformanceDataset performance_dataset: (required)
:param str performance_dataset_id: Modify the name of the performance dataset. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_performance_dataset_with_http_info(performance_dataset, performance_dataset_id, **kwargs) # noqa: E501
else:
(data) = self.update_performance_dataset_with_http_info(performance_dataset, performance_dataset_id, **kwargs) # noqa: E501
return data
def update_performance_dataset_with_http_info(self, performance_dataset, performance_dataset_id, **kwargs): # noqa: E501
"""update_performance_dataset # noqa: E501
Modify the name of the performance dataset. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_performance_dataset_with_http_info(performance_dataset, performance_dataset_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PerformanceDataset performance_dataset: (required)
:param str performance_dataset_id: Modify the name of the performance dataset. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['performance_dataset', 'performance_dataset_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_performance_dataset" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'performance_dataset' is set
if ('performance_dataset' not in params or
params['performance_dataset'] is None):
raise ValueError("Missing the required parameter `performance_dataset` when calling `update_performance_dataset`") # noqa: E501
# verify the required parameter 'performance_dataset_id' is set
if ('performance_dataset_id' not in params or
params['performance_dataset_id'] is None):
raise ValueError("Missing the required parameter `performance_dataset_id` when calling `update_performance_dataset`") # noqa: E501
collection_formats = {}
path_params = {}
if 'performance_dataset_id' in params:
path_params['PerformanceDatasetId'] = params['performance_dataset_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'performance_dataset' in params:
body_params = params['performance_dataset']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/9/performance/datasets/{PerformanceDatasetId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_performance_settings(self, performance_settings, **kwargs): # noqa: E501
"""update_performance_settings # noqa: E501
Configure performance settings. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_performance_settings(performance_settings, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PerformanceSettingsExtended performance_settings: (required)
:param bool force: Allow modification of settings outside of recommended limits
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_performance_settings_with_http_info(performance_settings, **kwargs) # noqa: E501
else:
(data) = self.update_performance_settings_with_http_info(performance_settings, **kwargs) # noqa: E501
return data
def update_performance_settings_with_http_info(self, performance_settings, **kwargs): # noqa: E501
"""update_performance_settings # noqa: E501
Configure performance settings. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_performance_settings_with_http_info(performance_settings, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PerformanceSettingsExtended performance_settings: (required)
:param bool force: Allow modification of settings outside of recommended limits
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['performance_settings', 'force'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_performance_settings" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'performance_settings' is set
if ('performance_settings' not in params or
params['performance_settings'] is None):
raise ValueError("Missing the required parameter `performance_settings` when calling `update_performance_settings`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'force' in params:
query_params.append(('force', params['force'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'performance_settings' in params:
body_params = params['performance_settings']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/9/performance/settings', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
import numpy as np
import socket
from npsocket_sn import SocketNumpyArray
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
from math import *
import time
class Scenario(BaseScenario):
def __init__(self):
self.sock_sender = SocketNumpyArray()
self.sock_sender.initialize_sender('localhost', 9998)
self.n = None
self.x = None
self.y = None
self.theta = None
self.phero = None
# Target
self.target_x = 4.0
self.target_y = 0.0
self.target_index = 0
self.radius = 4
self.num_experiments = 20
def make_world(self):
world = World()
# set any world properties first
world.dim_c = 2
num_agents = 1
num_obstacle = 4
num_target = 1
world.collaborative = True
# add agents
world.agents = [Agent() for i in range(num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.silent = True
agent.size = 0.1
# add obstacles
world.landmarks = [Landmark() for i in range(num_obstacle)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'obstacle %d' % i
landmark.collide = True
landmark.movable = False
landmark.size = 0.1
# add target
target = Landmark()
target.name = 'target'
target.collide = False
target.movable = False
target.size = 0.1
# Merge the landmarks (obstacles + target)
world.landmarks.append(target)
# make initial conditions
self.n = num_agents
self.x = [0.0, 0.0]*num_agents
self.y = [0.0, 0.0]*num_agents
self.theta = [0.0, 0.0]*num_agents
self.reset_world(world)
# Send initial information to pheromone system
self.sock_sender.send_number(self.n)
return world
def reset_world(self, world):
# random properties for agents
for i, agent in enumerate(world.agents):
agent.color = np.array([0.35, 0.35, 0.85])
# random properties for landmarks
for i, landmark in enumerate(world.landmarks):
landmark.color = np.array([0.25, 0.25, 0.25])
if i == len(world.landmarks) - 1:
landmark.color = np.array([0.9, 0.1, 0.1])
# ========================================================================= #
# TARGET UPDATE #
# ========================================================================= #
# set random initial states
# robot position (0, 0), distance between robots and target (4 m)
angle_target = self.target_index*2*pi/self.num_experiments
self.target_x = self.radius*cos(angle_target)
self.target_y = self.radius*sin(angle_target)
if self.target_index < self.num_experiments-1:
self.target_index += 1
else:
self.target_index = 0
# ========================================================================= #
# OBJECT RESET #
# ========================================================================= #
# Agent update
world.agents[0].state.p_pose = np.asarray([0.0, 0.0, 0.0])
world.agents[0].state.p_pos = world.agents[0].state.p_pose[:2]
self.target = [[self.x[1], self.y[1]], [self.x[0], self.y[0]]] #20201130 Target
for i, agent in enumerate(world.agents):
#agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
agent.state.target = [self.target_x, self.target_y] # 20201201 target
# for i, landmark in enumerate(world.landmarks):
# if landmark.name[0] == 'o':
# landmark.state.p_pos = [0, 0] #np.random.uniform(-1, +1, world.dim_p)
# landmark.state.p_vel = np.zeros(world.dim_p)
# Obstacle update
world.landmarks[0].state.p_pos = np.asarray([2, 0])
world.landmarks[0].state.p_vel = np.zeros(world.dim_p)
world.landmarks[1].state.p_pos = np.asarray([0, 2])
world.landmarks[1].state.p_vel = np.zeros(world.dim_p)
world.landmarks[2].state.p_pos = np.asarray([-2, 0])
world.landmarks[2].state.p_vel = np.zeros(world.dim_p)
world.landmarks[3].state.p_pos = np.asarray([0, -2])
world.landmarks[3].state.p_vel = np.zeros(world.dim_p)
# Target update
world.landmarks[4].state.p_pos = np.asarray([self.target_x, self.target_y])
world.landmarks[4].state.p_vel = np.zeros(world.dim_p)
def benchmark_data(self, agent, world):
rew = 0
collisions = 0
occupied_landmarks = 0
min_dists = 0
for l in world.landmarks:
dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
min_dists += min(dists)
rew -= min(dists)
if min(dists) < 0.1:
occupied_landmarks += 1
if agent.collide:
for a in world.agents:
if self.is_collision(a, agent):
rew -= 1
collisions += 1
return (rew, collisions, min_dists, occupied_landmarks)
def is_collision(self, agent1, agent2):
delta_pos = agent1.state.p_pos - agent2.state.p_pos
dist = np.sqrt(np.sum(np.square(delta_pos)))
dist_min = agent1.size + agent2.size
return True if dist < dist_min else False
def reward(self, agent, world):
'''
Compute rewards
'''
distance_reward = 0.0
phero_reward = 0.0
goal_reward = 0.0
collision_reward = 0.0
angular_reward = 0.0
#angular_punish_rewards = [0.0]*self.num_robots
#linear_punish_rewards = [0.0]*self.num_robots
# 1. Distance Reward
goal_progress = agent.state.distance_to_goal_prev - agent.state.distance_to_goal
if abs(goal_progress) < 0.1:
#print("Goal Progress: {}".format(goal_progress))
#print("Angle: {}".format(agent.state.angle_diff))
if goal_progress >= 0:
distance_reward = goal_progress * 1.2
else:
distance_reward = goal_progress
else:
distance_reward = 0.0
# Scaling distance_reward
distance_reward = distance_reward * (5/world.dt)
# 2. Phero Reward
#phero_sum = np.sum(self.phero)
#phero_reward = -phero_sum*2
# 3. Goal Reward
if agent.state.distance_to_goal <= 0.3:
goal_reward = 100.0
#done = True
#self.reset(model_state, id_bots=idx[i])
# 4. Collision Penalty
for i, obstacle in enumerate([ob for ob in world.landmarks if 'obstacle' in ob.name]):
is_collision = self.is_collision(agent, obstacle)
if is_collision == True:
collision_reward = -50.0
# 5. Angular velocity penalty
if abs(agent.action.twist[1]) > 0.8:
angular_reward = -1
reward = distance_reward+phero_reward+goal_reward+collision_reward
print("----------------")
print("GP: {}".format(goal_progress))
print("Distance R: {}".format(distance_reward))
print("Collision R: {}".format(collision_reward))
print("Goal R: {}".format(goal_reward))
print("Angular R: {}".format(angular_reward))
print("Reward: {}".format(reward))
return reward
def observation(self, agent, world):
    """Build the agent's observation from the pheromone server and its own state.

    Sends the agent's position to the external pheromone server over the
    socket, receives a 9-value pheromone grid back, and concatenates it with
    the agent's last twist action, distance-to-goal and angle difference.

    Also caches the pheromone grid on ``self.phero`` and records the
    observation width on ``world.obs_n`` for consumers of the env.

    Returns a (1, 13) array: 9 pheromone values + 2 twist + distance + angle.
    """
    # NOTE(review): the original also built entity_pos/entity_color lists and
    # an ``id`` local from the agent name here, none of which were used —
    # that dead code has been removed.
    positions = np.asarray(agent.state.p_pos)
    self.sock_sender.send_numpy_array(positions)
    data = self.sock_sender.receive_from_server()
    # Cache the pheromone reading for other methods (e.g. reward shaping).
    self.phero = phero = np.asarray(data).reshape(1, 9)
    obs = np.hstack((phero, [agent.action.twist], np.asarray([agent.state.distance_to_goal]).reshape(1,1), np.asarray([agent.state.angle_diff]).reshape(1,1)))
    world.obs_n = np.shape(obs)[1]
    return obs  # 20201201 observation is changed (pheromone + velocity, distance, anglediff ) 1*13
def done(self, agent, world):
    """Decide whether the episode terminates for *agent*.

    Terminal conditions: the goal is reached, the agent leaves the arena,
    or it collides with any obstacle landmark. The flag is mirrored onto
    ``agent.state.done``.
    """
    agent.state.done = False
    # 1. Goal arrival.
    if agent.state.distance_to_goal <= 0.3:
        print("Goal Arrived!")
        agent.state.done = True
    # 2. Out of the arena bounds.
    if any(abs(coord) > 4.6 for coord in (agent.state.p_pos[0], agent.state.p_pos[1])):
        agent.state.done = True
        print("out of range!!!!")
    # 3. Collision with any obstacle landmark.
    for obstacle in (lm for lm in world.landmarks if 'obstacle' in lm.name):
        if self.is_collision(agent, obstacle):
            agent.state.done = True
            print("Collision!!")
            time.sleep(1)
    return agent.state.done
def info(self, agent, world):
    """Return the per-agent info payload; currently only the agent's target."""
    payload = {"targets": agent.state.target}
    return [payload]
import os
import yaml
from ddb.utils.process import run
class DockerUtils:
    """
    A set of tools to manipulate docker using docker and docker-compose system commands.
    """

    @staticmethod
    def _ensure_compose_file():
        """Raise DockerComposeYamlMissingException when docker-compose.yml is absent in cwd."""
        if not os.path.exists("docker-compose.yml"):
            raise DockerComposeYamlMissingException

    @staticmethod
    def service_up(name: str = None):
        """
        Execute docker-compose up -d
        :param name: the name of a specific service to up; None ups all services
        :raise DockerComposeYamlMissingException: in case of missing docker-compose.yml in folder
        :return:
        """
        DockerUtils._ensure_compose_file()
        if name is None:
            run("docker-compose", "up", "-d")
        else:
            run("docker-compose", "up", "-d", name)

    @staticmethod
    def service_stop(name: str = None):
        """
        Execute docker-compose stop
        :param name: the name of a specific service to stop; None stops all services
        :raise DockerComposeYamlMissingException: in case of missing docker-compose.yml in folder
        :return:
        """
        # FIX: the parameter was annotated as a plain ``str`` with no default
        # while the body checked for None — now defaults to None like service_up.
        DockerUtils._ensure_compose_file()
        if name is None:
            run("docker-compose", "stop")
        else:
            run("docker-compose", "stop", name)

    @staticmethod
    def is_container_up(name: str):
        """
        Check if the given service is up.
        :param name: The name of the container to check
        :raise DockerComposeYamlMissingException: in case of missing docker-compose.yml in folder
        :return: True if the service's container id appears in the running containers
        """
        DockerUtils._ensure_compose_file()
        container_id = run("docker-compose", "ps", "-q", name).decode("utf-8").rstrip()
        if len(container_id) == 0:
            # docker-compose knows no container for this service.
            return False
        # Full (non-truncated) ids so the membership test is exact.
        containers = run('docker', 'ps', '-q', '--no-trunc').decode("utf-8").rstrip().split('\n')
        return container_id in containers

    @staticmethod
    def get_config():
        """
        Get the docker-compose.yml configuration content.
        :raise DockerComposeYamlMissingException: in case of missing docker-compose.yml
        :return: the parsed (resolved) compose configuration as a dict
        """
        DockerUtils._ensure_compose_file()
        config = run("docker-compose", "config")
        return yaml.load(config, yaml.SafeLoader)
class DockerComposeYamlMissingException(Exception):
    """
    Exception raised in case of missing docker-compose file.
    """

    def __init__(self):
        # BUG FIX: the original called ``Exception.__init__()`` unbound (without
        # self), which raised ``TypeError`` on every instantiation.
        self.message = "There is no docker-compose.yml file in current folder ({})".format(os.getcwd())
        super().__init__(self.message)
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RClusterprofiler(RPackage):
    """This package implements methods to analyze and visualize functional
    profiles (GO and KEGG) of gene and gene clusters."""

    homepage = "https://www.bioconductor.org/packages/clusterProfiler/"
    git = "https://git.bioconductor.org/packages/clusterProfiler.git"

    # Release pinned to an exact Bioconductor git commit.
    version('3.4.4', commit='b86b00e8405fe130e439362651a5567736e2d9d7')

    # The R interpreter constraint applies only to the 3.4.4 release.
    depends_on('r@3.4.0:3.4.9', when='@3.4.4')
    # R package dependencies, needed both at build and at run time.
    depends_on('r-tidyr', type=('build', 'run'))
    depends_on('r-rvcheck', type=('build', 'run'))
    depends_on('r-qvalue', type=('build', 'run'))
    depends_on('r-plyr', type=('build', 'run'))
    depends_on('r-magrittr', type=('build', 'run'))
    depends_on('r-gosemsim', type=('build', 'run'))
    depends_on('r-go-db', type=('build', 'run'))
    depends_on('r-ggplot2', type=('build', 'run'))
    depends_on('r-annotationdbi', type=('build', 'run'))
    depends_on('r-dose', type=('build', 'run'))
|
#!/usr/bin/env python
# coding: utf-8

# Exported from a Jupyter notebook; "In[n]" markers are the original cells.

# In[1]:

# print("something")

# In[2]:

print("something")

# # Heading 1
# # Heading 2
# # Heading 3

# In[6]:

a = 5
print(a)

# In[7]:

b = 2
# BUG FIX: the original printed ``B*B*B`` — ``B`` is undefined (NameError);
# the intended variable is the lowercase ``b`` assigned above.
print(b * b * b)

# In[8]:

print(b * b * b)

# In[ ]:
|
#----- Importar biblioteca do Mysql
import MySQLdb
from model.pessoa import Pessoa
class PessoaDao:
    """Data-access object for the ``pessoa`` table.

    NOTE(review): the connection is created at class-definition (import)
    time and shared by all instances; kept as-is for backward
    compatibility, but consider opening it lazily per instance.
    """

    # Shared connection, created when the module is imported.
    conexao = MySQLdb.connect(host='localhost', database='aulabd', user='root', passwd='')
    # Shared cursor for the connection above.
    cursor = conexao.cursor()

    def listar_todos(self):
        """Return every row of ``pessoa`` converted to Pessoa objects."""
        comando_sql_select = "SELECT * FROM pessoa"
        self.cursor.execute(comando_sql_select)
        resultado = self.cursor.fetchall()
        lista_pessoas_classe = self.converter_tabela_classe(resultado)
        return lista_pessoas_classe

    def buscar_por_id(self, id):
        """Return the raw row whose primary key equals *id*, or None.

        SECURITY FIX: parameterized query instead of interpolating *id*
        into the SQL string (SQL injection).
        """
        self.cursor.execute("SELECT * FROM pessoa WHERE ID = %s", (id,))
        resultado = self.cursor.fetchone()
        return resultado

    def converter_tabela_classe(self, lista_tuplas):
        """Convert DB tuples (id, nome, sobrenome, idade, endereco_id) to Pessoa objects."""
        lista_pessoas = []
        for p in lista_tuplas:
            p1 = Pessoa()
            # Tuple positions follow the table's column order.
            p1.id = p[0]
            p1.nome = p[1]
            p1.sobrenome = p[2]
            p1.idade = p[3]
            p1.endereco_id = p[4]
            lista_pessoas.append(p1)
        return lista_pessoas

    def salvar(self, pessoa):
        """Insert *pessoa* (nome, sobrenome, idade) and commit.

        SECURITY FIX: parameterized values instead of an f-string.
        """
        comando = "INSERT INTO pessoa (NOME,SOBRENOME,IDADE) VALUES (%s, %s, %s)"
        self.cursor.execute(comando, (pessoa.nome, pessoa.sobrenome, pessoa.idade))
        self.conexao.commit()

    def alterar(self, pessoa):
        """Update the row matching ``pessoa.id`` and commit.

        BUG FIX: the original statement had a stray closing parenthesis, an
        unquoted SOBRENOME value and no WHERE clause (it would have updated
        every row had it parsed). Also parameterized against injection.
        """
        comando = "UPDATE pessoa SET NOME=%s, SOBRENOME=%s, IDADE=%s WHERE ID=%s"
        self.cursor.execute(comando, (pessoa.nome, pessoa.sobrenome, pessoa.idade, pessoa.id))
        self.conexao.commit()

    def deletar(self, pessoa):
        """Delete the row matching ``pessoa.id`` and commit.

        BUG FIX: the original built ``DELETE FROM pessoa WHERE ID{id}``,
        referencing the *builtin* ``id`` and missing the ``=``.
        """
        self.cursor.execute("DELETE FROM pessoa WHERE ID = %s", (pessoa.id,))
        self.conexao.commit()
# Copyright (c) 2011-2013, Alexander Kulakov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# FastCGI wire-protocol constants (FastCGI 1.0 specification).

# Protocol version, the well-known listening-socket fd, and header size.
FCGI_VERSION = 1
FCGI_LISTENSOCK_FILENO = 0
FCGI_HEADER_LEN = 8

# Record types.
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE

# Request id 0 is reserved for management records.
FCGI_NULL_REQUEST_ID = 0
FCGI_RECORD_HEADER_LEN = 8

# FCGI_BEGIN_REQUEST flags.
FCGI_KEEP_CONN = 1

# Application roles.
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3

# protocolStatus values carried by FCGI_END_REQUEST.
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3

# Human-readable names for record types (debugging/logging).
FCGI_RECORD_TYPES = {
    FCGI_BEGIN_REQUEST: 'FCGI_BEGIN_REQUEST',
    FCGI_ABORT_REQUEST: 'FCGI_ABORT_REQUEST',
    FCGI_END_REQUEST: 'FCGI_END_REQUEST',
    FCGI_PARAMS: 'FCGI_PARAMS',
    FCGI_STDIN: 'FCGI_STDIN',
    FCGI_STDOUT: 'FCGI_STDOUT',
    FCGI_STDERR: 'FCGI_STDERR',
    FCGI_DATA: 'FCGI_DATA',
    FCGI_GET_VALUES: 'FCGI_GET_VALUES',
    FCGI_GET_VALUES_RESULT: 'FCGI_GET_VALUES_RESULT',
}

# Record types that may arrive for an already-open request.
EXISTING_REQUEST_RECORD_TYPES = frozenset((
    FCGI_STDIN,
    FCGI_DATA,
    FCGI_PARAMS,
    FCGI_ABORT_REQUEST,
))

# FCGI_GET_VALUES variable names.
FCGI_MAX_CONNS = 'FCGI_MAX_CONNS'
FCGI_MAX_REQS = 'FCGI_MAX_REQS'
FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS'

# Maximum contentLength of a single record (16-bit field).
FCGI_MAX_CONTENT_LEN = 65535
|
__all__ = ['Degrees', 'Hours', 'Rad', 'arcsin', 'arccos', 'arctan', 'pi']
import numpy as np
from functools import lru_cache
from pylightcurve.errors import *
def _break_seconds(seconds):
seconds = float(seconds)
seconds_int = int(seconds)
seconds_dec = int(str(seconds).split('.')[1])
a = int(seconds_int / 3600.0)
seconds_int = int(round(seconds_int - a * 3600.0))
b = int(seconds_int / 60.0)
seconds_int = int(round(seconds_int - b * 60.0))
c = round(float('{0}.{1}'.format(seconds_int, seconds_dec)), 6)
return a, b, c
def _collapse_to_seconds(degrees_hours, minutes, seconds):
try:
degrees_hours = float(degrees_hours)
except:
if isinstance(degrees_hours, str):
if minutes != 0 or seconds != 0:
raise PyLCInputError('Not valid angle format')
else:
try:
degrees_hours, minutes, seconds = degrees_hours.replace(':', ' ').split()
if degrees_hours[0] == '-':
minutes = '-{0}'.format(minutes)
seconds = '-{0}'.format(seconds)
except:
raise PyLCInputError('Not valid angle format')
try:
degrees_hours = float(degrees_hours)
minutes = float(minutes)
seconds = float(seconds)
except:
raise PyLCInputError('Not valid angle format')
if degrees_hours <= 0 and minutes <= 0 and seconds <= 0:
pass
elif degrees_hours >= 0 and minutes >= 0 and seconds >= 0:
pass
else:
raise PyLCInputError('Not valid angle format. Hours, degrees, minutes and seconds should be either ALL '
'positive or ALL negative.')
return round(degrees_hours * 3600.0 + minutes * 60.0 + seconds, 6)
class _DMS:
def __init__(self, arcseconds):
self.d, self.m, self.s = _break_seconds(arcseconds)
self.list = [self.d, self.m, self.s]
def __str__(self):
return '{0}:{1}:{2}{3}'.format(str(self.d).zfill(2), str(self.m).zfill(2), '0'*(self.s < 10), str(self.s))
def __repr__(self):
return self.__str__()
class _HMS:
def __init__(self, seconds):
self.h, self.m, self.s = _break_seconds(seconds)
self.list = [self.h, self.m, self.s]
def __str__(self):
return '{0}:{1}:{2}{3}'.format(str(self.h).zfill(2), str(self.m).zfill(2), '0'*(self.s < 10), str(self.s))
def __repr__(self):
return self.__str__()
class _Angle:
def __init__(self, arcseconds):
arcseconds = round(arcseconds, 6)
if arcseconds < 0:
arcseconds += 1296000.0 * (int(abs(arcseconds) / 1296000.0) + 1.0)
arcseconds -= 1296000.0 * int(arcseconds / 1296000.0)
self.arcseconds = arcseconds
self._definition = '{0} arcsec'.format(arcseconds)
@lru_cache()
def deg(self):
return self.arcseconds / 3600.0
@lru_cache()
def deg_coord(self):
if self.deg() <= 90:
return self.deg()
elif self.deg() <= 180:
return 180 - self.deg()
elif self.deg() <= 270:
return - (self.deg() - 180.0)
else:
return - (360.0 - self.deg())
@lru_cache()
def hours(self):
return self.arcseconds / 15.0 / 3600.0
@lru_cache()
def rad(self):
return (self.arcseconds / 3600.0) * np.pi / 180.0
@lru_cache()
def dms(self):
return _DMS(self.arcseconds)
@lru_cache()
def dms_coord(self):
if self.deg() <= 90:
sign = '+'
dec_print = self
elif self.deg() <= 180:
sign = '+'
dec_print = pi - self
elif self.deg() <= 270:
sign = '-'
dec_print = self - pi
else:
sign = '-'
dec_print = 2 * pi - self
return '{0}{1}'.format(sign, dec_print.dms())
@lru_cache()
def hms(self):
return _HMS(self.arcseconds / 15.0)
@lru_cache()
def sin(self):
return np.sin(self.rad())
@lru_cache()
def cos(self):
return np.cos(self.rad())
@lru_cache()
def tan(self):
return np.tan(self.rad())
@lru_cache()
def _get_class(self):
return 'plc.{0}'.format(str(self.__class__).split('.')[-1][:-2])
def __add__(self, other):
_request_angle(other)
return Degrees(0, 0, self.arcseconds + other.arcseconds)
def __sub__(self, other):
_request_angle(other)
return Degrees(0, 0, self.arcseconds - other.arcseconds)
def __mul__(self, other):
if isinstance(other, int) or isinstance(other, float):
return Degrees(0, 0, self.arcseconds * other)
else:
raise PyLCError('Operation not supported between {0} and {1}.'.format(self._get_class(), type(other)))
def __truediv__(self, other):
if isinstance(other, int) or isinstance(other, float):
return Degrees(0, 0, self.arcseconds / other)
else:
raise PyLCError('Operation not supported between {0} and {1}.'.format(self._get_class(), type(other)))
def __rmul__(self, other):
return self.__mul__(other)
def __repr__(self):
return self.__str__()
def __str__(self):
return '{0}({1} DMS, defined as {2})'.format(self._get_class(), self.dms(), self._definition)
class Degrees(_Angle):
    """Angle constructed from degrees, arcminutes and arcseconds."""

    def __init__(self, degrees, arcminutes=0.0, arcseconds=0.0):
        _Angle.__init__(self, _collapse_to_seconds(degrees, arcminutes, arcseconds))
        # Keep the original human-readable definition for __str__.
        if arcminutes == 0 and arcseconds == 0:
            self._definition = '{0} degrees'.format(degrees)
        else:
            self._definition = '{0} degrees, {1} arcminutes, {2} arcseconds'.format(degrees, arcminutes, arcseconds)
class Hours(_Angle):
    """Angle constructed from hours, minutes and seconds (1 hour == 15 degrees)."""

    def __init__(self, hours, minutes=0.0, seconds=0.0):
        _Angle.__init__(self, _collapse_to_seconds(hours, minutes, seconds) * 15.0)
        # Keep the original human-readable definition for __str__.
        if minutes == 0 and seconds == 0:
            self._definition = '{0} hours'.format(hours)
        else:
            self._definition = '{0} hours, {1} minutes, {2} seconds'.format(hours, minutes, seconds)
class Rad(_Angle):
    """Angle constructed from radians."""

    def __init__(self, rad):
        try:
            # 648000 arcsec == pi rad (180 degrees).
            arcseconds = float(rad) * 648000.0 / np.pi
        except (TypeError, ValueError):
            # FIX: narrowed the bare ``except:`` so that e.g.
            # KeyboardInterrupt is not converted into an input error.
            raise PyLCInputError('Not valid input for rad.')
        _Angle.__init__(self, arcseconds)
        self._definition = '{0} rad'.format(rad)
def _is_angle(obsject):
    """Return True when *obsject* is one of the public angle classes."""
    return isinstance(obsject, (Degrees, Hours, Rad))
def _request_angle(item):
    """Raise PyLCInputError unless *item* is an angle object."""
    if not _is_angle(item):
        raise PyLCInputError('An angle object is required (plc.Degrees, plc.Hours or plc.Rad)')
def arccos(number):
    """Return the inverse cosine of *number* as a Rad angle object."""
    return Rad(np.arccos(number))
def arcsin(number):
    """Return the inverse sine of *number* as a Rad angle object."""
    return Rad(np.arcsin(number))
def arctan(number):
    """Return the inverse tangent of *number* as a Rad angle object."""
    return Rad(np.arctan(number))
# Module-level constant: a straight angle (180 degrees); used by dms_coord().
pi = Degrees(180.0)
|
#!/usr/bin/env python
"""
Subrequests to do things like range requests, content negotiation checks,
and validation.
This is the base class for all subrequests.
"""
from abc import ABCMeta, abstractmethod
from configparser import SectionProxy
from typing import List, Tuple, Type, Union, TYPE_CHECKING
from redbot.resource.fetch import RedFetcher
from redbot.speak import Note, levels, categories
from redbot.type import StrHeaderListType
if TYPE_CHECKING:
from redbot.resource import (
HttpResource,
) # pylint: disable=cyclic-import,unused-import
class SubRequest(RedFetcher, metaclass=ABCMeta):
    """
    Base class for a subrequest of a "main" HttpResource, made to perform
    additional behavioural tests on the resource.
    """

    # Overridden by concrete subclasses: the check's identifier and the
    # phrase used when reporting notes about this subrequest's response.
    check_name = "undefined"
    response_phrase = "undefined"

    def __init__(self, config: SectionProxy, base_resource: "HttpResource") -> None:
        self.config = config
        # The "main" resource this subrequest derives its request from.
        self.base = base_resource  # type: HttpResource
        RedFetcher.__init__(self, config)
        self.check_done = False
        # Run this check's analysis as soon as the fetch completes.
        self.on("fetch_done", self._check_done)

    @abstractmethod
    def done(self) -> None:
        """The subrequest is done, process it. Must be overridden."""
        raise NotImplementedError

    def _check_done(self) -> None:
        # Only analyse responses that pass the fetcher's preflight check.
        if self.preflight():
            self.done()
        self.check_done = True
        self.emit("check_done")

    def check(self) -> None:
        """Clone the base request, let the subclass tweak the headers, then fetch."""
        modified_headers = self.modify_request_headers(list(self.base.request.headers))
        RedFetcher.set_request(
            self,
            self.base.request.uri,
            self.base.request.method,
            modified_headers,
            self.base.request.payload,
        )
        RedFetcher.check(self)

    @abstractmethod
    def modify_request_headers(
        self, base_request_headers: StrHeaderListType
    ) -> StrHeaderListType:
        """Usually overridden; modifies the request headers."""
        return base_request_headers

    def add_base_note(
        self, subject: str, note: Type[Note], **kw: Union[str, int]
    ) -> None:
        "Add a Note to the base resource."
        # Identify which subrequest's response the note refers to.
        kw["response"] = self.response_phrase
        self.base.add_note(subject, note, **kw)

    def check_missing_hdrs(self, hdrs: List[str], note: Type[Note]) -> None:
        """
        See if the listed headers are missing in the subrequest; if so,
        set the specified note.
        """
        missing_hdrs = []
        for hdr in hdrs:
            # "Missing" == present on the base response but absent here.
            if (
                hdr in self.base.response.parsed_headers
                and hdr not in self.response.parsed_headers
            ):
                missing_hdrs.append(hdr)
        if missing_hdrs:
            # Record the note on both the base resource and this subrequest.
            self.add_base_note("headers", note, missing_hdrs=", ".join(missing_hdrs))
            self.add_note("headers", note, missing_hdrs=", ".join(missing_hdrs))
class MISSING_HDRS_304(Note):
    """Note: a 304 response omits headers that the full response carried."""

    category = categories.VALIDATION
    level = levels.WARN
    summary = "%(response)s is missing required headers."
    text = """\
HTTP requires `304 Not Modified` responses to have certain headers, if they are also present in a
normal (e.g., `200 OK` response).
%(response)s is missing the following headers: `%(missing_hdrs)s`.
This can affect cache operation; because the headers are missing, caches might remove them from
their cached copies."""
|
# Counts how many 0x41-sized steps starting at 0xe stay at or below the
# 0x9886 threshold, accumulating the count (offset by 0x21) in ``input2``.
input1 = 0xe
input2 = 0x21
while input1 <= 0x9886:
    input2 += 0x1
    input1 += 0x41
# BUG FIX: the original used the Python-2-only ``print input2`` statement;
# the call form works under both Python 2 and 3.
print(input2)
|
#!/usr/bin/env python
import os
import sys
import argparse
import time
import zipfile
from subprocess import Popen, PIPE, STDOUT
from pdfminer.high_level import extract_text
from haystack import Finder
from haystack.indexing.cleaning import clean_wiki_text
from haystack.indexing.utils import convert_files_to_dicts, fetch_archive_from_http
from haystack.reader.farm import FARMReader
from haystack.reader.transformers import TransformersReader
from haystack.utils import print_answers
from haystack.database.elasticsearch import ElasticsearchDocumentStore
from haystack.retriever.elasticsearch import ElasticsearchRetriever
def parse_arguments():
    """Parse and validate the command-line arguments.

    Returns:
        (path, questions_file, gpu): the contract directory, the questions
        file path, and whether a GPU should be used.

    Exits the process when either path does not exist.
    """
    def _str2bool(value):
        # BUG FIX: the original used ``type=bool``, which makes ANY non-empty
        # string truthy — so ``--gpu False`` yielded True. Parse the common
        # spellings explicitly instead.
        if isinstance(value, bool):
            return value
        if value.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        if value.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected.')

    parser = argparse.ArgumentParser(prog='Contract Analysis Q&A', description='Run Q&A on the supplied contracts')
    parser.add_argument('path',
                        metavar='PATH_TO_DIRECTORY',
                        type=str,
                        help='The path to the Contract Analysis Directory')
    parser.add_argument('questions_file',
                        metavar='QUESTIONS_FILE',
                        type=str,
                        help='The path to the .txt file containing a list of questions to be asked')
    parser.add_argument('--gpu',
                        metavar='GPU_AVAILABILITY',
                        type=_str2bool,
                        default=False,
                        help='Boolean value indicating whether or not a GPU is available for use')
    args = parser.parse_args()
    if not os.path.isdir(args.path):
        print('ERR: The specified path does not exist or is not a directory.')
        sys.exit()
    if not os.path.isfile(args.questions_file):
        print('ERR: The specified questions\' file does not exist.')
        sys.exit()
    return args.path, args.questions_file, args.gpu
def get_fileLoc_QList(args_path, args_questions_file):
    """Read the questions file and derive the working sub-directories."""
    with open(args_questions_file) as handle:
        questions_list = [line.rstrip('\n') for line in handle]
    pdf_docs_location = os.path.join(args_path, 'documents', 'pdfs')
    txt_files_location = os.path.join(args_path, 'documents', 'txts')
    results_location = os.path.join(args_path, 'results')
    return pdf_docs_location, txt_files_location, results_location, questions_list
def convert_to_txt(pdf_docs_location, txt_files_location):
    """Extract text from every PDF under *pdf_docs_location* into per-document txt dirs.

    For each ``X.pdf`` a directory ``<txt_files_location>/X`` is created
    containing ``X.txt`` with the extracted text.
    """
    for dirpath, _dirnames, files in os.walk(pdf_docs_location):
        for file_name in files:
            raw_text = extract_text(os.path.join(dirpath, file_name), caching=False)
            stem = file_name[:-4]  # strip the ".pdf" extension
            target_dir = os.path.join(txt_files_location, stem)
            os.mkdir(target_dir)
            # FIX: the original never closed the output file handle; the
            # with-statement guarantees it is flushed and closed.
            with open(os.path.join(target_dir, stem + ".txt"), "w+") as text_file:
                text_file.write(raw_text)
def start_elasticSearch():
    """Launch a local Elasticsearch 7.6.2 server as a background subprocess.

    NOTE(review): the Popen handle is kept only in a local variable, so the
    server is never explicitly terminated; ``shell=True`` combined with a
    list argument is suspect (on POSIX the extra list items become shell
    arguments) — confirm intent. The fixed 30 s sleep assumes the server is
    ready by then.
    """
    es_server = Popen(['elasticsearch-7.6.2/bin/elasticsearch'],
                      stdout=PIPE, stderr=STDOUT,
                      # Drop privileges in the child before exec (uid 1).
                      preexec_fn=lambda: os.setuid(1), shell=True
                      )
    # Give Elasticsearch time to boot before the first query.
    time.sleep(30)
def get_results(txt_files_location, use_gpu, questions_list, results_location):
    """Run the Q&A pipeline over each document's txt dir and write *_results.txt files.

    For every per-document directory under *txt_files_location*: rebuild the
    Elasticsearch index with that document, answer every question in
    *questions_list*, and append the answers to
    ``<results_location>/<doc>_results.txt``.

    NOTE(review): this function redirects and then *closes* ``sys.stdout``
    for each file (stdout is unusable afterwards), reloads the large reader
    model once per document, and the inner os.walk shadows the outer loop's
    ``dirpath``/``files`` variables — flagged for a future refactor;
    behaviour kept as-is here.
    """
    document_store = ElasticsearchDocumentStore(host="localhost", username="", password="", index="document")
    for dirpath, dirnames, files in os.walk(txt_files_location):
        for dirname in dirnames:
            # Inner walk shadows dirpath/files from the outer loop — confirm intent.
            for dirpath, dirname, files in os.walk(os.path.join(txt_files_location, dirname)):
                for file_name in files:
                    # Rebuild the index from scratch for each document.
                    document_store.client.indices.delete(index='document', ignore=[400, 404])
                    doc_dir = dirpath
                    dicts = convert_files_to_dicts(dir_path=doc_dir, clean_func=clean_wiki_text, split_paragraphs=True)
                    document_store.write_documents(dicts)
                    retriever = ElasticsearchRetriever(document_store=document_store)
                    reader = FARMReader(model_name_or_path="elgeish/cs224n-squad2.0-albert-xxlarge-v1", use_gpu = use_gpu)
                    finder = Finder(reader, retriever)
                    # Redirect stdout so print_answers() lands in the results file.
                    sys.stdout = open(os.path.join(results_location, file_name[:-4] + "_results.txt"), "a+")
                    for i, question in enumerate(questions_list):
                        prediction = finder.get_answers(question=question, top_k_retriever=10, top_k_reader=1)
                        print("\n\n\nQuestion " + str(i + 1) + ":\n")
                        print(question + "\n")
                        print_answers(prediction, details = "minimal")
                    sys.stdout.close()
    document_store.client.transport.close()
def zip(src, dst):
    """Zip the contents of directory *src* into ``<dst>.zip``.

    Archive member names are relative to *src*.

    NOTE: this function shadows the builtin ``zip``; the name is kept
    because main() calls it, but consider renaming (e.g. zip_dir).
    """
    abs_src = os.path.abspath(src)
    # FIX: use a context manager so the archive is closed even on error.
    with zipfile.ZipFile("%s.zip" % (dst), "w", zipfile.ZIP_DEFLATED) as zf:
        for dirname, subdirs, files in os.walk(src):
            for filename in files:
                absname = os.path.abspath(os.path.join(dirname, filename))
                # Store paths relative to the source root.
                arcname = absname[len(abs_src) + 1:]
                zf.write(absname, arcname)
def main():
    """End-to-end pipeline: parse args, convert PDFs, start ES, answer questions, zip results."""
    args_path, args_questions_file, use_gpu = parse_arguments()
    pdf_docs_location, txt_files_location, results_location, questions_list = get_fileLoc_QList(args_path, args_questions_file)
    convert_to_txt(pdf_docs_location, txt_files_location)
    start_elasticSearch()
    get_results(txt_files_location, use_gpu, questions_list, results_location)
    # BUG FIX: the original referenced an undefined name ``results``
    # (NameError); the intended path is the results directory computed above.
    zip(results_location, results_location)

if __name__ == '__main__':
    main()
|
import pygame # Required for rendering the screen
class Renderer:
    """
    Handles the rendering actions of the game to free up Game for logic.
    """

    def __init__(self, screenSize):
        """Initialize the renderer with a black background and the given screen size."""
        self.backgroundColor = 0, 0, 0
        self.setScreen(screenSize)

    def render(self, map, entities):
        """
        Render the screen and everything in it.

        arg map - the map or section of map to render (currently unused here)
        arg entities - an array of the entities to render
        """
        self.screen.fill(self.backgroundColor)
        # Iterate the entities directly instead of indexing with range(len(...)).
        for entity in entities:
            self.screen.blit(entity.getImage(), entity.get_rect())
        pygame.display.flip()

    def setScreen(self, screenSize):
        """
        Set the screen size and reset the pygame display.

        Raises ValueError for non-positive dimensions.
        """
        # Catches invalid screen sizes.
        if screenSize[0] <= 0 or screenSize[1] <= 0:
            # FIX: raise an instance with a message rather than the bare class.
            raise ValueError("screen size must be positive, got {}".format(screenSize))
        # Catches max screensize
        # TODO - do we want to error out or set the max values?
        self.screenSize = screenSize
        self.screen = pygame.display.set_mode(self.screenSize)
|
from PIL import Image
import pytesseract
# OCR demo: load the image and print the text pytesseract extracts from it.
image = Image.open('myimage.jpg')
extracted_text = pytesseract.image_to_string(image)
print(extracted_text)
|
import numpy as np
def prof_weight_compute(list_probe_filenames, start_layer, last_layer, y_train2):
    """
    Key function that computes the per sample weights based on probe classifier confidences from various layers. It averages the probe
    confidences of the layers.

    Parameters:
        list_probe_filenames (list of str): Paths where probe confidences for different layers are stored (.npy files).
        start_layer (int): Index of the first layer to average, into list_probe_filenames.
        last_layer (int): Index of the last layer (inclusive) to average.
        y_train2 (numpy array): One-hot labels for the samples in each file. Dimension - (num of samples x num of classes).

    Return:
        prof_weights (numpy array): A weight per data sample for training the simple model. Dimension - (num of samples x 1).
    """
    # Sum the probe confidences over the selected layer range.
    probe_sum = np.load(list_probe_filenames[start_layer]).astype(float)
    for layer in range(start_layer + 1, last_layer + 1):
        probe_sum = probe_sum + np.load(list_probe_filenames[layer])
    # Average over the number of layers used.
    num_layers_use = last_layer - start_layer + 1
    avg_confidence = probe_sum / num_layers_use
    # Each sample's weight is the averaged confidence assigned to its true
    # label: row-wise dot product with the one-hot labels, vectorized
    # (replaces the original per-row Python loop with identical results).
    prof_weights = np.sum(avg_confidence * y_train2, axis=1, keepdims=True)
    return prof_weights
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import pyglet
from .data import *
class Resource(object):
    """Loads and decodes game resources (fonts, images, formatted text) via pyglet."""

    def init(self):
        # Register every 'resources' subdirectory on pyglet's resource path,
        # then load all fonts listed in the configuration.
        pyglet.resource.path = data.get_subdirs('resources')
        self.load_fonts(data.get_all_list(['FONTS']))

    def load_fonts(self, fonts):
        # Each font entry is a (file, face-name) pair: register the file,
        # then load the face.
        if fonts is not None:
            for font in fonts:
                pyglet.resource.add_font(font[0])
                pyglet.font.load(font[1])

    def load_image(self, path):
        # GIFs load as animations, everything else as a plain image.
        # NOTE(review): ``os`` is not among this file's visible imports —
        # presumably re-exported by ``from .data import *``; confirm.
        if os.path.splitext(path)[1].lower() == '.gif':
            return pyglet.resource.animation(path)
        else:
            return pyglet.resource.image(path)

    def load_static_image(self, path):
        # Always a static image, even for .gif files.
        return pyglet.resource.image(path)

    def decode_text(self, str):
        # The first character of the payload selects the decoder.
        # NOTE: the parameter shadows the builtin ``str``.
        if len(str) < 1:
            return pyglet.text.decode_text(str)
        elif str[0] == 'N':
            # Game default format
            return self.default_formatted_text(str[1:])
        elif str[0] == 'H':
            # HTML
            return pyglet.text.decode_html(str[1:])
        elif str[0] == 'P':
            # Plain text
            return pyglet.text.decode_text(str[1:])
        else:
            # pyglet attributed (standard) format
            return pyglet.text.decode_attributed(str[1:])

    def default_formatted_text(self, str):
        # Prefix the configured default message style, then decode.
        return self.decode_text('F' + data.get(['UI', 'NORMAL_MESSAGE_TEXT_STYLE_TEXT'], '') + str)
# Module-level singleton used by the rest of the game.
resource = Resource()
|
# Generated by Django 2.1.5 on 2019-01-25 17:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the ShoppingCart model (auto-generated; avoid hand-editing applied migrations)."""

    dependencies = [
        ('products', '0007_choice_feature_text'),
    ]

    operations = [
        migrations.CreateModel(
            name='ShoppingCart',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('product_name', models.CharField(max_length=200)),
                ('product_order', models.CharField(default='', max_length=250)),
            ],
        ),
    ]
|
'''Amino acid usage: Reads and Clones
1/ Sequence logo for each length
2/ t-tests: for each length and each position (or pair of positions),
compare the usage of the specific amino acid(s) in group1 with group2
'''
|
from email.policy import default
from lib2to3.pgen2 import grammar
from lark import Lark,Token
from lark.tree import pydot__tree_to_png
from lark import Transformer
from numpy import isin
class transforma_lista(Transformer):
def start(self,items):
output = {}
output["len"] = len(items[1])
output['mostfreq'] = max(set(items[1]), key = items[1].count)
output['soma'] = 0
isAdding = False
for e in items[1]:
match(e):
case "agora":
isAdding = True
case "fim":
isAdding = False
case default:
if isAdding and e.isdigit():
output['soma'] += int(e)
return output
def elementos(self,items):
return list(filter(lambda x: x!=',',items))
def LISTA(self,lista):
return str(lista)
def PT(self,pt):
return str(pt)
def ELEM(self,elem):
return str(elem)
def VIR(self,vir):
return str(vir)
# Lark EBNF grammar: a sentence is the keyword "Lista"/"LISTA", a
# comma-separated list of alphanumeric elements, and a final period.
grammar = '''
start : LISTA elementos PT
LISTA : "Lista" | "LISTA"
elementos : ELEM (VIR ELEM)*
ELEM: "0".."9"+|("a".."z"|"A".."Z")+
VIR: ","
PT : "."
%import common.WS
%ignore WS
'''
# Parser built from the grammar above.
l = Lark(grammar)
#frase = "LISTA 1 ."
#tree = l.parse(frase)
#print(tree)
#print(tree.pretty())
#for element in tree.children:
# print(element)
#
#tokens = tree.scan_values(lambda x:isinstance(x,Token))
#
#for token in tokens:
# print(token)
#
#pydot__tree_to_png(tree,"ex1.png")
#
#frase = "Lista aaa ."
#tree = l.parse(frase)
#print(tree)
#print(tree.pretty())
# Demo: parse a sample sentence and print the computed summary dict.
frase = "Lista 1,agora, 1, 2, fim, agora, 3,ola, 4, fim, 7, 8."
tree = l.parse(frase)
data = transforma_lista().transform(tree)
print(data)
#print(tree)
#print(tree.pretty())
|
# Advent of Code 2020
# Day 5
from boarding_pass import BoardingPass
from pathlib import Path
# input
with open(Path(__file__).parent / "input.txt") as f:
inp = BoardingPass.parse_bpasses(f.read())
# part 1
# Find the highest seat ID.
def part_1():
    """Highest seat ID on any boarding pass."""
    return max(inp, key=lambda bp: bp.id).id
print(f"Part 1: {part_1()}")
# part 2
# Find your seat ID (the only missing one not on edges).
def part_2():
    """The single missing seat ID strictly inside the occupied range."""
    seat_ids = sorted(bp.id for bp in inp)
    candidates = set(range(seat_ids[0], seat_ids[-1] + 1)) - set(seat_ids)
    return candidates.pop()
print(f"Part 2: {part_2()}")
|
# This script cleans, compacts and groups DNS records for Alexa1m domains by name and date.
#
# Command to run this script on the CTIT cluster:
# $ spark-submit --master yarn --deploy-mode cluster --num-executors 300 src/data/spark/restructure_alexa1m_data.py
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import *
import json
import datetime
schema = StructType([
StructField("query_type", StringType(), False),
StructField("query_name", StringType(), False),
StructField("response_type", StringType(), True),
StructField("response_name", StringType(), True),
StructField("timestamp", LongType(), False),
StructField("worker_id", IntegerType(), False),
StructField("status_code", IntegerType(), False),
StructField("ip4_address", StringType(), True),
StructField("ip6_address", StringType(), True),
StructField("country", StringType(), True),
StructField("as", StringType(), True),
StructField("as_full", StringType(), True),
StructField("cname_name", StringType(), True),
StructField("dname_name", StringType(), True),
StructField("mx_address", StringType(), True),
StructField("mx_preference", IntegerType(), True),
StructField("mxset_hash_algorithm", StringType(), True),
StructField("mxset_hash", StringType(), True),
StructField("ns_address", StringType(), True),
StructField("nsset_hash_algorithm", StringType(), True),
StructField("nsset_hash", StringType(), True),
StructField("txt_text", StringType(), True),
StructField("txt_hash_algorithm", StringType(), True),
StructField("txt_hash", StringType(), True),
StructField("ds_key_tag", IntegerType(), True),
StructField("ds_algorithm", IntegerType(), True),
StructField("ds_digest_type", IntegerType(), True),
StructField("ds_digest", StringType(), True),
StructField("dnskey_flags", IntegerType(), True),
StructField("dnskey_protocol", IntegerType(), True),
StructField("dnskey_algorithm", IntegerType(), True),
StructField("dnskey_pk_rsa_n", StringType(), True),
StructField("dnskey_pk_rsa_e", StringType(), True),
StructField("dnskey_pk_rsa_bitsize", IntegerType(), True),
StructField("dnskey_pk_eccgost_x", StringType(), True),
StructField("dnskey_pk_eccgost_y", StringType(), True),
StructField("dnskey_pk_dsa_t", StringType(), True),
StructField("dnskey_pk_dsa_q", StringType(), True),
StructField("dnskey_pk_dsa_p", StringType(), True),
StructField("dnskey_pk_dsa_g", StringType(), True),
StructField("dnskey_pk_dsa_y", StringType(), True),
StructField("dnskey_pk_eddsa_a", StringType(), True),
StructField("dnskey_pk_wire", StringType(), True),
StructField("nsec_next_domain_name", StringType(), True),
StructField("nsec_owner_rrset_types", StringType(), True),
StructField("nsec3_hash_algorithm", IntegerType(), True),
StructField("nsec3_flags", IntegerType(), True),
StructField("nsec3_iterations", IntegerType(), True),
StructField("nsec3_salt", StringType(), True),
StructField("nsec3_next_domain_name_hash", StringType(), True),
StructField("nsec3_owner_rrset_types", StringType(), True),
StructField("nsec3param_hash_algorithm", IntegerType(), True),
StructField("nsec3param_flags", IntegerType(), True),
StructField("nsec3param_iterations", IntegerType(), True),
StructField("nsec3param_salt", StringType(), True),
StructField("spf_text", StringType(), True),
StructField("spf_hash_algorithm", StringType(), True),
StructField("spf_hash", StringType(), True),
StructField("soa_mname", StringType(), True),
StructField("soa_rname", StringType(), True),
StructField("soa_serial", LongType(), True),
StructField("soa_refresh", LongType(), True),
StructField("soa_retry", LongType(), True),
StructField("soa_expire", LongType(), True),
StructField("soa_minimum", LongType(), True),
StructField("rrsig_type_covered", StringType(), True),
StructField("rrsig_algorithm", IntegerType(), True),
StructField("rrsig_labels", IntegerType(), True),
StructField("rrsig_original_ttl", LongType(), True),
StructField("rrsig_signature_inception", LongType(), True),
StructField("rrsig_signature_expiration", LongType(), True),
StructField("rrsig_key_tag", IntegerType(), True),
StructField("rrsig_signer_name", StringType(), True),
StructField("rrsig_signature", StringType(), True),
StructField("cds_key_tag", IntegerType(), True),
StructField("cds_algorithm", IntegerType(), True),
StructField("cds_digest_type", IntegerType(), True),
StructField("cds_digest", StringType(), True),
StructField("cdnskey_flags", IntegerType(), True),
StructField("cdnskey_protocol", IntegerType(), True),
StructField("cdnskey_algorithm", IntegerType(), True),
StructField("cdnskey_pk_rsa_n", StringType(), True),
StructField("cdnskey_pk_rsa_e", StringType(), True),
StructField("cdnskey_pk_rsa_bitsize", IntegerType(), True),
StructField("cdnskey_pk_eccgost_x", StringType(), True),
StructField("cdnskey_pk_eccgost_y", StringType(), True),
StructField("cdnskey_pk_dsa_t", StringType(), True),
StructField("cdnskey_pk_dsa_q", StringType(), True),
StructField("cdnskey_pk_dsa_p", StringType(), True),
StructField("cdnskey_pk_dsa_g", StringType(), True),
StructField("cdnskey_pk_dsa_y", StringType(), True),
StructField("cdnskey_pk_eddsa_a", StringType(), True),
StructField("cdnskey_pk_wire", StringType(), True),
StructField("caa_flags", IntegerType(), True),
StructField("caa_tag", StringType(), True),
StructField("caa_value", StringType(), True),
StructField("tlsa_usage", IntegerType(), True),
StructField("tlsa_selector", IntegerType(), True),
StructField("tlsa_matchtype", IntegerType(), True),
StructField("tlsa_certdata", StringType(), True),
StructField("ptr_name", StringType(), True)
])
def convert_record(obj, domain_name):
    """Convert one OpenINTEL DNS measurement row into zone-file-style strings.

    :param obj: row/dict exposing the measurement fields ('response_name',
        'response_type', and the per-type payload columns of the schema above).
    :param domain_name: registered domain (no trailing dot) used to
        relativize owner names.
    :return: list with zero or one formatted record strings; empty for
        artificial records that carry no response_name.
    """
    resp_name = obj['response_name']
    resp_type = obj['response_type']
    if resp_name is None:
        # Ignore artificial records such as NSHASH, TXTHASH, etc.
        # These records do not have a response_name
        return []
    # Relativize the owner name against the queried domain.
    # NOTE(review): the substring test (and replace) can over-match when the
    # domain appears mid-label; kept as-is to preserve existing output.
    name = resp_name
    if domain_name in resp_name:
        name = resp_name.replace(domain_name + '.', '')
    if name.endswith('.'):
        name = name[:-1]
    if name == '':
        name = '@'  # zone-file shorthand for the apex
    arr = []
    if resp_type == 'SOA':
        mname = obj['soa_mname']
        rname = obj['soa_rname']
        serial = obj['soa_serial']
        refresh = obj['soa_refresh']
        retry = obj['soa_retry']
        expire = obj['soa_expire']
        minimum = obj['soa_minimum']
        # SOA deliberately keeps the fully-qualified owner name.
        arr.append('%s %s %s %s %s %s %s %s %s' % (resp_name, resp_type, mname, rname, serial, refresh, retry, expire, minimum))
    elif resp_type == 'NS':
        arr.append('%s %s %s' % (name, resp_type, obj['ns_address']))
    elif resp_type == 'AAAA':
        arr.append('%s %s %s' % (name, resp_type, obj['ip6_address']))
    elif resp_type == 'A':
        arr.append('%s %s %s' % (name, resp_type, obj['ip4_address']))
    elif resp_type == 'TXT':
        arr.append('%s %s %s' % (name, resp_type, obj['txt_text']))
    elif resp_type == 'SPF':
        arr.append('%s %s %s' % (name, resp_type, obj['spf_text']))
    elif resp_type == 'PTR':
        # BUGFIX: the schema declares 'ptr_name' (see StructField list above);
        # 'ptr_text' does not exist in the schema and broke PTR conversion.
        arr.append('%s %s %s' % (name, resp_type, obj['ptr_name']))
    elif resp_type == 'CNAME':
        arr.append('%s %s %s' % (name, resp_type, obj['cname_name']))
    elif resp_type == 'MX':
        mx_pref = obj['mx_preference']
        mx_addr = obj['mx_address']
        arr.append('%s %s %s %s' % (name, resp_type, mx_pref, mx_addr))
    elif resp_type == 'RRSIG':
        arr.append('%s %s %s %s %s %s (%s %s %s %s %s)' % (
            name,
            resp_type,
            obj['rrsig_type_covered'],
            obj['rrsig_algorithm'],
            obj['rrsig_labels'],
            obj['rrsig_original_ttl'],
            obj['rrsig_signature_expiration'],
            obj['rrsig_signature_inception'],
            obj['rrsig_key_tag'],
            obj['rrsig_signer_name'],
            obj['rrsig_signature'])
        )
    elif resp_type == 'DS':
        arr.append('%s %s %s %s %s %s' % (
            name,
            resp_type,
            obj['ds_key_tag'],
            obj['ds_algorithm'],
            obj['ds_digest_type'],
            obj['ds_digest'])
        )
    elif resp_type == 'DNSKEY':
        arr.append('%s %s %s %s %s' % (
            name,
            resp_type,
            obj['dnskey_flags'],
            obj['dnskey_protocol'],
            obj['dnskey_algorithm'])
        )
    elif resp_type == 'CAA':
        arr.append('%s %s %s %s %s' % (
            name,
            resp_type,
            obj['caa_flags'],
            obj['caa_tag'],
            obj['caa_value'])
        )
    elif resp_type == 'NSEC':
        arr.append('%s %s %s %s' % (
            name,
            resp_type,
            obj['nsec_next_domain_name'],
            obj['nsec_owner_rrset_types'])
        )
    else:
        # Unknown type: emit only the owner name and type.
        arr.append('%s %s' % (resp_name, resp_type))
    return arr
def get_domain_name(row):
    """Return the queried domain from *row*, without a leading 'www.' or a
    trailing dot."""
    name = row['query_name']
    if name.startswith('www.'):
        name = name[len('www.'):]
    if name.endswith('.'):
        name = name[:-1]
    return name
def sort_by_type(record):
    """Sort key for record strings: the DNS type, i.e. the second
    space-separated token ('<name> <TYPE> <data...>')."""
    return record.split(' ', 2)[1]
def domain_obj_to_json(domain_obj):
    """Deduplicate the record list, sort it by DNS type, and serialize the
    whole domain object to a JSON string."""
    unique_records = set(domain_obj['records'])
    domain_obj['records'] = sorted(unique_records,
                                   key=lambda rec: rec.split(' ')[1])
    return json.dumps(domain_obj)
def process_multiple_rows(iterator):
    """Aggregate measurement rows by domain; yield one JSON line per domain.

    The date stored for a domain comes from the first row seen for it
    (millisecond epoch timestamp, formatted YYYYMMDD).
    """
    by_domain = {}
    for row in iterator:
        key = get_domain_name(row)
        entry = by_domain.get(key)
        if entry is None:
            stamp = datetime.datetime.fromtimestamp(row['timestamp'] / 1000)
            entry = {}
            entry['date'] = stamp.strftime('%Y%m%d')
            entry['records'] = []
            entry['domain'] = key
            by_domain[key] = entry
        recs = convert_record(row, key)
        if recs:
            entry['records'].extend(recs)
    for entry in by_domain.values():
        yield domain_obj_to_json(entry)
# --- Spark driver ----------------------------------------------------------
# Reads the raw OpenINTEL Alexa-1M JSON dumps, aggregates them into one
# compact JSON object per domain (via process_multiple_rows), and writes the
# result gzip-compressed.
sc = SparkContext(appName="Restructure Alexa1M Data")
sc.setLogLevel("ERROR")
sqlContext = SQLContext(sc)
in_path = '/user/s1962523/openintel-alexa1m/openintel-alexa1m-*/*.json.gz'
out_path = '/user/s1962523/openintel-alexa1m-compact'
# Use the explicit schema (inferSchema off) so column types stay stable.
df = sqlContext.read.option("inferSchema", "false").schema(schema).json(in_path)
# mapPartitions lets one pass aggregate all rows of a partition per domain.
rdd = df.rdd.mapPartitions(process_multiple_rows)
rdd.saveAsTextFile(path=out_path,
                   compressionCodecClass="org.apache.hadoop.io.compress.GzipCodec")
|
#!/usr/bin/env python3
from ..bit import Bit
from ..pin import Pin
from ..ic import IC
from .andic import AndIC
from .notic import NotIC
class NandIC(IC):
def __init__(self):
super().__init__(2, 1)
self.andgate = AndIC()
self.notgate = NotIC()
def run()
|
from flask import request, Response
from threading import Thread
from models import Node, Pod
from utils import debug
import clustertools
import apiserver
import traceback
import requests
import random
import params
import time
import sys
EXCLUDED_HEADERS = ['content-encoding', 'content-length', 'transfer-encoding', 'connection']
class BaseSync(Thread):
    """Background thread that periodically discovers cluster state.

    Every ``params.REFRESH_SECONDS`` it queries the API server for nodes and
    pods, attaches ready pods to their node, and hands the resulting node
    list to the registered listener (set via :meth:`set_listener`).
    """

    def __init__(self):
        super().__init__()
        # Object implementing refresh_nodes(nodes); must be registered via
        # set_listener() before start(), otherwise sync() fails on None.
        self.listener = None

    def set_listener(self, listener):
        """Register the object notified with each fresh node list."""
        self.listener = listener

    def run(self):
        # Randomize the phase inside the refresh window so multiple replicas
        # don't all hit the API server at the same instant.
        sleep_before = int(random.random() * params.REFRESH_SECONDS)
        sleep_after = params.REFRESH_SECONDS - sleep_before
        while True:
            try:
                time.sleep(sleep_before)
                print("Starting sync")
                self.sync()
                print("Sync ended")
                time.sleep(sleep_after)
            except KeyboardInterrupt:
                print("Bye")
                sys.exit(0)
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # SystemExit/GeneratorExit. A failed sync is logged and
                # retried on the next cycle. (An unreachable `break` after
                # sys.exit(0) above was also removed.)
                debug("Sync has failed")
                traceback.print_exc(file=sys.stdout)

    def sync(self):
        """Fetch the current node/pod topology and notify the listener."""
        nodes = self.detect_nodes_and_pods()
        self.listener.refresh_nodes(nodes)

    def detect_nodes_and_pods(self):
        """Return the nodes (with their ready pods attached) hosting >= 1 pod."""
        nodes_data = apiserver.get_nodes()
        pods_data = apiserver.get_pods()
        nodes = [Node(x) for x in nodes_data]
        pods = [Pod(x) for x in pods_data]
        nodes_map = {x.ip: x for x in nodes}
        for p in pods:
            if not p.isReady:
                print("Pod is not ready, skipping")
                continue
            if p.hostIP not in nodes_map:
                print("Pod is attached to an unknown host, skipping:", p.hostIP)
                continue
            nodes_map[p.hostIP].add(p)
        return [x for x in nodes if x.pods]

    def refresh_cpu_stats(self, nodes, onlyPrimary=False):
        """Update idle/busy CPU figures on each node (optionally primaries only)."""
        if onlyPrimary:
            nodes = [x for x in nodes if x.primary]
        for node in nodes:
            _, idle, busy, _, _ = clustertools.get_stats(node.ip)
            node.idle = idle
            node.busy = busy
class BaseStrategy:
    """Base class for pod-selection strategies behind the HTTP forwarder.

    Owns a :class:`BaseSync` thread that keeps ``self.nodes`` fresh via the
    listener callback; subclasses implement :meth:`pick_node_and_pod`.
    """

    def __init__(self, sync=None):
        """Start (or adopt) a sync thread and register as its listener.

        :param sync: optional pre-built sync thread; a fresh BaseSync is
            created when omitted.
        """
        # BUGFIX: the default used to be `sync=BaseSync()` — a mutable default
        # evaluated once at definition time, so every BaseStrategy instance
        # shared one thread and the second start() raised RuntimeError.
        if sync is None:
            sync = BaseSync()
        self.sync = sync
        self.nodes = []
        sync.set_listener(self)
        sync.start()

    def refresh_scores(self, nodes_list):
        """Normalize raw node scores into a cumulative distribution.

        After the call, ``node.score`` is the node's normalized weight and
        ``node.score_sum`` the cumulative weight up to and including that
        node in list order — suitable for roulette-wheel selection.
        """
        score_sum = 0.0
        for node in nodes_list:
            score_sum += node.score_raw
            node.score_sum = score_sum  # running (unnormalized) prefix sum
        if score_sum == 0.0:
            print("Zeroed score_sum, changing to 1.0")
            score_sum = 1.0  # avoid division by zero when all scores are 0
        for node in nodes_list:
            node.score_sum /= score_sum
            node.score = node.score_raw / score_sum

    def refresh_nodes(self, new_nodes, busy=False):
        """Listener callback from the sync thread; swap in the new node list."""
        self.nodes = new_nodes

    def pick_node_and_pod(self):
        """Choose the (node, pod) target; subclasses must override."""
        raise NotImplementedError("When subclassing BaseStrategy you must implement pick_node_and_pod to reflect your own strategy")

    def forward(self, path):
        """Forward the active Flask request to a pod chosen by the strategy."""
        node, pod = self.pick_node_and_pod()
        response = self.forward_to(node, pod)
        return response

    def forward_to(self, node, pod):
        """Proxy the active Flask request to *pod* and stream the reply back."""
        target = 'http://' + pod.ip + ':4568/'
        newurl = request.url.replace(request.host_url + "forward/", target)
        resp = requests.request(
            method=request.method,
            url=newurl,
            headers={key: value for (key, value) in request.headers if key != 'Host'},
            data=request.get_data(),
            cookies=request.cookies,
            allow_redirects=False)
        # Strip hop-by-hop headers that must not be forwarded verbatim.
        headers = [(name, value) for (name, value) in resp.raw.headers.items()
                   if name.lower() not in EXCLUDED_HEADERS]
        # Stream the body in chunks instead of buffering it all in memory.
        body = resp.iter_content(chunk_size=10 * 1024)
        return Response(body, resp.status_code, headers)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class TextLoss(nn.Module):
    """Multi-task loss for the text detector: text-region (TR) and text
    center-line (TCL) classification, radius/sin/cos geometry regression,
    and a GCN link-prediction loss."""

    def __init__(self):
        super().__init__()

    @staticmethod
    def ohem(predict, target, train_mask, negative_ratio=3.):
        """Online hard example mining for the pixel classifier.

        Keeps every positive pixel and only the hardest negatives, up to
        ``negative_ratio`` times the positive count.

        :param predict: (N, 2) class logits
        :param target: (N,) 0/1 integer labels
        :param train_mask: (N,) 0/1 mask of pixels allowed to contribute
        :return: scalar loss normalized by the number of kept pixels
        """
        pos = (target * train_mask).byte()
        neg = ((1 - target) * train_mask).byte()

        n_pos = pos.float().sum()

        if n_pos.item() > 0:
            loss_pos = F.cross_entropy(predict[pos], target[pos], reduction='sum')
            loss_neg = F.cross_entropy(predict[neg], target[neg], reduction='none')
            n_neg = min(int(neg.float().sum().item()), int(negative_ratio * n_pos.float()))
        else:
            loss_pos = torch.tensor(0.)
            loss_neg = F.cross_entropy(predict[neg], target[neg], reduction='none')
            # BUGFIX: the cap was a hard-coded 100, but torch.topk raises a
            # RuntimeError when fewer than 100 negatives exist — clamp to the
            # number of available negative losses.
            n_neg = min(100, loss_neg.numel())
        # Keep only the hardest (largest-loss) negatives.
        loss_neg, _ = torch.topk(loss_neg, n_neg)

        return (loss_pos + loss_neg.sum()) / (n_pos + n_neg).float()

    @staticmethod
    def smooth_l1_loss(inputs, target, sigma=9.0):
        """Smooth-L1 (Huber-style) loss with transition point at 1/sigma;
        returns 0 for empty inputs or on any runtime failure."""
        try:
            diff = torch.abs(inputs - target)
            less_one = (diff < 1.0 / sigma).float()
            # Quadratic below 1/sigma, linear above.
            loss = less_one * 0.5 * diff ** 2 * sigma \
                   + torch.abs(torch.tensor(1.0) - less_one) * (diff - 0.5 / sigma)
            loss = torch.mean(loss) if loss.numel() > 0 else torch.tensor(0.0)
        except Exception as e:
            print('RPN_REGR_Loss Exception:', e)
            loss = torch.tensor(0.0)
        return loss

    def gcn_loss(self, gcn_data):
        """Cross-entropy over GCN predictions; gcn_data = (pred, labels)."""
        gcn_pred = gcn_data[0]
        labels = gcn_data[1].view(-1).long()
        loss = F.cross_entropy(gcn_pred, labels)
        return loss

    def forward(self, inputs, gcn_data, train_mask, tr_mask, tcl_mask, radii_map, sin_map, cos_map):
        """
        calculate textsnake loss
        :param inputs: (Variable), network predict, (BS, 8, H, W)
        :param gcn_data: (Variable), (gcn_pred ,gtmat_batch)
        :param tr_mask: (Variable), TR target, (BS, H, W)
        :param tcl_mask: (Variable), TCL target, (BS, H, W)
        :param sin_map: (Variable), sin target, (BS, H, W)
        :param cos_map: (Variable), cos target, (BS, H, W)
        :param radii_map: (Variable), radius target, (BS, H, W)
        :param train_mask: (Variable), training mask, (BS, H, W)
        :return: loss_tr, loss_tcl, loss_radii, loss_sin, loss_cos
        """
        # Flatten all predictions to (BS*H*W, ...) so masks index pixels.
        tr_pred = inputs[:, :2].permute(0, 2, 3, 1).contiguous().view(-1, 2)  # (BSxHxW, 2)
        tcl_pred = inputs[:, 2:4].permute(0, 2, 3, 1).contiguous().view(-1, 2)  # (BSxHxW, 2)
        sin_pred = inputs[:, 4].contiguous().view(-1)  # (BSxHxW,)
        cos_pred = inputs[:, 5].contiguous().view(-1)  # (BSxHxW,)
        # regularize sin and cos: sum of squares scaled to 1
        scale = torch.sqrt(1.0 / (sin_pred ** 2 + cos_pred ** 2 + 0.0001))
        sin_pred = sin_pred * scale
        cos_pred = cos_pred * scale

        top_pred = inputs[:, 6].contiguous().view(-1)  # (BSxHxW,)
        bot_pred = inputs[:, 7].contiguous().view(-1)  # (BSxHxW,)

        train_mask = train_mask.contiguous().view(-1)  # (BSxHxW,)
        tr_mask = tr_mask.contiguous().view(-1)
        tcl_mask = tcl_mask[:, :, :, 0].contiguous().view(-1)
        sin_map = sin_map.contiguous().view(-1)
        cos_map = cos_map.contiguous().view(-1)
        top_map = radii_map[:, :, :, 0].contiguous().view(-1)
        bot_map = radii_map[:, :, :, 1].contiguous().view(-1)

        # Text-region loss with hard negative mining.
        loss_tr = self.ohem(tr_pred, tr_mask.long(), train_mask.long())

        # TCL loss only over pixels inside a text region.
        loss_tcl = torch.tensor(0.)
        tr_train_mask = train_mask * tr_mask
        tr_neg_mask = 1 - tr_train_mask
        if tr_train_mask.sum().item() > 0:
            loss_tcl_pos = F.cross_entropy(tcl_pred[tr_train_mask], tcl_mask[tr_train_mask].long())
            loss_tcl_neg = F.cross_entropy(tcl_pred[tr_neg_mask], tcl_mask[tr_neg_mask].long())
            loss_tcl = loss_tcl_pos  # negative part intentionally disabled

        # Geometry losses, only over center-line pixels.
        loss_radii = torch.tensor(0.)
        loss_sin = torch.tensor(0.)
        loss_cos = torch.tensor(0.)
        tcl_train_mask = train_mask * tcl_mask
        if tcl_train_mask.sum().item() > 0:
            # Radii regressed as ratios to ground truth (target ratio = 1).
            ones = torch.ones_like(top_pred[tcl_mask]).float()
            loss_top = F.smooth_l1_loss(top_pred[tcl_mask] / (top_map[tcl_mask] + 0.01), ones, reduction='none')
            loss_bot = F.smooth_l1_loss(bot_pred[tcl_mask] / (bot_map[tcl_mask] + 0.01), ones, reduction='none')
            rad_map = top_map[tcl_mask] + bot_map[tcl_mask]
            loss_radii = torch.mean(loss_top + loss_bot)
            loss_sin = self.smooth_l1_loss(sin_pred[tcl_mask], sin_map[tcl_mask])
            loss_cos = self.smooth_l1_loss(cos_pred[tcl_mask], cos_map[tcl_mask])

        # Graph convolution loss.
        gcn_loss = self.gcn_loss(gcn_data)

        return loss_tr, loss_tcl, loss_sin, loss_cos, loss_radii, gcn_loss
|
import argparse
def parameter_parser(args=None):
    """
    A method to parse up command line parameters. By default it trains on the PubMed dataset.
    The default hyperparameters give a good quality representation without grid search.

    :param args: optional list of argument strings; defaults to sys.argv.
        Passing an explicit list keeps the parser testable/embeddable.
    :return: argparse.Namespace with the parsed hyperparameters.
    """
    parser = argparse.ArgumentParser(description="Run .")

    parser.add_argument("--edge-path",
                        nargs="?",
                        default="./input/edges.csv",
                        help="Edge list csv.")

    parser.add_argument("--features-path",
                        nargs="?",
                        default="./input/features.csv",
                        help="Features json.")

    parser.add_argument("--target-path",
                        nargs="?",
                        default="./input/target.csv",
                        help="Target classes csv.")

    parser.add_argument("--clustering-method",
                        nargs="?",
                        default="metis",
                        help="Clustering method for graph decomposition. Default is the metis procedure.")

    parser.add_argument("--epochs",
                        type=int,
                        default=200,
                        help="Number of training epochs. Default is 200.")

    parser.add_argument("--seed",
                        type=int,
                        default=42,
                        help="Random seed for train-test split. Default is 42.")

    parser.add_argument("--dropout",
                        type=float,
                        default=0.5,
                        help="Dropout parameter. Default is 0.5.")

    parser.add_argument("--learning-rate",
                        type=float,
                        default=0.01,
                        help="Learning rate. Default is 0.01.")

    # BUGFIX: the help text used to claim "Default is 0.1." while the actual
    # default is 0.9; the text now matches the default.
    parser.add_argument("--test-ratio",
                        type=float,
                        default=0.9,
                        help="Test data ratio. Default is 0.9.")

    parser.add_argument("--cluster-number",
                        type=int,
                        default=10,
                        help="Number of clusters extracted. Default is 10.")

    # Hidden layer sizes are not exposed on the command line.
    parser.set_defaults(layers=[16, 16, 16])

    return parser.parse_args(args)
|
from behave import fixture
@fixture
def init_cluster(context):
    """Behave fixture: stop any running database, clean old gpinitsystem
    logs, and create a fresh mirrored cluster on mdw/sdw1-3.

    NOTE(review): the gpinitsystem/gpAdminLogs paths suggest this targets a
    Greenplum test cluster — confirm against the step definitions.
    """
    context.execute_steps(u"""
        Given the database is not running
        And a working directory of the test as '/tmp/concourse_cluster'
        And the user runs command "rm -rf ~/gpAdminLogs/gpinitsystem*"
        And a cluster is created with mirrors on "mdw" and "sdw1, sdw2, sdw3"
        """)
|
#!/usr/bin/env python3
#
# Copyright 2019 ROBOTIS CO., LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Darby Lim, Pyo
from rcl_interfaces.msg import ParameterDescriptor
from rclpy.node import Node
from rclpy.qos import QoSProfile
from rclpy.qos import qos_profile_action_status_default
from rclpy.qos import qos_profile_parameter_events
from rclpy.qos import qos_profile_parameters
from rclpy.qos import qos_profile_sensor_data
from rclpy.qos import qos_profile_services_default
from rclpy.qos import qos_profile_system_default
from examples_msgs.msg import Count
class Counter(Node):
    """ROS 2 node that publishes an incrementing Count message once a second."""

    def __init__(self, comment, qos_profile):
        """
        :param comment: label echoed in every log line.
        :param qos_profile: integer 0-6 selecting a preset QoS profile
            (see get_qos for the mapping).
        """
        super().__init__('counter')
        self.get_logger().debug('Test debug message')
        self.i = 0  # monotonically increasing count published in each message
        self.comment = comment
        # Declared so the value can be overridden via the ROS parameter system.
        self.declare_parameter('comment', 'Hello', ParameterDescriptor())
        qos = self.get_qos(qos_profile)
        self.pub = self.create_publisher(Count, 'count', qos)
        # Fire timer_callback once per second.
        self.timer = self.create_timer(1.0, self.timer_callback)

    def timer_callback(self):
        """Publish the next count, stamped with the current node clock time."""
        self.i += 1
        msg = Count()
        msg.header.stamp = self.get_clock().now().to_msg()
        msg.count = self.i
        self.get_logger().info('[{0}] Counting: \'{1}\''.format(self.comment, msg.count))
        self.pub.publish(msg)

    def get_qos(self, i):
        """Map an integer selector to one of rclpy's predefined QoS profiles.

        Raises KeyError for selectors outside 0-6.
        """
        return {
            0: QoSProfile(depth=10),
            1: qos_profile_sensor_data,
            2: qos_profile_parameters,
            3: qos_profile_services_default,
            4: qos_profile_parameter_events,
            5: qos_profile_system_default,
            6: qos_profile_action_status_default}[i]
|
import os.path
# Paths and dataset folder structure
DATASET_PATH = os.path.join(os.pardir, 'moving-sprites-dataset', 'dataset')
DATASET_TRAINING_PATH = os.path.join(DATASET_PATH, 'training')
DATASET_VALIDATION_PATH = os.path.join(DATASET_PATH, 'validation')
DATASET_TEST_PATH = os.path.join(DATASET_PATH, 'test')
DATASET_IMAGES_DIR = 'images'  # per-split subfolder holding the frame images
DATASET_LABELS_FILE = 'labels.csv'  # per-split ground-truth labels
FRAME_IMAGE_FILE_NAME_FORMAT = 'image%05d.png'  # %d: zero-padded frame index

# Dataset properties
RESOLUTION_WIDTH = 200   # frame width in pixels
RESOLUTION_HEIGHT = 200  # frame height in pixels
FRAMES_PER_SECOND = 8

# Training result folder structure
RESULTS_DIR = 'models'
RESULT_DIR_FORMAT = 'model_%s'  # %s: run identifier — presumably a timestamp, confirm at call site
RESULT_MODEL_FILE = 'model.json'  # serialized model architecture
RESULT_MODEL_WEIGHTS_FILE = 'model_weights.h5'  # final weights
RESULT_MODEL_BEST_WEIGHTS_FILE = 'model_best_weights.h5' # weights of model with best validation performance
RESULT_PREDICTED_MASKS_DIR = 'masks_predicted'
RESULT_VIDEO_FILE = 'video.mp4'
RESULT_IMAGES_DIR = 'images_annotated'
RESULT_MODEL_PLOT = 'model.png'  # rendered architecture diagram
RESULT_MASKS_DIR = 'masks_ground_truth'
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import modelcluster.fields
class Migration(migrations.Migration):
    """Initial wagtailmenus schema: FlatMenu/FlatMenuItem and
    MainMenu/MainMenuItem, each menu scoped to a wagtailcore Site and each
    item ordered via sort_order (modelcluster ParentalKey back to its menu)."""

    dependencies = [
        ('wagtailcore', '0023_alter_page_revision_on_delete_behaviour'),
    ]

    operations = [
        migrations.CreateModel(
            name='FlatMenu',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(help_text='For internal reference only.', max_length=255)),
                ('handle', models.SlugField(help_text='Used in to reference this menu in templates etc. Must be unique for the selected site.', max_length=100)),
                ('heading', models.CharField(help_text='If supplied, appears above the menu when displayed on the on the front-end of the site.', max_length=255, blank=True)),
                ('site', models.ForeignKey(on_delete=models.deletion.CASCADE, related_name='flat_menus', to='wagtailcore.Site', verbose_name='site')),
            ],
            options={
                'verbose_name': 'flat menu',
                'verbose_name_plural': 'flat menus',
            },
        ),
        migrations.CreateModel(
            name='FlatMenuItem',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
                ('link_text', models.CharField(help_text="If left blank, the page name will be used. Must be set if you're linking to a custom URL.", max_length=255, blank=True)),
                ('link_url', models.URLField(null=True, verbose_name='Link to a custom URL', blank=True)),
                ('link_page', models.ForeignKey(on_delete=models.deletion.CASCADE, related_name='+', verbose_name='Link to an internal page', blank=True, to='wagtailcore.Page', null=True)),
                ('menu', modelcluster.fields.ParentalKey(on_delete=models.deletion.CASCADE, related_name='menu_items', to='wagtailmenus.FlatMenu')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='MainMenu',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # One main menu per site.
                ('site', models.OneToOneField(on_delete=models.deletion.CASCADE, related_name='main_menu', to='wagtailcore.Site')),
            ],
            options={
                'verbose_name': 'main menu',
                'verbose_name_plural': 'main menu',
            },
        ),
        migrations.CreateModel(
            name='MainMenuItem',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
                ('link_text', models.CharField(help_text="If left blank, the page name will be used. Must be set if you're linking to a custom URL.", max_length=255, blank=True)),
                ('link_url', models.URLField(null=True, verbose_name='Link to a custom URL', blank=True)),
                ('show_children_menu', models.BooleanField(default=True, help_text='The children menu will only appear if this menu item links to a page, and that page has children that are set to appear in menus.', verbose_name='Show a children menu for this item?')),
                ('repeat_in_children_menu', models.BooleanField(help_text="A menu item with children automatically becomes a toggle for accessing the pages below it. Repeating the link in it's children menu allows the page to remain accessible via the main navigation.", verbose_name='Repeat a link to this page in the children menu?')),
                ('children_menu_link_text', models.CharField(help_text='If left blank, the menu item text will be repeated.', max_length=255, verbose_name='Link text for children menu link', blank=True)),
                ('link_page', models.ForeignKey(on_delete=models.deletion.CASCADE, related_name='+', verbose_name='Link to an internal page', blank=True, to='wagtailcore.Page', null=True)),
                ('menu', modelcluster.fields.ParentalKey(on_delete=models.deletion.CASCADE, related_name='menu_items', to='wagtailmenus.MainMenu')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        # A handle identifies a flat menu uniquely within one site.
        migrations.AlterUniqueTogether(
            name='flatmenu',
            unique_together=set([('site', 'handle')]),
        ),
    ]
|
#!/usr/bin/env python
#
# A python package for interpreting METAR and SPECI weather reports.
#
# US conventions for METAR/SPECI reports are described in chapter 12 of
# the Federal Meteorological Handbook No.1. (FMH-1 1995),issued by NOAA.
# See <http://metar.noaa.gov/>
#
# International conventions for the METAR and SPECI codes are specified in
# the WMO Manual on Codes,vol I.1,Part A (WMO-306 I.i.A).
#
# This module handles reports that follow the US conventions, as well as
# the more general encodings in the WMO spec. Other regional conventions
# are not supported at present.
#
# The current METAR report for a given Station is available at the URL
# http://weather.noaa.gov/pub/data/observations/metar/stations/<Station>.TXT
# where <Station> is the four-letter ICAO Station code.
#
# The METAR reports for all reporting stations for any "cycle" (i.e.,hour)
# in the last 24 hours is available in a single file at the URL
# http://weather.noaa.gov/pub/data/observations/metar/cycles/<cycle>Z.TXT
# where <cycle> is a 2-digit cycle number (e.g.,"00","05" or "23").
#
# Copyright 2004 Tom Pollard
#
"""
This module defines the MetarRowParser class. A MetarRowParser object represents the weather report encoded by a single METAR code.
"""
import re
import datetime
import string
from .Datatypes import *
## Exceptions
class ParserError(Exception):
    """Raised when an unparseable group is found in the body of the report."""
## regular expressions to decode various groups of the METAR code
MISSING_RE = re.compile(r"^[M/]+$")
TYPE_RE = re.compile(r"^(?P<type>METAR|SPECI)\s+")
STATION_RE = re.compile(r"^(?P<Station>[A-Z][A-Z0-9]{3})\s+")
TIME_RE = re.compile(r"""^(?P<day>\d\d)
(?P<hour>\d\d)
(?P<min>\d\d)Z?\s+""",
re.VERBOSE)
MODIFIER_RE = re.compile(r"^(?P<mod>AUTO|FINO|NIL|TEST|CORR?|RTD|CC[A-G])\s+")
WIND_RE = re.compile(r"""^(?P<dir>[\dO]{3}|[0O]|///|MMM|VRB)
(?P<Speed>P?[\dO]{2,3}|[0O]+|[/M]{2,3})
(G(?P<gust>P?(\d{1,3}|[/M]{1,3})))?
(?P<units>KTS?|LT|K|T|KMH|MPS)?
(\s+(?P<varfrom>\d\d\d)V
(?P<varto>\d\d\d))?\s+""",
re.VERBOSE)
# VISIBILITY_RE = re.compile(r"""^(?P<vis>(?P<dist>M?(\d\s+)?\d/\d\d?|M?\d+)
# ( \s*(?P<units>SM|KM|M|U) | NDV |
# (?P<dir>[NSEW][EW]?) )? |
# CAVOK )\s+""",
# re.VERBOSE)
# start patch
VISIBILITY_RE = re.compile(r"""^(?P<vis>(?P<dist>(M|P)?\d\d\d\d|////)
(?P<dir>[NSEW][EW]? | NDV)? |
(?P<distu>(M|P)?(\d+|\d\d?/\d\d?|\d+\s+\d/\d))
(?P<units>SM|KM|M|U) |
CAVOK )\s+""",
re.VERBOSE)
# end patch
RUNWAY_RE = re.compile(r"""^(RVRNO |
R(?P<name>\d\d(RR?|LL?|C)?)/
(?P<low>(M|P)?\d\d\d\d)
(V(?P<high>(M|P)?\d\d\d\d))?
(?P<unit>FT)?[/NDU]*)\s+""",
re.VERBOSE)
WEATHER_RE = re.compile(r"""^(?P<int>(-|\+|VC)*)
(?P<desc>(MI|PR|BC|DR|BL|SH|TS|FZ)+)?
(?P<prec>(DZ|RA|SN|SG|IC|PL|GR|GS|UP|/)*)
(?P<obsc>BR|FG|FU|VA|DU|SA|HZ|PY)?
(?P<other>PO|SQ|FC|SS|DS|NSW|/+)?
(?P<int2>[-+])?\s+""",
re.VERBOSE)
SKY_RE = re.compile(r"""^(?P<cover>VV|CLR|SKC|SCK|NSC|NCD|BKN|SCT|FEW|[O0]VC|///)
(?P<height>[\dO]{2,4}|///)?
(?P<cloud>([A-Z][A-Z]+|///))?\s+""",
re.VERBOSE)
TEMP_RE = re.compile(r"""^(?P<temp>(M|-)?\d+|//|XX|MM)/
(?P<dewpt>(M|-)?\d+|//|XX|MM)?\s+""",
re.VERBOSE)
PRESS_RE = re.compile(r"""^(?P<unit>A|Q|QNH|SLP)?
(?P<press>[\dO]{3,4}|////)
(?P<unit2>INS)?\s+""",
re.VERBOSE)
RECENT_RE = re.compile(r"""^RE(?P<desc>MI|PR|BC|DR|BL|SH|TS|FZ)?
(?P<prec>(DZ|RA|SN|SG|IC|PL|GR|GS|UP)*)?
(?P<obsc>BR|FG|FU|VA|DU|SA|HZ|PY)?
(?P<other>PO|SQ|FC|SS|DS)?\s+""",
re.VERBOSE)
WINDSHEAR_RE = re.compile(r"^(WS\s+)?(ALL\s+RWY|RWY(?P<name>\d\d(RR?|L?|C)?))\s+")
COLOR_RE = re.compile(r"""^(BLACK)?(BLU|GRN|WHT|RED)\+?
(/?(BLACK)?(BLU|GRN|WHT|RED)\+?)*\s*""",
re.VERBOSE)
RUNWAYSTATE_RE = re.compile(r"""((?P<name>\d\d) | R(?P<namenew>\d\d)(RR?|LL?|C)?/?)
((?P<special> SNOCLO|CLRD(\d\d|//)) |
(?P<deposit>(\d|/))
(?P<extent>(\d|/))
(?P<depth>(\d\d|//))
(?P<friction>(\d\d|//)))\s+""",
re.VERBOSE)
TREND_RE = re.compile(r"^(?P<trend>TEMPO|BECMG|FCST|NOSIG)\s+")
TRENDTIME_RE = re.compile(r"(?P<when>(FM|TL|AT))(?P<hour>\d\d)(?P<min>\d\d)\s+")
REMARK_RE = re.compile(r"^(RMKS?|NOSPECI|NOSIG)\s+")
## regular expressions for remark groups
AUTO_RE = re.compile(r"^AO(?P<type>\d)\s+")
SEALVL_PRESS_RE = re.compile(r"^SLP(?P<press>\d\d\d)\s+")
PEAK_WIND_RE = re.compile(r"""^P[A-Z]\s+WND\s+
(?P<dir>\d\d\d)
(?P<Speed>P?\d\d\d?)/
(?P<hour>\d\d)?
(?P<min>\d\d)\s+""",
re.VERBOSE)
WIND_SHIFT_RE = re.compile(r"""^WSHFT\s+
(?P<hour>\d\d)?
(?P<min>\d\d)
(\s+(?P<front>FROPA))?\s+""",
re.VERBOSE)
PRECIP_1HR_RE = re.compile(r"^P(?P<precip>\d\d\d\d)\s+")
PRECIP_24HR_RE = re.compile(r"""^(?P<type>6|7)
(?P<precip>\d\d\d\d)\s+""",
re.VERBOSE)
PRESS_3HR_RE = re.compile(r"""^5(?P<tend>[0-8])
(?P<press>\d\d\d)\s+""",
re.VERBOSE)
TEMP_1HR_RE = re.compile(r"""^T(?P<tsign>0|1)
(?P<temp>\d\d\d)
((?P<dsign>0|1)
(?P<dewpt>\d\d\d))?\s+""",
re.VERBOSE)
TEMP_6HR_RE = re.compile(r"""^(?P<type>1|2)
(?P<sign>0|1)
(?P<temp>\d\d\d)\s+""",
re.VERBOSE)
TEMP_24HR_RE = re.compile(r"""^4(?P<smaxt>0|1)
(?P<maxt>\d\d\d)
(?P<smint>0|1)
(?P<mint>\d\d\d)\s+""",
re.VERBOSE)
UNPARSED_RE = re.compile(r"(?P<group>\S+)\s+")
LIGHTNING_RE = re.compile(r"""^((?P<freq>OCNL|FRQ|CONS)\s+)?
LTG(?P<type>(IC|CC|CG|CA)*)
( \s+(?P<loc>( OHD | VC | DSNT\s+ | \s+AND\s+ |
[NSEW][EW]? (-[NSEW][EW]?)* )+) )?\s+""",
re.VERBOSE)
TS_LOC_RE = re.compile(r"""TS(\s+(?P<loc>( OHD | VC | DSNT\s+ | \s+AND\s+ |
[NSEW][EW]? (-[NSEW][EW]?)* )+))?
( \s+MOV\s+(?P<dir>[NSEW][EW]?) )?\s+""",
re.VERBOSE)
## translation of weather location codes
loc_terms = [("OHD", "overhead"),
             ("DSNT", "distant"),
             ("AND", "and"),
             ("VC", "nearby")]


def xlate_loc(loc):
    """Expand the METAR location abbreviations in *loc* to English words."""
    result = loc
    for abbrev, english in loc_terms:
        result = result.replace(abbrev, english)
    return result
## translation of the sky-condition codes into english

# Cloud-coverage codes -> English description.
SKY_COVER = {"SKC": "clear",
             "CLR": "clear",
             "NSC": "clear",
             "NCD": "clear",
             "FEW": "a few ",
             "SCT": "scattered ",
             "BKN": "broken ",
             "OVC": "overcast",
             "///": "",
             "VV": "indefinite ceiling"}

# Cloud-type codes -> English name.
CLOUD_TYPE = {"TCU": "towering cumulus",
              "CU": "cumulus",
              "CB": "cumulonimbus",
              "SC": "stratocumulus",
              "CBMAM": "cumulonimbus mammatus",
              "ACC": "altocumulus castellanus",
              "SCSL": "standing lenticular stratocumulus",
              "CCSL": "standing lenticular cirrocumulus",
              "ACSL": "standing lenticular altocumulus"}

## translation of the present-weather codes into english

# Intensity / proximity prefixes.
WEATHER_INT = {"-": "light",
               "+": "heavy",
               "-VC": "nearby light",
               "+VC": "nearby heavy",
               "VC": "nearby"}

# Weather descriptors.
WEATHER_DESC = {"MI": "shallow",
                "PR": "partial",
                "BC": "patches of",
                "DR": "low drifting",
                "BL": "blowing",
                "SH": "showers",
                "TS": "thunderstorm",
                "FZ": "freezing"}

# Precipitation types.
WEATHER_PREC = {"DZ": "drizzle",
                "RA": "rain",
                "SN": "snow",
                "SG": "snow grains",
                "IC": "ice crystals",
                "PL": "ice pellets",
                "GR": "hail",
                "GS": "snow pellets",
                "UP": "unknown Precipitation",
                "//": ""}

# Obscuration phenomena.
WEATHER_OBSC = {"BR": "mist",
                "FG": "fog",
                "FU": "smoke",
                "VA": "volcanic ash",
                "DU": "dust",
                "SA": "sand",
                "HZ": "haze",
                "PY": "spray"}

# Other significant weather.
WEATHER_OTHER = {"PO": "sand whirls",
                 "SQ": "squalls",
                 "FC": "funnel cloud",
                 "SS": "sandstorm",
                 "DS": "dust storm"}

# Special combined codes.
WEATHER_SPECIAL = {"+FC": "tornado"}

# Military color-state codes.
COLOR = {"BLU": "blue",
         "GRN": "green",
         "WHT": "white"}

## translation of various remark codes into English

# 3-hour pressure-tendency characteristic codes.
PRESSURE_TENDENCY = {"0": "increasing, then decreasing",
                     "1": "increasing more slowly",
                     "2": "increasing",
                     "3": "increasing more quickly",
                     "4": "steady",
                     "5": "decreasing, then increasing",
                     "6": "decreasing more slowly",
                     "7": "decreasing",
                     "8": "decreasing more quickly"}

LIGHTNING_FREQUENCY = {"OCNL": "occasional",
                       "FRQ": "frequent",
                       "CONS": "constant"}

LIGHTNING_TYPE = {"IC": "intracloud",
                  "CC": "cloud-to-cloud",
                  "CG": "cloud-to-ground",
                  "CA": "cloud-to-air"}

REPORT_TYPE = {"METAR": "routine report",
               "SPECI": "special report",
               "AUTO": "automatic report",
               "COR": "manually corrected report"}
## Helper functions
def _report_match(handler, match):
"""Report success or failure of the given handler function. (DEBUG)"""
if match:
print(handler.__name__, " matched '" + match + "'")
else:
print(handler.__name__, " didn't match...")
def _unparsedGroup(self, d):
"""
Handle otherwise unparseable main-body groups.
"""
self._unparsed_groups.append(d['group'])
## METAR report objects
debug = False
class MetarRowParser(object):
"""METAR (aviation meteorology report)"""
def __init__(self, metarcode, month=None, year=None, utcdelta=None):
"""Parse raw METAR code."""
self.code = metarcode # original METAR code
self.type = 'METAR' # METAR (routine) or SPECI (special)
self.mod = "AUTO" # AUTO (automatic) or COR (corrected)
self.station_id = None # 4-character ICAO Station code
self.time = None # observation time [datetime]
self.cycle = None # observation cycle (0-23) [int]
self.wind_dir = None # wind Direction [Direction]
self.wind_speed = None # wind Speed [Speed]
self.wind_gust = None # wind gust Speed [Speed]
self.wind_dir_from = None # beginning of range for win dir [Direction]
self.wind_dir_to = None # end of range for wind dir [Direction]
self.vis = None # visibility [Distance]
self.vis_dir = None # visibility Direction [Direction]
self.max_vis = None # visibility [Distance]
self.max_vis_dir = None # visibility Direction [Direction]
self.temp = None # Temperature (C) [Temperature]
self.dewpt = None # dew point (C) [Temperature]
self.press = None # barometric Pressure [Pressure]
self.runway = [] # runway visibility (list of tuples)
self.weather = [] # present weather (list of tuples)
self.recent = [] # recent weather (list of tuples)
self.sky = [] # sky conditions (list of tuples)
self.windshear = [] # runways w/ wind shear (list of strings)
self.wind_speed_peak = None # peak wind Speed in last hour
self.wind_dir_peak = None # Direction of peak wind Speed in last hour
self.peak_wind_time = None # time of peak wind observation [datetime]
self.wind_shift_time = None # time of wind shift [datetime]
self.max_temp_6hr = None # max temp in last 6 hours
self.min_temp_6hr = None # min temp in last 6 hours
self.max_temp_24hr = None # max temp in last 24 hours
self.min_temp_24hr = None # min temp in last 24 hours
self.press_sea_level = None # sea-level Pressure
self.precip_1hr = None # Precipitation over the last hour
self.precip_3hr = None # Precipitation over the last 3 hours
self.precip_6hr = None # Precipitation over the last 6 hours
self.precip_24hr = None # Precipitation over the last 24 hours
self._trend = False # trend groups present (bool)
self._trend_groups = [] # trend forecast groups
self._remarks = [] # remarks (list of strings)
self._unparsed_groups = []
self._unparsed_remarks = []
self._now = datetime.datetime.utcnow()
if utcdelta:
self._utcdelta = utcdelta
else:
self._utcdelta = datetime.datetime.now() - self._now
self._month = month
self._year = year
code = self.code + " " # (the regexps all expect trailing spaces...)
try:
ngroup = len(MetarRowParser.handlers)
igroup = 0
ifailed = -1
while igroup < ngroup and code:
pattern, handler, repeatable = MetarRowParser.handlers[igroup]
if debug: print(handler.__name__, ": ", code)
m = pattern.match(code)
while m:
ifailed = -1
if debug: _report_match(handler, m.group())
handler(self, m.groupdict())
code = code[m.end():]
if self._trend:
code = self._do_trend_handlers(code)
if not repeatable: break
if debug: print(handler.__name__, ": ", code)
m = pattern.match(code)
if not m and ifailed < 0:
ifailed = igroup
igroup += 1
if igroup == ngroup and not m:
# print "** it's not a main-body group **"
pattern, handler = (UNPARSED_RE, _unparsedGroup)
if debug: print(handler.__name__, ": ", code)
m = pattern.match(code)
if debug: _report_match(handler, m.group())
handler(self, m.groupdict())
code = code[m.end():]
igroup = ifailed
ifailed = -2 # if it's still -2 when we run out of main-body
# groups, we'll try parsing this group as a remark
if pattern == REMARK_RE or self.press:
while code:
for pattern, handler in MetarRowParser.remark_handlers:
if debug: print(handler.__name__, ": ", code)
m = pattern.match(code)
if m:
if debug: _report_match(handler, m.group())
handler(self, m.groupdict())
code = pattern.sub("", code, 1)
break
except Exception as err:
raise ParserError(handler.__name__ + " failed while processing '" + code + "'\n" + ' '.join(err.args))
raise err
if self._unparsed_groups:
code = ' '.join(self._unparsed_groups)
raise ParserError("Unparsed groups in body: "+code)
def _do_trend_handlers(self, code):
    """
    Parse the trend-forecast groups that follow the main body.

    Tries each trend pattern in order against the head of *code*;
    matched text is recorded verbatim in self._trend_groups, handed to
    its handler, and consumed. Returns the remaining unparsed text.
    """
    for pattern, handler, repeatable in MetarRowParser.trend_handlers:
        if debug: print(handler.__name__, ": ", code)
        m = pattern.match(code)
        while m:
            if debug: _report_match(handler, m.group())
            self._trend_groups.append(str(m.group()).strip())
            handler(self, m.groupdict())
            code = code[m.end():]
            # non-repeatable groups may only consume one match
            if not repeatable: break
            m = pattern.match(code)
    return code
def __str__(self):
    """Return the human-readable decoded report (same as self.string())."""
    return self.string()
def _handleType(self, d):
"""
Parse the code-type group.
The following attributes are set:
type [string]
"""
self.type = d['type']
def _handleStation(self, d):
"""
Parse the Station id group.
The following attributes are set:
station_id [string]
"""
self.station_id = d['Station']
def _handleModifier(self, d):
"""
Parse the report-modifier group.
The following attributes are set:
mod [string]
"""
mod = d['mod']
if mod == 'CORR': mod = 'COR'
if mod == 'NIL' or mod == 'FINO': mod = 'NO DATA'
self.mod = mod
def _handleTime(self, d):
    """
    Parse the observation-time group.

    The report only carries day/hour/minute; missing month and year are
    inferred from the current UTC time, rolling backwards when the
    day-of-month would otherwise lie in the future.
    The following attributes are set:
        time [datetime]
        cycle [int]
        _day [int]
        _hour [int]
        _min [int]
    """
    self._day = int(d['day'])
    if not self._month:
        self._month = self._now.month
        # a day later than today means the report is from last month
        if self._day > self._now.day:
            if self._month == 1:
                self._month = 12
            else:
                self._month = self._month - 1
    if not self._year:
        self._year = self._now.year
        # likewise roll the year back if the inferred date would be in the future
        if self._month > self._now.month:
            self._year = self._year - 1
        elif self._month == self._now.month and self._day > self._now.day:
            self._year = self._year - 1
    self._hour = int(d['hour'])
    self._min = int(d['min'])
    self.time = datetime.datetime(self._year, self._month, self._day,
                                  self._hour, self._min)
    # observations at minute >= 45 are counted towards the next hourly cycle
    if self._min < 45:
        self.cycle = self._hour
    else:
        self.cycle = self._hour + 1
def _handleWind(self, d):
    """
    Parse the wind and variable-wind groups.

    Sets: wind_dir [Direction], wind_speed [Speed], wind_gust [Speed],
    wind_dir_from [Direction], wind_dir_to [Direction].
    Mis-keyed 'O' characters are treated as zeros; a leading 'P' on a
    speed means "greater than" the reported value.
    """
    direction = d['dir'].replace('O', '0')
    # 'VRB' and the missing-value markers carry no usable direction
    if direction not in ('VRB', '///', 'MMM'):
        self.wind_dir = Direction(direction)
    speed = d['Speed'].replace('O', '0')
    units = d['units']
    if units in ('KTS', 'K', 'T', 'LT'):
        units = 'KT'
    if speed.startswith("P"):
        self.wind_speed = Speed(speed[1:], units, ">")
    elif not MISSING_RE.match(speed):
        self.wind_speed = Speed(speed, units)
    gust = d['gust']
    if gust:
        if gust.startswith("P"):
            self.wind_gust = Speed(gust[1:], units, ">")
        elif not MISSING_RE.match(gust):
            self.wind_gust = Speed(gust, units)
    if d['varfrom']:
        self.wind_dir_from = Direction(d['varfrom'])
        self.wind_dir_to = Direction(d['varto'])
def _handleVisibility(self, d):
    """
    Parse the minimum and maximum visibility groups.

    The first visibility group seen fills vis/vis_dir; a second group
    is recorded as max_vis/max_vis_dir. "9999" is the METAR convention
    for "10 km or more".
    Sets: vis [Distance], vis_dir [Direction],
          max_vis [Distance], max_vis_dir [Direction]
    """
    # (removed an unused local that read d['vis'] without using it)
    vis_less = None
    vis_dir = None
    vis_units = "M"
    vis_dist = "10000"
    if d['dist'] and d['dist'] != '////':
        vis_dist = d['dist']
        if d['dir'] and d['dir'] != 'NDV':
            vis_dir = d['dir']
    elif d['distu']:
        vis_dist = d['distu']
        if d['units'] and d['units'] != "U":
            vis_units = d['units']
    if vis_dist == "9999":
        # coded maximum: report as "greater than 10000 m"
        vis_dist = "10000"
        vis_less = ">"
    if self.vis:
        # second group: record as the maximum visibility
        if vis_dir:
            self.max_vis_dir = Direction(vis_dir)
        self.max_vis = Distance(vis_dist, vis_units, vis_less)
    else:
        if vis_dir:
            self.vis_dir = Direction(vis_dir)
        self.vis = Distance(vis_dist, vis_units, vis_less)
def _handleRunway(self, d):
    """
    Parse a runway visual range group.

    Appends (name [string], low [Distance], high [Distance]) to
    self.runway; when no high value is reported, high equals low.
    Groups without a runway name are ignored.
    """
    if not d['name']:
        return
    low = Distance(d['low'])
    high = Distance(d['high']) if d['high'] else low
    self.runway.append((d['name'], low, high))
def _handleWeather(self, d):
"""
Parse a present-weather group.
The following attributes are set:
weather [list of tuples]
. intensity [string]
. description [string]
. Precipitation [string]
. obscuration [string]
. other [string]
"""
inteni = d['int']
if not inteni and d['int2']:
inteni = d['int2']
desci = d['desc']
preci = d['prec']
obsci = d['obsc']
otheri = d['other']
self.weather.append((inteni, desci, preci, obsci, otheri))
def _handleSky(self, d):
"""
Parse a sky-conditions group.
The following attributes are set:
sky [list of tuples]
. cover [string]
. height [Distance]
. cloud [string]
"""
height = d['height']
if not height or height == "///":
height = None
else:
height = height.replace('O', '0')
height = Distance(int(height) * 100, "FT")
cover = d['cover']
if cover == 'SCK' or cover == 'SKC' or cover == 'CL': cover = 'CLR'
if cover == '0VC': cover = 'OVC'
cloud = d['cloud']
if cloud == '///': cloud = ""
self.sky.append((cover, height, cloud))
def _handleTemp(self, d):
"""
Parse a Temperature-dewpoint group.
The following attributes are set:
temp Temperature (Celsius) [float]
dewpt dew point (Celsius) [float]
"""
temp = d['temp']
dewpt = d['dewpt']
if temp and temp != "//" and temp != "XX" and temp != "MM":
self.temp = Temperature(temp)
if dewpt and dewpt != "//" and dewpt != "XX" and dewpt != "MM":
self.dewpt = Temperature(dewpt)
def _handlePressure(self, d):
    """
    Parse an altimeter-pressure group.

    The following attributes are set:
        press [Pressure]
    Mis-keyed 'O' characters are treated as zeros; '////' marks a
    missing value and sets nothing.
    """
    press = d['press']
    if press != '////':
        press = float(press.replace('O', '0'))
        if d['unit']:
            # explicit unit: 'A' / 'INS' are inches of mercury reported in
            # hundredths; 'SLP' is a coded sea-level pressure in tenths of hPa
            if d['unit'] == 'A' or (d['unit2'] and d['unit2'] == 'INS'):
                self.press = Pressure(press / 100, 'IN')
            elif d['unit'] == 'SLP':
                # SLP encoding drops the leading 9/10: values below 500
                # decode into the 1000+ hPa range, the rest into 900+
                if press < 500:
                    press = press / 10 + 1000
                else:
                    press = press / 10 + 900
                self.press = Pressure(press, 'MB')
                self._remarks.append("sea-level Pressure %.1fhPa" % press)
            else:
                self.press = Pressure(press, 'MB')
        elif press > 2500:
            # no unit given: values this large look like inches in hundredths
            self.press = Pressure(press / 100, 'IN')
        else:
            self.press = Pressure(press, 'MB')
def _handleRecent(self, d):
"""
Parse a recent-weather group.
The following attributes are set:
weather [list of tuples]
. intensity [string]
. description [string]
. Precipitation [string]
. obscuration [string]
. other [string]
"""
desci = d['desc']
preci = d['prec']
obsci = d['obsc']
otheri = d['other']
self.recent.append(("", desci, preci, obsci, otheri))
def _handleWindShear(self, d):
"""
Parse wind-shear groups.
The following attributes are set:
windshear [list of strings]
"""
if d['name']:
self.windshear.append(d['name'])
else:
self.windshear.append("ALL")
def _handleColor(self, d):
    """
    Parse (and ignore) the color groups.

    Recognised only so the main parse loop can consume them; no
    attributes are set.
    """
    pass
def _handleRunwayState(self, d):
    """
    Parse (and ignore) the runway-state group.

    Recognised only so the main parse loop can consume it; no
    attributes are set.
    """
    pass
def _handleTrend(self, d):
"""
Parse (and ignore) the trend groups.
"""
if d.get('trend', None) is not None:
self._trend_groups.append(d['trend'])
self._trend = True
def _startRemarks(self, d):
"""
Found the start of the remarks section.
"""
self._remarks = []
def _handleSealvlPressRemark(self, d):
    """
    Parse the sea-level pressure remark group.

    The coded value is in tenths of hPa with the leading 9/10 dropped:
    decoded values below 50 fall in the 1000+ hPa range, the rest in
    900+. Sets press_sea_level and, if no body pressure was seen, press.
    """
    value = float(d['press']) / 10.0
    value += 1000 if value < 50 else 900
    if not self.press:
        self.press = Pressure(value, "MB")
    self.press_sea_level = Pressure(value, "MB")
def _handlePrecip24hrRemark(self, d):
    """
    Parse a 3-, 6- or 24-hour cumulative precipitation remark group.

    A type-"6" group reports a 3-hour total at the 3/9/15/21Z cycles
    and a 6-hour total otherwise; any other type is the 24-hour total.
    Values are hundredths of an inch.
    """
    value = float(d['precip']) / 100.0
    if d['type'] == "6":
        if self.cycle in (3, 9, 15, 21):
            self.precip_3hr = Precipitation(value, "IN")
        else:
            self.precip_6hr = Precipitation(value, "IN")
    else:
        self.precip_24hr = Precipitation(value, "IN")
def _handlePrecip1hrRemark(self, d):
    """Parse an hourly precipitation remark group (hundredths of an inch)."""
    self.precip_1hr = Precipitation(float(d['precip']) / 100.0, "IN")
def _handleTemp1hrRemark(self, d):
    """
    Parse a temperature & dewpoint remark group.

    Values are in tenths of a degree C; a sign digit of "1" means
    negative. These values replace temp/dewpt from the report body.
    """
    temp = float(d['temp']) / 10.0
    if d['tsign'] == "1":
        temp = -temp
    self.temp = Temperature(temp)
    if d['dewpt']:
        dewpt = float(d['dewpt']) / 10.0
        if d['dsign'] == "1":
            dewpt = -dewpt
        self.dewpt = Temperature(dewpt)
def _handleTemp6hrRemark(self, d):
    """
    Parse a 6-hour maximum or minimum temperature remark group.

    Type "1" reports the maximum, anything else the minimum; values
    are tenths of a degree C with sign digit "1" meaning negative.
    """
    value = float(d['temp']) / 10.0
    if d['sign'] == "1":
        value = -value
    if d['type'] == "1":
        self.max_temp_6hr = Temperature(value, "C")
    else:
        self.min_temp_6hr = Temperature(value, "C")
def _handleTemp24hrRemark(self, d):
    """
    Parse a 24-hour maximum/minimum temperature remark group.

    Values are tenths of a degree C; sign digits of "1" mean negative.
    """
    maxt = float(d['maxt']) / 10.0
    if d['smaxt'] == "1":
        maxt = -maxt
    mint = float(d['mint']) / 10.0
    if d['smint'] == "1":
        mint = -mint
    self.max_temp_24hr = Temperature(maxt, "C")
    self.min_temp_24hr = Temperature(mint, "C")
def _handlePress3hrRemark(self, d):
    """
    Parse a pressure-tendency remark group into a readable remark.

    The coded change is in tenths of hPa; the tendency character is
    translated via the PRESSURE_TENDENCY table.
    """
    change = float(d['press']) / 10.0
    tendency = PRESSURE_TENDENCY[d['tend']]
    self._remarks.append("3-hr Pressure change %.1fhPa, %s" % (change, tendency))
def _handlePeakWindRemark(self, d):
    """
    Parse a peak wind remark group.

    The following attributes are set:
        wind_speed_peak [Speed]
        wind_dir_peak [Direction]
        peak_wind_time [datetime]
    """
    peak_dir = int(d['dir'])
    peak_speed = int(d['Speed'])
    self.wind_speed_peak = Speed(peak_speed, "KT")
    self.wind_dir_peak = Direction(peak_dir)
    peak_min = int(d['min'])
    if d['hour']:
        peak_hour = int(d['hour'])
    else:
        # no hour reported: assume the observation hour
        peak_hour = self._hour
    self.peak_wind_time = datetime.datetime(self._year, self._month, self._day,
                                            peak_hour, peak_min)
    # the peak must precede the observation time: wrap back a day if the
    # coded hour is later than the observation hour, otherwise one hour
    if self.peak_wind_time > self.time:
        if peak_hour > self._hour:
            self.peak_wind_time -= datetime.timedelta(hours=24)
        else:
            self.peak_wind_time -= datetime.timedelta(hours=1)
    self._remarks.append("peak wind %dkt from %d degrees at %d:%02d" % \
                         (peak_speed, peak_dir, peak_hour, peak_min))
def _handleWindShiftRemark(self, d):
"""
Parse a wind shift remark group.
"""
if d['hour']:
wshft_hour = int(d['hour'])
else:
wshft_hour = self._hour
wshft_min = int(d['min'])
self.wind_shift_time = datetime.datetime(self._year, self._month, self._day,
wshft_hour, wshft_min)
if self.wind_shift_time > self.time:
if wshft_hour > self._hour:
self.wind_shift_time -= datetime.timedelta(hours=24)
else:
self.wind_shift_time -= datetime.timedelta(hours=1)
text = "wind shift at %d:%02d" % (wshft_hour, wshft_min)
if d['front']:
text += " (front)"
self._remarks.append(text)
def _handleLightningRemark(self, d):
    """
    Parse a lightning observation remark group into a readable remark.

    Frequency, packed two-letter type codes, and location are each
    optional and translated via the LIGHTNING_* tables / xlate_loc.
    """
    words = []
    if d['freq']:
        words.append(LIGHTNING_FREQUENCY[d['freq']])
    words.append("lightning")
    if d['type']:
        # the type field packs two-letter codes back to back
        codes = d['type']
        kinds = [LIGHTNING_TYPE[codes[i:i + 2]] for i in range(0, len(codes), 2)]
        words.append("(" + ', '.join(kinds) + ")")
    if d['loc']:
        words.append(xlate_loc(d['loc']))
    self._remarks.append(' '.join(words))
def _handleTSLocRemark(self, d):
"""
Parse a thunderstorm location remark group.
"""
text = "thunderstorm"
if d['loc']:
text += " " + xlate_loc(d['loc'])
if d['dir']:
text += " moving %s" % d['dir']
self._remarks.append(text)
def _handleAutoRemark(self, d):
"""
Parse an automatic Station remark group.
"""
if d['type'] == "1":
self._remarks.append("Automated Station")
elif d['type'] == "2":
self._remarks.append("Automated Station (type 2)")
def _unparsedRemark(self, d):
"""
Handle otherwise unparseable remark groups.
"""
self._unparsed_remarks.append(d['group'])
## The list of patterns/handlers used (in order) to process the main
## body of a METAR report.  Each entry is (regex, handler, repeatable):
## a repeatable pattern may consume several consecutive groups.
handlers = [(TYPE_RE, _handleType, False),
            (STATION_RE, _handleStation, False),
            (TIME_RE, _handleTime, False),
            (MODIFIER_RE, _handleModifier, False),
            (WIND_RE, _handleWind, False),
            (VISIBILITY_RE, _handleVisibility, True),
            (RUNWAY_RE, _handleRunway, True),
            (WEATHER_RE, _handleWeather, True),
            (SKY_RE, _handleSky, True),
            (TEMP_RE, _handleTemp, False),
            (PRESS_RE, _handlePressure, True),
            (RECENT_RE, _handleRecent, True),
            (WINDSHEAR_RE, _handleWindShear, True),
            (COLOR_RE, _handleColor, True),
            (RUNWAYSTATE_RE, _handleRunwayState, True),
            (TREND_RE, _handleTrend, False),
            (REMARK_RE, _startRemarks, False)]
## Patterns recognised inside a trend forecast; all of them are simply
## recorded verbatim by _handleTrend.
trend_handlers = [(TRENDTIME_RE, _handleTrend, True),
                  (WIND_RE, _handleTrend, True),
                  (VISIBILITY_RE, _handleTrend, True),
                  (WEATHER_RE, _handleTrend, True),
                  (SKY_RE, _handleTrend, True),
                  (COLOR_RE, _handleTrend, True)]
## the list of patterns for the various remark groups,
## paired with the handler functions to use to record the decoded remark.
remark_handlers = [(AUTO_RE, _handleAutoRemark),
                   (SEALVL_PRESS_RE, _handleSealvlPressRemark),
                   (PEAK_WIND_RE, _handlePeakWindRemark),
                   (WIND_SHIFT_RE, _handleWindShiftRemark),
                   (LIGHTNING_RE, _handleLightningRemark),
                   (TS_LOC_RE, _handleTSLocRemark),
                   (TEMP_1HR_RE, _handleTemp1hrRemark),
                   (PRECIP_1HR_RE, _handlePrecip1hrRemark),
                   (PRECIP_24HR_RE, _handlePrecip24hrRemark),
                   (PRESS_3HR_RE, _handlePress3hrRemark),
                   (TEMP_6HR_RE, _handleTemp6hrRemark),
                   (TEMP_24HR_RE, _handleTemp24hrRemark),
                   (UNPARSED_RE, _unparsedRemark)]
## functions that return text representations of conditions for output
def string(self):
    """
    Return a human-readable version of the decoded report.

    Only fields that were actually decoded produce a line; the raw
    METAR code is always appended last.
    """
    lines = []
    lines.append("Station: %s" % self.station_id)
    if self.type:
        lines.append("type: %s" % self.report_type())
    if self.time:
        lines.append("time: %s" % self.time.ctime())
    if self.temp:
        lines.append("Temperature: %s" % self.temp.string("C"))
    if self.dewpt:
        lines.append("dew point: %s" % self.dewpt.string("C"))
    if self.wind_speed:
        lines.append("wind: %s" % self.wind())
    if self.wind_speed_peak:
        lines.append("peak wind: %s" % self.peak_wind())
    if self.wind_shift_time:
        lines.append("wind shift: %s" % self.wind_shift())
    if self.vis:
        lines.append("visibility: %s" % self.visibility())
    if self.runway:
        lines.append("visual range: %s" % self.runway_visual_range())
    if self.press:
        lines.append("Pressure: %s" % self.press.string("mb"))
    if self.weather:
        lines.append("weather: %s" % self.present_weather())
    if self.sky:
        lines.append("sky: %s" % self.sky_conditions("\n "))
    if self.press_sea_level:
        lines.append("sea-level Pressure: %s" % self.press_sea_level.string("mb"))
    if self.max_temp_6hr:
        lines.append("6-hour max temp: %s" % str(self.max_temp_6hr))
    # BUG FIX: the min-temp lines below were previously guarded by the
    # corresponding MAX-temp attributes, so a report carrying only a
    # minimum never printed it (and one with only a maximum printed "None")
    if self.min_temp_6hr:
        lines.append("6-hour min temp: %s" % str(self.min_temp_6hr))
    if self.max_temp_24hr:
        lines.append("24-hour max temp: %s" % str(self.max_temp_24hr))
    if self.min_temp_24hr:
        lines.append("24-hour min temp: %s" % str(self.min_temp_24hr))
    if self.precip_1hr:
        lines.append("1-hour Precipitation: %s" % str(self.precip_1hr))
    if self.precip_3hr:
        lines.append("3-hour Precipitation: %s" % str(self.precip_3hr))
    if self.precip_6hr:
        lines.append("6-hour Precipitation: %s" % str(self.precip_6hr))
    if self.precip_24hr:
        lines.append("24-hour Precipitation: %s" % str(self.precip_24hr))
    if self._remarks:
        lines.append("remarks:")
        lines.append("- " + self.remarks("\n- "))
    if self._unparsed_remarks:
        lines.append("- " + ' '.join(self._unparsed_remarks))
    lines.append("METAR: " + self.code)
    return '\n'.join(lines)
def report_type(self):
    """
    Return a textual description of the report type, including the
    cycle number and any report modifier.
    """
    if self.type is None:
        text = "unknown report type"
    else:
        mapped = REPORT_TYPE.get(self.type)
        text = mapped if mapped is not None else self.type + " report"
    if self.cycle:
        text += ", cycle %d" % self.cycle
    if self.mod:
        mapped = REPORT_TYPE.get(self.mod)
        text += " (%s)" % (mapped if mapped is not None else self.mod)
    return text
def wind(self, units="KT"):
    """
    Return a textual description of the wind conditions.
    Units may be specified as "MPS", "KT", "KMH", or "MPH".
    """
    if self.wind_speed is None:
        return "missing"
    if self.wind_speed.value() == 0.0:
        text = "calm"
    else:
        speed_text = self.wind_speed.string(units)
        if not self.wind_dir:
            text = "variable at %s" % speed_text
        elif self.wind_dir_from:
            text = "%s to %s at %s" % (self.wind_dir_from.compass(),
                                       self.wind_dir_to.compass(), speed_text)
        else:
            text = "%s at %s" % (self.wind_dir.compass(), speed_text)
    if self.wind_gust:
        text += ", gusting to %s" % self.wind_gust.string(units)
    return text
def peak_wind(self, units="KT"):
    """
    Return a textual description of the peak wind conditions.
    Units may be specified as "MPS", "KT", "KMH", or "MPH".
    """
    if self.wind_speed_peak is None:
        return "missing"
    if self.wind_speed_peak.value() == 0.0:
        text = "calm"
    else:
        speed_text = self.wind_speed_peak.string(units)
        if self.wind_dir_peak:
            text = "%s at %s" % (self.wind_dir_peak.compass(), speed_text)
        else:
            text = speed_text
    if self.peak_wind_time is not None:
        text += " at %s" % self.peak_wind_time.strftime('%H:%M')
    return text
def wind_shift(self, units="KT"):
    """
    Return the wind shift time formatted as HH:MM, or "missing".

    The *units* argument is accepted for interface symmetry with the
    other wind methods and is not used.
    """
    if self.wind_shift_time is None:
        return "missing"
    return self.wind_shift_time.strftime('%H:%M')
def visibility(self, units=None):
    """
    Return a textual description of the visibility.
    Units may be statute miles ("SM") or meters ("M").
    """
    if self.vis is None:
        return "missing"
    text = self.vis.string(units)
    if self.vis_dir:
        text += " to %s" % self.vis_dir.compass()
    if self.max_vis:
        text += "; %s" % self.max_vis.string(units)
        if self.max_vis_dir:
            text += " to %s" % self.max_vis_dir.compass()
    return text
def runway_visual_range(self, units=None):
    """
    Return a textual description of the runway visual range, one
    clause per runway, joined with '; '.
    """
    clauses = []
    for name, low, high in self.runway:
        if low != high:
            clauses.append("on runway %s, from %d to %s"
                           % (name, low.value(units), high.string(units)))
        else:
            clauses.append("on runway %s, %s" % (name, low.string(units)))
    return '; '.join(clauses)
def present_weather(self):
    """
    Return a textual description of the present weather.

    Each decoded weather tuple is rendered via the intensity /
    descriptor / precipitation / obscuration / other code tables;
    WEATHER_SPECIAL overrides the generic rendering for known full
    code combinations.
    """
    text_list = []
    for weatheri in self.weather:
        (inteni, desci, preci, obsci, otheri) = weatheri
        text_parts = []
        code_parts = []
        if inteni:
            code_parts.append(inteni)
            text_parts.append(WEATHER_INT[inteni])
        if desci:
            code_parts.append(desci)
            # "SH" (with precipitation) reads better after the
            # precipitation word, so its text is deferred below
            if desci != "SH" or not preci:
                text_parts.append(WEATHER_DESC[desci[0:2]])
                if len(desci) == 4:
                    text_parts.append(WEATHER_DESC[desci[2:]])
        if preci:
            code_parts.append(preci)
            # precipitation codes pack two-letter units, up to three
            # NOTE(review): a code length other than 2/4/6 would leave
            # precip_text unbound and raise UnboundLocalError below
            if len(preci) == 2:
                precip_text = WEATHER_PREC[preci]
            elif len(preci) == 4:
                precip_text = WEATHER_PREC[preci[:2]] + " and "
                precip_text += WEATHER_PREC[preci[2:]]
            elif len(preci) == 6:
                precip_text = WEATHER_PREC[preci[:2]] + ", "
                precip_text += WEATHER_PREC[preci[2:4]] + " and "
                precip_text += WEATHER_PREC[preci[4:]]
            if desci == "TS":
                text_parts.append("with")
            text_parts.append(precip_text)
            if desci == "SH":
                text_parts.append(WEATHER_DESC[desci])
        if obsci:
            code_parts.append(obsci)
            text_parts.append(WEATHER_OBSC[obsci])
        if otheri:
            code_parts.append(otheri)
            text_parts.append(WEATHER_OTHER[otheri])
        code = ' '.join(code_parts)
        if WEATHER_SPECIAL.get(code) is not None:
            text_list.append(WEATHER_SPECIAL[code])
        else:
            text_list.append(' '.join(text_parts))
    return '; '.join(text_list)
def sky_conditions(self, sep="; "):
    """
    Return a textual description of the sky conditions, one entry
    per decoded layer, joined by *sep*.
    """
    descriptions = []
    for cover, height, cloud in self.sky:
        if cover in ("SKC", "CLR"):
            descriptions.append(SKY_COVER[cover])
            continue
        if cloud:
            what = CLOUD_TYPE[cloud]
        elif cover != "OVC":
            what = "clouds"
        else:
            what = ""
        if cover == "VV":
            descriptions.append("%s%s, visibility to %s"
                                % (SKY_COVER[cover], what, str(height)))
        else:
            descriptions.append("%s%s at %s"
                                % (SKY_COVER[cover], what, str(height)))
    return sep.join(descriptions)
def trend(self):
    """
    Return the recorded trend forecast groups as a single
    space-separated string.
    """
    return ' '.join(self._trend_groups)
def remarks(self, sep="; "):
    """
    Return the decoded remarks joined by *sep*.
    """
    return sep.join(self._remarks)
|
#!/usr/bin/env python3
"""
FastQC - A quality control analysis tool for high throughput sequencing data
https://github.com/s-andrews/FastQC
"""
import os
import re
from paleomix.common.command import AtomicCmd, InputFile, OutputFile
from paleomix.common.versions import Requirement
from paleomix.node import CommandNode
# File extensions stripped by FastQC when naming its output files
_FASTQC_EXCLUDED_EXTENSIONS = re.compile(
    r"(\.gz|\.bz2|\.txt|\.fastq|\.fq|\.csfastq|\.sam|\.bam)+$"
)
class FastQCNode(CommandNode):
    """Node running FastQC on a single input file, placing the report in out_folder."""

    def __init__(self, in_file, out_folder, options=None, dependencies=()):
        """
        Parameters:
            in_file      -- path of the file to analyse.
            out_folder   -- destination folder for the _fastqc.html/.zip outputs.
            options      -- optional dict of extra fastqc command-line options.
            dependencies -- nodes that must complete before this one runs.
        """
        # BUG FIX: 'options' previously defaulted to a mutable {} shared
        # between all calls; default to None and create a fresh dict instead.
        if options is None:
            options = {}
        out_prefix = _FASTQC_EXCLUDED_EXTENSIONS.sub("", os.path.basename(in_file))
        command = AtomicCmd(
            ["fastqc", InputFile(in_file)],
            extra_files=[
                OutputFile(os.path.join(out_folder, out_prefix + "_fastqc.html")),
                OutputFile(os.path.join(out_folder, out_prefix + "_fastqc.zip")),
            ],
            requirements=[
                Requirement(
                    name="FastQC",
                    call=["fastqc", "--version"],
                    regexp=r"FastQC v(\d+\.\d+\.\d+)",
                ),
            ],
        )
        command.merge_options(
            user_options=options,
            fixed_options={
                # write both the results and scratch files into the sandbox
                "--outdir": "%(TEMP_DIR)s",
                "--dir": "%(TEMP_DIR)s",
            },
        )
        CommandNode.__init__(
            self,
            command=command,
            description="fastQC of {}".format(in_file),
            dependencies=dependencies,
        )
|
# JSON Schema (draft-07) for a minimal response object: an object with a
# required integer 'status' constrained to the 100-699 range.
ResponseSchema = {
    '$schema': 'http://json-schema.org/draft-07/schema#',
    'type': 'object',
    'properties': {
        'status': {
            'type': 'integer',
            'minimum': 100,
            'maximum': 699
        }
    },
    'required': ['status']
}
|
# Generated by Django 2.1 on 2018-08-23 22:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the nullable 'source' CharField to the callapi CallRecord model."""

    dependencies = [
        ('callapi', '0002_auto_20180820_2123'),
    ]
    operations = [
        migrations.AddField(
            model_name='callrecord',
            name='source',
            # max_length=11 suggests a phone-number format -- TODO confirm
            field=models.CharField(max_length=11, null=True),
        ),
    ]
|
import sqlalchemy
from sqlalchemy import create_engine,text
import os
from sqlalchemy.orm import Session,sessionmaker
# SQLAlchemy version check
print(sqlalchemy.__version__) # 1.4.27
# Create the SQLAlchemy engine and connect it to the SQLite database file
# print(os.getcwd())
# NOTE(review): the backslash separators make this path Windows-only --
# consider os.path.join / pathlib for portability
dbUrl = "sqlite+pysqlite:///" + os.getcwd() + "\\database\\test.db"
engine = create_engine(dbUrl, echo=True, future=True)
conn = engine.connect()
# Fetch every row of some_table
queryAll = text("Select * from some_table")
result = conn.execute(queryAll)
dataList = result.all()
"""
# tuple olarak kullan
for x,y in dataList:
print("x:{} y:{}".format(x,y))
# indexler ile Kullan
for row in dataList:
print("x:{}, y:{}".format(row[0],row[1]))
# Kolon Adıyla Kullan
for row in dataList:
print("x:{} y:{}".format(row.x,row.y))
# Mapping İle Kullan
for row in result.mappings():
print("x:{} y:{}".format(row["x"],row["y"])) """
# Parameterized SELECT: values supplied via the execute() parameter dict
queryparams = text("SELECT x, y FROM some_table WHERE y > :y and x < :x")
result = conn.execute(queryparams,{"y" : 4900, "x" : 999})
dataList = result.all()
for row in dataList:
    print("x:{} y:{}".format(row.x,row.y))
# Parameterized SELECT: values bound up front with bindparams()
queryparams = text("SELECT x, y FROM some_table WHERE y > :y and x < :x").bindparams(x=999,y=4980)
result = conn.execute(queryparams)
dataList = result.all()
for row in dataList:
    print("x:{} y:{}".format(row.x,row.y))
# Same bound query, ordered by x descending
queryparams = text("SELECT x, y FROM some_table WHERE y > :y and x < :x order by x desc").bindparams(x=999,y=4980)
result = conn.execute(queryparams)
dataList = result.all()
for row in dataList:
    print("x:{} y:{}".format(row.x,row.y))
|
import sqlite3
import requests
import time
import logging
import random
from flask import Flask, jsonify
import elasticapm
from elasticapm.contrib.flask import ElasticAPM
from elasticapm.handlers.logging import LoggingHandler
# Sample data used to generate random rows for the demo endpoints
names = ['ruan', 'stefan', 'philip', 'norman',
         'frank', 'pete', 'johnny', 'peter', 'adam']
cities = ['cape town', 'johannesburg', 'pretoria', 'dublin',
          'kroonstad', 'bloemfontein', 'port elizabeth', 'auckland', 'sydney']
lastnames = ['smith', 'bekker', 'admams', 'phillips', 'james', 'adamson']
# Ensure the demo table exists before any endpoint is hit
conn = sqlite3.connect('database.db')
conn.execute(
    'CREATE TABLE IF NOT EXISTS people (name STRING, age INTEGER, surname STRING, city STRING)')
#sqlquery_write = conn.execute('INSERT INTO people VALUES("{}", "{}", "{}", "{}")'.format(random.choice(names), random.randint(18,40), random.choice(lastnames), random.choice(cities)))
# Candidate sleep durations (seconds) for the /delay endpoint
seconds = [0.002, 0.003, 0.004, 0.01, 0.3, 0.2, 0.009,
           0.015, 0.02, 0.225, 0.009, 0.001, 0.25, 0.030, 0.018]
app = Flask(__name__)
# Elastic APM agent configuration for the Flask integration
app.config['ELASTIC_APM'] = {
    # Set required service name. Allowed characters:
    # a-z, A-Z, 0-9, -, _, and space
    # Set custom APM Server URL (default: http://localhost:8200)
    'SERVER_URL': 'http://apm-server:8200',
    'DEBUG': True,
    'TRACES_SEND_FREQ': 5,
    # NOTE(review): secret token committed in source -- should come from the environment
    'SECRET_TOKEN': 'xxVpmQB2HMzCL9PgBHVrnxjNXXw5J7bd79DFm6sjBJR5HPXDhcF8MSb3vv4bpg44',
    'SERVICE_NAME': 'flaskapp',
    'FLUSH_INTERVAL': 1, # 2.x
    'MAX_QUEUE_SIZE': 1, # 2.x
    'TRANSACTIONS_IGNORE_PATTERNS': ['.*healthcheck']
}
apm = ElasticAPM(app, logging=True)
@app.route('/')
def index():
    """Root endpoint: tags the current APM transaction and logs an error."""
    elasticapm.set_custom_context({'environment': 'local test'})
    # NOTE(review): elasticapm.tag() is deprecated in newer agents in
    # favour of elasticapm.label() -- confirm the agent version in use
    elasticapm.tag(testbla=True)
    logging.error("Blablabla")
    return jsonify({"message": "response ok bla"})
@app.route('/delay')
def delay():
    """Sleep for a random duration from `seconds` to simulate a slow response."""
    time.sleep(random.choice(seconds))
    return jsonify({"message": "response delay"})
@app.route('/upstream')
def upstream():
    """Call an external API and map its 'country' field to a status code."""
    r = requests.get('https://api.ruanbekker.com/people').json()
    # NOTE(review): this lookup result is discarded -- likely left over
    r.get('country')
    if r.get('country') == 'italy':
        return 'Italalia!', 200
    elif r.get('country') == 'canada':
        # returns a 502 -- presumably to exercise upstream-error reporting
        return 'Canada!', 502
    else:
        return 'Not Found', 404
@app.route('/5xx')
def fail_with_5xx():
    """Always fails: 'a' + 1 raises TypeError, producing a 500 response.

    Judging by the route name this is an intentional error generator
    (e.g. for APM error reporting) -- do not "fix" the expression.
    """
    value = 'a' + 1
    return jsonify({"message": value})
@app.route('/sql-write')
def sqlw():
    """Insert five random people rows into the demo database.

    Uses one parameterized executemany() instead of the previous five
    copy-pasted string-formatted INSERT statements (safer and shorter;
    behavior -- five random rows committed -- is unchanged).
    """
    conn = sqlite3.connect('database.db')
    rows = [
        (random.choice(names), random.randint(18, 40),
         random.choice(lastnames), random.choice(cities))
        for _ in range(5)
    ]
    conn.executemany('INSERT INTO people VALUES (?, ?, ?, ?)', rows)
    conn.commit()
    conn.close()
    return 'ok', 200
@app.route('/sql-read')
def sqlr():
    """Read every row of the people table (result discarded -- the
    endpoint only exercises the query)."""
    db = sqlite3.connect('database.db')
    db.row_factory = sqlite3.Row
    cursor = db.cursor()
    cursor.execute('select * from people')
    cursor.fetchall()
    db.close()
    return 'ok', 200
@app.route('/sql-group')
def slqg():  # name kept as-is (apparent 'sqlg' typo) -- the route binds it
    """Run a count-by-city aggregation over the people table (result discarded)."""
    db = sqlite3.connect('database.db')
    db.row_factory = sqlite3.Row
    cursor = db.cursor()
    cursor.execute('select count(*) as num, city from people group by city')
    cursor.fetchall()
    db.close()
    return 'ok', 200
# Run the development server when executed directly (binds all
# interfaces on port 80, container-style)
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=80)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.