#!/usr/bin/python
# @copyright Copyright 2019 United States Government as represented by the Administrator of the
# National Aeronautics and Space Administration. All Rights Reserved.
#
# @revs_title
# @revs_begin
# @rev_entry(Jason Harvey, CACI, GUNNS, March 2019, --, Initial implementation.)
# @revs_end
#
import xml.etree.ElementTree as ET
import re
# Indents and newlines the given XML elements and subelements for reading and merging.
def formatXml(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
formatXml(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
# This parses the given xml file with all namespaces stripped out.
#
# This aims to reproduce the ns_clean option of XMLParser in the lxml 3rd party library.
# We don't use lxml because we want to minimize the install burden on users.
#
# Returns the root element.
def parseClean(file):
root_tag = ''
namespaces = []
with open(file, 'r') as fin:
# Find the root element tag and the namespaces
for line in fin:
if not line.startswith('<?xml'):
fields = line.split(' ')
root_tag = fields[0][1:]
for field in fields[1:]:
if 'xmlns' in field:
attribs = field.split('=')[0].split(':')
if len(attribs) == 2:
namespaces.append(attribs[1])
break
with open(file, 'r') as fin:
fin_str = fin.read()
# Replace the root element with just the tag.
search = re.search(r'<' + root_tag + '.+?>', fin_str)
if (search):
sub_out = search.group(0)
if sub_out.endswith('/>'):
sub_in = '<' + root_tag + '/>'
else:
sub_in = '<' + root_tag + '>'
fin_str = re.sub(sub_out, sub_in, fin_str)
for namespace in namespaces:
# Find all occurrences of the namespace, ns, in <*ns:> without a quote between
# < and ns, and remove duplicates.
occurrences = list(set(re.findall(r'<[^"\'>]*' + namespace + ':', fin_str)))
for occurrence in occurrences:
# Clean all occurrences of the namespace
sub_in = occurrence.replace(namespace + ':', '')
fin_str = re.sub(occurrence, sub_in, fin_str)
return ET.fromstring(fin_str)
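# --- Illustrative usage sketch (added for clarity; not part of the original file). ---
# It only uses the two helpers above; the sample XML, the "gunns" namespace and the
# temporary file are made up for the example.
if __name__ == '__main__':
    import os
    import tempfile
    sample = ('<?xml version="1.0"?>\n'
              '<model xmlns:gunns="http://example.invalid/gunns"><gunns:link name="a"/></model>')
    tmp = tempfile.NamedTemporaryFile('w', suffix='.xml', delete=False)
    tmp.write(sample)
    tmp.close()
    root = parseClean(tmp.name)        # namespace prefixes such as "gunns:" are stripped
    formatXml(root)                    # indent the tree in place for readable output
    print(ET.tostring(root).decode())
    os.remove(tmp.name)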
|
#!/usr/bin/env python
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for adb."""
from io import BytesIO
import struct
import unittest
from mock import mock
from adb import common
from adb import adb_commands
from adb import adb_protocol
from adb.usb_exceptions import TcpTimeoutException, DeviceNotFoundError
import common_stub
BANNER = b'blazetest'
LOCAL_ID = 1
REMOTE_ID = 2
class BaseAdbTest(unittest.TestCase):
@classmethod
def _ExpectWrite(cls, usb, command, arg0, arg1, data):
usb.ExpectWrite(cls._MakeHeader(command, arg0, arg1, data))
usb.ExpectWrite(data)
if command == b'WRTE':
cls._ExpectRead(usb, b'OKAY', 0, 0)
@classmethod
def _ExpectRead(cls, usb, command, arg0, arg1, data=b''):
usb.ExpectRead(cls._MakeHeader(command, arg0, arg1, data))
if data:
usb.ExpectRead(data)
if command == b'WRTE':
cls._ExpectWrite(usb, b'OKAY', LOCAL_ID, REMOTE_ID, b'')
@classmethod
def _ConvertCommand(cls, command):
return sum(c << (i * 8) for i, c in enumerate(bytearray(command)))
@classmethod
def _MakeHeader(cls, command, arg0, arg1, data):
command = cls._ConvertCommand(command)
magic = command ^ 0xFFFFFFFF
checksum = adb_protocol.AdbMessage.CalculateChecksum(data)
return struct.pack(b'<6I', command, arg0, arg1, len(data), checksum, magic)
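# Illustrative note (added): for command b'OKAY' with arg0=1, arg1=2 and empty data,
# _MakeHeader packs six little-endian uint32 fields:
#   0x59414B4F ('OKAY'), 1, 2, 0 (data length), 0 (checksum of empty data),
#   0xA6BEB4B0 (magic, i.e. the command word XOR 0xFFFFFFFF).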
@classmethod
def _ExpectConnection(cls, usb):
cls._ExpectWrite(usb, b'CNXN', 0x01000000, 4096, b'host::%s\0' % BANNER)
cls._ExpectRead(usb, b'CNXN', 0, 0, b'device::\0')
@classmethod
def _ExpectOpen(cls, usb, service):
cls._ExpectWrite(usb, b'OPEN', LOCAL_ID, 0, service)
cls._ExpectRead(usb, b'OKAY', REMOTE_ID, LOCAL_ID)
@classmethod
def _ExpectClose(cls, usb):
cls._ExpectRead(usb, b'CLSE', REMOTE_ID, 0)
cls._ExpectWrite(usb, b'CLSE', LOCAL_ID, REMOTE_ID, b'')
@classmethod
def _Connect(cls, usb):
return adb_commands.AdbCommands.Connect(usb, BANNER)
class AdbTest(BaseAdbTest):
@classmethod
def _ExpectCommand(cls, service, command, *responses):
usb = common_stub.StubUsb(device=None, setting=None)
cls._ExpectConnection(usb)
cls._ExpectOpen(usb, b'%s:%s\0' % (service, command))
for response in responses:
cls._ExpectRead(usb, b'WRTE', REMOTE_ID, 0, response)
cls._ExpectClose(usb)
return usb
def testConnect(self):
usb = common_stub.StubUsb(device=None, setting=None)
self._ExpectConnection(usb)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
def testConnectSerialString(self):
dev = adb_commands.AdbCommands()
with mock.patch.object(common.UsbHandle, 'FindAndOpen', return_value=None):
with mock.patch.object(adb_commands.AdbCommands, '_Connect', return_value=None):
dev.ConnectDevice(serial='/dev/invalidHandle')
def testSmallResponseShell(self):
command = b'keepin it real'
response = 'word.'
usb = self._ExpectCommand(b'shell', command, response)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
self.assertEqual(response, dev.Shell(command))
def testBigResponseShell(self):
command = b'keepin it real big'
# The data doesn't have to be big; the point is that the response is the
# concatenation of the data from the different WRTEs.
responses = [b'other stuff, ', b'and some words.']
usb = self._ExpectCommand(b'shell', command, *responses)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
self.assertEqual(b''.join(responses).decode('utf8'),
dev.Shell(command))
def testUninstall(self):
package_name = "com.test.package"
response = 'Success'
usb = self._ExpectCommand(b'shell', ('pm uninstall "%s"' % package_name).encode('utf8'), response)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
self.assertEqual(response, dev.Uninstall(package_name))
def testStreamingResponseShell(self):
command = b'keepin it real big'
# expect multiple lines
responses = ['other stuff, ', 'and some words.']
usb = self._ExpectCommand(b'shell', command, *responses)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
response_count = 0
for (expected,actual) in zip(responses, dev.StreamingShell(command)):
self.assertEqual(expected, actual)
response_count = response_count + 1
self.assertEqual(len(responses), response_count)
def testReboot(self):
usb = self._ExpectCommand(b'reboot', b'', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.Reboot()
def testRebootBootloader(self):
usb = self._ExpectCommand(b'reboot', b'bootloader', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.RebootBootloader()
def testRemount(self):
usb = self._ExpectCommand(b'remount', b'', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.Remount()
def testRoot(self):
usb = self._ExpectCommand(b'root', b'', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.Root()
def testEnableVerity(self):
usb = self._ExpectCommand(b'enable-verity', b'', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.EnableVerity()
def testDisableVerity(self):
usb = self._ExpectCommand(b'disable-verity', b'', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.DisableVerity()
class FilesyncAdbTest(BaseAdbTest):
@classmethod
def _MakeSyncHeader(cls, command, *int_parts):
command = cls._ConvertCommand(command)
return struct.pack(b'<%dI' % (len(int_parts) + 1), command, *int_parts)
@classmethod
def _MakeWriteSyncPacket(cls, command, data=b'', size=None):
if not isinstance(data, bytes):
data = data.encode('utf8')
return cls._MakeSyncHeader(command, size or len(data)) + data
@classmethod
def _ExpectSyncCommand(cls, write_commands, read_commands):
usb = common_stub.StubUsb(device=None, setting=None)
cls._ExpectConnection(usb)
cls._ExpectOpen(usb, b'sync:\0')
while write_commands or read_commands:
if write_commands:
command = write_commands.pop(0)
cls._ExpectWrite(usb, b'WRTE', LOCAL_ID, REMOTE_ID, command)
if read_commands:
command = read_commands.pop(0)
cls._ExpectRead(usb, b'WRTE', REMOTE_ID, LOCAL_ID, command)
cls._ExpectClose(usb)
return usb
def testPush(self):
filedata = b'alo there, govnah'
mtime = 100
send = [
self._MakeWriteSyncPacket(b'SEND', b'/data,33272'),
self._MakeWriteSyncPacket(b'DATA', filedata),
self._MakeWriteSyncPacket(b'DONE', size=mtime),
]
data = b'OKAY\0\0\0\0'
usb = self._ExpectSyncCommand([b''.join(send)], [data])
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.Push(BytesIO(filedata), '/data', mtime=mtime)
def testPull(self):
filedata = b"g'ddayta, govnah"
recv = self._MakeWriteSyncPacket(b'RECV', b'/data')
data = [
self._MakeWriteSyncPacket(b'DATA', filedata),
self._MakeWriteSyncPacket(b'DONE'),
]
usb = self._ExpectSyncCommand([recv], [b''.join(data)])
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
self.assertEqual(filedata, dev.Pull('/data'))
class TcpTimeoutAdbTest(BaseAdbTest):
@classmethod
def _ExpectCommand(cls, service, command, *responses):
tcp = common_stub.StubTcp('10.0.0.123')
cls._ExpectConnection(tcp)
cls._ExpectOpen(tcp, b'%s:%s\0' % (service, command))
for response in responses:
cls._ExpectRead(tcp, b'WRTE', REMOTE_ID, 0, response)
cls._ExpectClose(tcp)
return tcp
def _run_shell(self, cmd, timeout_ms=None):
tcp = self._ExpectCommand(b'shell', cmd)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=tcp, banner=BANNER)
dev.Shell(cmd, timeout_ms=timeout_ms)
def testConnect(self):
tcp = common_stub.StubTcp('10.0.0.123')
self._ExpectConnection(tcp)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=tcp, banner=BANNER)
def testTcpTimeout(self):
timeout_ms = 1
command = b'i_need_a_timeout'
self.assertRaises(
TcpTimeoutException,
self._run_shell,
command,
timeout_ms=timeout_ms)
class TcpHandleTest(unittest.TestCase):
def testInitWithHost(self):
tcp = common_stub.StubTcp('10.11.12.13')
self.assertEqual('10.11.12.13:5555', tcp._serial_number)
self.assertEqual(None, tcp._timeout_ms)
def testInitWithHostAndPort(self):
tcp = common_stub.StubTcp('10.11.12.13:5678')
self.assertEqual('10.11.12.13:5678', tcp._serial_number)
self.assertEqual(None, tcp._timeout_ms)
def testInitWithTimeout(self):
tcp = common_stub.StubTcp('10.0.0.2', timeout_ms=234.5)
self.assertEqual('10.0.0.2:5555', tcp._serial_number)
self.assertEqual(234.5, tcp._timeout_ms)
def testInitWithTimeoutInt(self):
tcp = common_stub.StubTcp('10.0.0.2', timeout_ms=234)
self.assertEqual('10.0.0.2:5555', tcp._serial_number)
self.assertEqual(234.0, tcp._timeout_ms)
if __name__ == '__main__':
unittest.main()
|
import json
def parse(filename):
"""
Creates a json object from a file
Args:
filename: the path to the file
Returns:
a json object
"""
with open(filename, 'r') as f:
data = json.load(f)
return data
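# Minimal self-check sketch (added; not part of the original file): write a tiny
# JSON document to a temporary file and read it back with parse().
if __name__ == '__main__':
    import os
    import tempfile
    tmp = tempfile.NamedTemporaryFile('w', suffix='.json', delete=False)
    json.dump({'answer': 42}, tmp)
    tmp.close()
    print(parse(tmp.name))  # -> {'answer': 42}
    os.remove(tmp.name)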
|
#!/usr/bin/python3.5
# -*- coding: utf-8 -*-
if __name__ == "__main__":
from constantes import famosos
print(famosos)
|
import pygame, sys, random, classes
pygame.mixer.init()
def process(player, FPS, total_frames, SCREENWIDTH):
velocity = 10
bullet_sound = pygame.mixer.Sound("audio/bullet.wav")
bullet_sound.set_volume(0.3)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_d:
player.velx = velocity
if event.key == pygame.K_a:
player.velx = -velocity
if event.key == pygame.K_w:
player.vely = -velocity
if event.key == pygame.K_s:
player.vely = velocity
if event.type == pygame.KEYUP:
if event.key == pygame.K_d:
player.velx = 0
if event.key == pygame.K_a:
player.velx = 0
if event.key == pygame.K_w:
player.vely = 0
if event.key == pygame.K_s:
player.vely = 0
player.rect.x += player.velx
player.rect.y += player.vely
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE]:
bullet_sound.play()
classes.Bullet(player.rect.x, player.rect.y, "images/bullet.png")
Create_Flight(FPS, total_frames, SCREENWIDTH)
Collisions()
def Create_Flight(FPS, total_frames, SCREENWIDTH):
three_second = FPS * 3
if total_frames % three_second == 0:
x = random.randint(1, SCREENWIDTH - 50)
classes.Flight(x, -20, "images/craft.png")
def Collisions():
collision_sound = pygame.mixer.Sound("audio/explosion.wav")
collision_sound.set_volume(0.5)
for flight in classes.Flight.List:
if pygame.sprite.spritecollide(flight, classes.Bullet.List, True):
collision_sound.play()
flight.health -= 50
if flight.health == 0:
classes.Player.spare_list[0].get_score()
flight.destroy(classes.Flight)
else:
flight.image = pygame.image.load("images/explosion.png")
flight.vely -= 1
for bullet in classes.Bullet.List:
if pygame.sprite.spritecollide(bullet, classes.Flight.List, False):
bullet.rect.y += 600
bullet.destroy()
def gameover():
for player in classes.Player.List:
if pygame.sprite.spritecollide(player, classes.Flight.List, False):
return True
|
from collections import deque
import logging
# Project imports
from utils import pipeline_logging
LOGGER = logging.getLogger(__name__)
pipeline_logging.configure(logging.INFO, add_console_handler=True)
RDR_SEX = 'rdr_sex'
EHR_SEX = 'ehr_sex'
MATCH_STATUS = 'match_status'
MATCH = 'match'
NO_MATCH = 'no_match'
MISSING_EHR = 'missing_ehr'
MISSING_RDR = 'missing_rdr'
MATCH_STATUS_PAIRS = 'match_status_pairs'
GENDER_MATCH = [{
MATCH_STATUS:
MATCH,
MATCH_STATUS_PAIRS: [{
RDR_SEX: ["SexAtBirth_Male"],
EHR_SEX: ["MALE"]
}, {
RDR_SEX: ["SexAtBirth_Female"],
EHR_SEX: ["FEMALE"]
}, {
RDR_SEX: ["SexAtBirth_SexAtBirthNoneOfThese"],
EHR_SEX: ["UNKNOWN", "OTHER", "AMBIGUOUS"]
}]
}, {
MATCH_STATUS:
NO_MATCH,
MATCH_STATUS_PAIRS: [{
RDR_SEX: ["SexAtBirth_Male"],
EHR_SEX: [
"UNKNOWN", "Gender unknown", "AMBIGUOUS", "Gender unspecified",
"OTHER", "FEMALE"
]
}, {
RDR_SEX: ["SexAtBirth_Female"],
EHR_SEX: [
"UNKNOWN", "Gender unknown", "AMBIGUOUS", "Gender unspecified",
"OTHER", "MALE"
]
}, {
RDR_SEX: ["SexAtBirth_Intersex"],
EHR_SEX: [
"AMBIGUOUS", "Gender unknown", "Gender unspecified", "FEMALE",
"MALE", "UNKNOWN", "OTHER"
]
}, {
RDR_SEX: ["SexAtBirth_SexAtBirthNoneOfThese"],
EHR_SEX: ["FEMALE", "MALE", "Gender unspecified", "Gender unknown"]
}]
}, {
MATCH_STATUS:
MISSING_EHR,
MATCH_STATUS_PAIRS: [{
RDR_SEX: [
"SexAtBirth_Male", "SexAtBirth_Female", "SexAtBirth_Intersex",
"SexAtBirth_SexAtBirthNoneOfThese"
],
EHR_SEX: ["No matching concept"]
}]
}, {
MATCH_STATUS:
MISSING_RDR,
MATCH_STATUS_PAIRS: [{
RDR_SEX: ["UNSET", "PMI_Skip", "PMI_PreferNotToAnswer"],
EHR_SEX: [
"MALE", "OTHER", "Gender unspecified", "AMBIGUOUS", "FEMALE",
"UNKNOWN", "Gender unknown", "No matching concept"
]
}]
}]
# State abbreviations. Used to validate state abbreviations.
STATE_ABBREVIATIONS = [
'al',
'ak',
'az',
'ar',
'ca',
'co',
'ct',
'de',
'fl',
'ga',
'hi',
'id',
'il',
'in',
'ia',
'ks',
'ky',
'la',
'me',
'md',
'ma',
'mi',
'mn',
'ms',
'mo',
'mt',
'ne',
'nv',
'nh',
'nj',
'nm',
'ny',
'nc',
'nd',
'oh',
'ok',
'or',
'pa',
'ri',
'sc',
'sd',
'tn',
'tx',
'ut',
'vt',
'va',
'wa',
'wv',
'wi',
'wy',
# Commonwealth/Territory:
'as',
'dc',
'fm',
'gu',
'mh',
'mp',
'pw',
'pr',
'vi',
# Military "State":
'aa',
'ae',
'ap',
]
ADDRESS_ABBREVIATIONS = {
'aly': 'alley',
'anx': 'annex',
'apt': 'apartment',
'ave': 'avenue',
'bch': 'beach',
'bldg': 'building',
'blvd': 'boulevard',
'bnd': 'bend',
'btm': 'bottom',
'cir': 'circle',
'ct': 'court',
'co': 'county',
'ctr': 'center',
'dr': 'drive',
'e': 'east',
'expy': 'expressway',
'hts': 'heights',
'hwy': 'highway',
'is': 'island',
'jct': 'junction',
'lk': 'lake',
'ln': 'lane',
'mtn': 'mountain',
'n': 'north',
'ne': 'northeast',
'num': 'number',
'nw': 'northwest',
'pkwy': 'parkway',
'pl': 'place',
'plz': 'plaza',
'po': 'post office',
'rd': 'road',
'rdg': 'ridge',
'rr': 'rural route',
'rm': 'room',
's': 'south',
'se': 'southeast',
'sq': 'square',
'st': 'street',
'str': 'street',
'sta': 'station',
'ste': 'suite',
'sw': 'southwest',
'ter': 'terrace',
'tpke': 'turnpike',
'trl': 'trail',
'vly': 'valley',
'w': 'west',
'way': 'way',
}
CITY_ABBREVIATIONS = {
'st': 'saint',
'afb': 'air force base',
}
def get_gender_comparison_case_statement():
conditions = []
for match in GENDER_MATCH:
and_conditions = []
for dict_ in match[MATCH_STATUS_PAIRS]:
and_conditions.append(
f"(rdr_sex in {[pair.lower() for pair in dict_[RDR_SEX]]} AND ehr_sex in {[pair.lower() for pair in dict_[EHR_SEX]]})"
)
all_matches = ' OR '.join(and_conditions)
all_matches = all_matches.replace('[', '(').replace(']', ')')
conditions.append(f'WHEN {all_matches} THEN \'{match[MATCH_STATUS]}\'')
return ' \n'.join(conditions)
def get_state_abbreviations():
""" Returns lowercase state abbreviations separated by comma as string.
e.g. 'al','ak','az',...
"""
return ','.join(f"'{state}'" for state in STATE_ABBREVIATIONS)
def _get_replace_statement(base_statement, rdr_ehr, field, dict_abbreviation):
"""
Create a nested REGEXP_REPLACE() statement for specified field and rdr/ehr.
:param: base_statement - Function that returns the base statement to use REGEXP_REPLACE() for
:param: rdr_ehr - string 'rdr' or 'ehr'
:param: field - string 'city' or 'street'
:param: dict_abbreviation - dictionary that has abbreviations
:return: Nested REGEXP_REPLACE statement as string
"""
statement_parts = deque([base_statement(rdr_ehr, field)])
for key in dict_abbreviation:
statement_parts.appendleft(
"REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(")
statement_parts.append(f",'^{key} ','{dict_abbreviation[key]} ')")
statement_parts.append(f",' {key}$',' {dict_abbreviation[key]}')")
statement_parts.append(f",' {key} ',' {dict_abbreviation[key]} ')")
statement_parts.appendleft(f"normalized_{rdr_ehr}_{field} AS (SELECT ")
statement_parts.append(f" AS {rdr_ehr}_{field})")
return ''.join(statement_parts)
def get_with_clause(field):
"""
Create WITH statement for CREATE_{field}_COMPARISON_FUNCTION.
:param: field - string 'city' or 'street'
:return: WITH statement as string
"""
valid_fields = {'city', 'street'}
if field not in valid_fields:
raise ValueError(
f"Invalid field name: {field}. Valid field names: {valid_fields}")
base_statement = {
'city':
lambda rdr_ehr, field:
f"REGEXP_REPLACE(REGEXP_REPLACE(LOWER(TRIM({rdr_ehr}_{field})),'[^A-Za-z ]',''),' +',' ')",
'street':
lambda rdr_ehr, field:
(f"REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(REGEXP_REPLACE(LOWER(TRIM({rdr_ehr}_{field})),"
f"'[^0-9A-Za-z ]', ''),'([0-9])(?:st|nd|rd|th)', r'\\1'),'([0-9])([a-z])',r'\\1 \\2'),' +',' ')"
),
}
abbreviations = {
'city': CITY_ABBREVIATIONS,
'street': ADDRESS_ABBREVIATIONS,
}
statement_parts = ["WITH "]
statement_parts.append(
_get_replace_statement(base_statement[field], 'rdr', field,
abbreviations[field]))
statement_parts.append(",")
statement_parts.append(
_get_replace_statement(base_statement[field], 'ehr', field,
abbreviations[field]))
statement = ''.join(statement_parts)
LOGGER.info(f"Created WITH clause: {statement}")
return statement
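# Illustrative sketch (added; not part of the original module): print the SQL
# fragments these helpers generate to show their shape. Running it requires the
# same `utils.pipeline_logging` dependency the module already imports.
if __name__ == '__main__':
    print(get_gender_comparison_case_statement())
    print(get_state_abbreviations())
    print(get_with_clause('city'))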
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from lookoutvision import manifest
BUCKET_NAME = "mybucket"
S3_PATH = "mypath"
DATASETS = ["training", "validation"]
class DummyFramework(manifest.Manifest):
"""docstring for DummyFramework"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def generate_manifests(self):
return {
"training": {
"normal": {
"source-ref": "s3://{}/training/normal/good.jpg".format(BUCKET_NAME),
"auto-label": 1,
"auto-label-metadata": {
"confidence": 1,
"job-name": "labeling-job/auto-label",
"class-name": "normal",
"human-annotated": "yes",
"creation-date": "1970-01-01T00:00:00.0",
"type": "groundtruth/image-classification"
}
},
"anomaly": {
"source-ref": "s3://{}/training/anomaly/bad.jpg".format(BUCKET_NAME),
"auto-label": 0,
"auto-label-metadata": {"confidence": 1,
"job-name": "labeling-job/auto-label",
"class-name": "anomaly",
"human-annotated": "yes",
"creation-date": "1970-01-01T00:00:00.0",
"type": "groundtruth/image-classification"
}
}
},
"validation": {
"normal": {
"source-ref": "s3://{}/validation/normal/good.jpg".format(BUCKET_NAME),
"auto-label": 1,
"auto-label-metadata": {
"confidence": 1,
"job-name": "labeling-job/auto-label",
"class-name": "normal",
"human-annotated": "yes",
"creation-date": "1970-01-01T00:00:00.0",
"type": "groundtruth/image-classification"
}
},
"anomaly": {
"source-ref": "s3://{}/validation/anomaly/bad.jpg".format(BUCKET_NAME),
"auto-label": 0,
"auto-label-metadata": {"confidence": 1,
"job-name": "labeling-job/auto-label",
"class-name": "anomaly",
"human-annotated": "yes",
"creation-date": "1970-01-01T00:00:00.0",
"type": "groundtruth/image-classification"
}
}
}
}
def push_manifests(self):
success = {}
for ds in self.datasets:
success[ds] = {
"bucket": "{}".format(self.bucket),
"key": "{}.manifest".format(ds),
"location": "s3://{}/{}.manifest".format(self.bucket, ds)
}
return success
def test_get_bucket():
mft = manifest.Manifest(bucket=BUCKET_NAME, s3_path=S3_PATH, datasets=DATASETS)
assert mft.get_bucket() == BUCKET_NAME
def test_get_datasets():
mft = manifest.Manifest(bucket=BUCKET_NAME, s3_path=S3_PATH, datasets=DATASETS)
assert mft.get_datasets() == DATASETS
def test_push_manifests():
mft = DummyFramework(bucket=BUCKET_NAME, s3_path=S3_PATH, datasets=DATASETS)
assert mft.push_manifests() == {
"training": {
"bucket": "mybucket",
"key": "training.manifest",
"location": "s3://mybucket/training.manifest"
},
"validation": {
"bucket": "mybucket",
"key": "validation.manifest",
"location": "s3://mybucket/validation.manifest"
}
}
def test_generate_manifests():
mft = DummyFramework(bucket=BUCKET_NAME, s3_path=S3_PATH, datasets=DATASETS)
assert mft.generate_manifests() == {
"training": {
"normal": {
"source-ref": "s3://mybucket/training/normal/good.jpg",
"auto-label": 1,
"auto-label-metadata": {
"confidence": 1,
"job-name": "labeling-job/auto-label",
"class-name": "normal",
"human-annotated": "yes",
"creation-date": "1970-01-01T00:00:00.0",
"type": "groundtruth/image-classification"
}
},
"anomaly": {
"source-ref": "s3://mybucket/training/anomaly/bad.jpg",
"auto-label": 0,
"auto-label-metadata": {"confidence": 1,
"job-name": "labeling-job/auto-label",
"class-name": "anomaly",
"human-annotated": "yes",
"creation-date": "1970-01-01T00:00:00.0",
"type": "groundtruth/image-classification"
}
}
},
"validation": {
"normal": {
"source-ref": "s3://mybucket/validation/normal/good.jpg",
"auto-label": 1,
"auto-label-metadata": {
"confidence": 1,
"job-name": "labeling-job/auto-label",
"class-name": "normal",
"human-annotated": "yes",
"creation-date": "1970-01-01T00:00:00.0",
"type": "groundtruth/image-classification"
}
},
"anomaly": {
"source-ref": "s3://mybucket/validation/anomaly/bad.jpg",
"auto-label": 0,
"auto-label-metadata": {"confidence": 1,
"job-name": "labeling-job/auto-label",
"class-name": "anomaly",
"human-annotated": "yes",
"creation-date": "1970-01-01T00:00:00.0",
"type": "groundtruth/image-classification"
}
}
}
}
|
from datetime import date, datetime
weekdays_en = {0:"mon", 1:"tue", 2:"wed", 3:"thu", 4:"fri"}
weekdays_kr = {0:"월", 1:"화", 2:"수", 3:"목", 4:"금"}
def now_weekday(kr=False):
return weekdays_en[datetime.now().weekday()] if not kr else weekdays_kr[datetime.now().weekday()]
def get_date_string():
now = datetime.now()
return f"{now.year}๋
{now.month}์ {now.day}์ผ ({now_weekday(kr=True)})"
def current_school_time():
now = datetime.now().strftime("%H:%M")
period = -1
if "07:50"<=now<="08:55":
period=0
elif "08:56"<=now<="09:55":
period=1
elif "09:56"<=now<="10:55":
period=2
elif "10:56"<=now<="11:55":
period=3
elif "11:56"<=now<="13:05":
period=-2 # lunch
elif "13:06"<=now<="13:55":
period=4
elif "13:56"<=now<="14:55":
period=5
elif "14:56"<=now<="15:55":
period=6
return period
# 7:50 ~ 8:55
# 8:56 ~ 9:55
# 9:56 ~ 10:55
# 10:56 ~ 11:55
# 11:56 ~ 13:05 (lunch)
# 13:06 ~ 13:55
# 13:56 ~ 14:55
# 14:56 ~ 15:55
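# Illustrative check (added; not part of the original file): print today's
# formatted date string and the current class period (-1 outside class hours,
# -2 during lunch).
if __name__ == '__main__':
    print(get_date_string())
    print(current_school_time())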
|
"""
Django settings for estore project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from oscar.defaults import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Heroku: Update database configuration from $DATABASE_URL.
# (Applied below, after DATABASES is defined.)
import dj_database_url
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
#SECRET_KEY = 'py0($bhd@o6b(lq!1n2qux!5!w6n4z62^b(*!vk0y70v=vostl'
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'py0($bhd@o6b(lq!1n2qux!5!w6n4z62^b(*!vk0y70v=vostl')
# SECURITY WARNING: don't run with debug turned on in production!
#DEBUG = True
DEBUG = os.environ.get('DJANGO_DEBUG', 'True') == 'True'
SECURE_HSTS_SECONDS = 600
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
X_FRAME_OPTIONS = 'DENY'
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
ALLOWED_HOSTS = []
# Application definition
from oscar import get_core_apps
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.flatpages',
'widget_tweaks',
'stripe',
'policy',
] + get_core_apps(['apps.checkout', 'apps.promotions'])
SITE_ID = 1
STRIPE_PUBLISHABLE_KEY = "pk_test_PKq39FCWPpAMRJtpb0IIZqZK"
STRIPE_SECRET_KEY = "sk_test_2aZuqLT89hbIvnRqOXJ3Ml4d"
STRIPE_CURRENCY = "USD"
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'oscar.apps.basket.middleware.BasketMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
]
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
AUTHENTICATION_BACKENDS = (
'oscar.apps.customer.auth_backends.EmailBackend',
'django.contrib.auth.backends.ModelBackend',
)
ROOT_URLCONF = 'estore.urls'
from oscar import OSCAR_MAIN_TEMPLATE_DIR
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
OSCAR_MAIN_TEMPLATE_DIR
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.contrib.messages.context_processors.messages',
'oscar.apps.search.context_processors.search_form',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.apps.customer.notifications.context_processors.notifications',
'oscar.core.context_processors.metadata',
],
},
},
]
WSGI_APPLICATION = 'estore.wsgi.application'
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
OSCAR_SHOP_NAME = 'Zhang Ziyi'
OSCAR_SHOP_TAGLINE = 'Jewels'
OSCAR_DEFAULT_CURRENCY = 'USD'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
'ATOMIC_REQUESTS': True,
}
}
# Heroku: override the default database with $DATABASE_URL when it is set.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "static")
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "media")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
|
from flask import g
from datetime import datetime, timedelta
from ..common.db_connect import sql_command, sql_select
def add_rental(new_rental):
'''Add a row to the rentals table using the given information and add a row to the inventory_rentals for each inventory item
Args:
new_rental: PaymentInfo class object.
Returns:
int: The return value. Rental ID if successful.
'''
query = (
'INSERT INTO rentals (customer_id, rented_by, rented_on, due_date) VALUES (%s, %s, %s, %s);')
data = (new_rental.customer_id, g.id, datetime.now(),
datetime.now() + timedelta(days=5))
rental_id = sql_command(query, data)
inventory_ids = [x.strip() for x in new_rental.inventory_ids.split(',')]
for inventory_id in inventory_ids:
query = (
'INSERT INTO inventory_rentals (inventory_id, rental_id) VALUES (%s, %s);')
data = (inventory_id, rental_id)
sql_command(query, data)
return rental_id
def get_all_current_rentals():
'''Retrieves current rentals from the all_rentals view.
Returns:
list: The return value. All rows from the select statement.
'''
query = 'SELECT * FROM all_rentals WHERE ISNULL(returned_on);'
data = ()
return sql_select(query, data)
def get_current_rental(rental_id):
'''Retrieve the current rental from the all_rentals view matching the target rental ID.
Args:
rental_id: Target rental ID.
Returns:
list: The return value. The row from the select statement.
'''
query = 'SELECT * FROM all_rentals WHERE ISNULL(returned_on) AND id = %s;'
data = (rental_id,)
return sql_select(query, data)
def return_rentals(rental_id):
'''Add a date to the returned_on column for the rental ID
Args:
rental_id: Target rental ID.
Returns:
int: The return value. 0 if successful.
'''
query = ('UPDATE rentals SET returned_on = %s WHERE id = %s;')
data = (datetime.now(), rental_id)
return sql_command(query, data)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from interface import EosMaterial
from evtk.hl import gridToVTK
import numpy as np
from numpy.testing import assert_allclose
def _3d_reshape(arr):
return arr.reshape(arr.shape+(1,))
def save_eostab_to_vtk(eosmat, filename):
for view in ['DT']:
for spec in ['e', 'i', 't']:
pattern = '_'.join([spec,view])
ftables = [el for el in eosmat.tables if pattern in el]
if len(ftables):
xl = []
yl = []
data = {}
for tab_name in ftables:
tab = getattr(eosmat, tab_name)
xl.append(tab['R_Array'])
yl.append(tab['T_Array'])
data[tab_name] = _3d_reshape(tab['F_Array'])
for x in xl:
assert_allclose(x,xl[0])
for y in yl:
assert_allclose(y, yl[0])
X, Y = np.meshgrid(x,y, indexing='ij')
X = _3d_reshape(X)
Y = _3d_reshape(Y)
Z = np.zeros(X.shape)
#print X
gridToVTK("./{0}".format(filename), X, Y, Z, pointData=data)
#def convert_to_vtk():
# eosmat = EosMaterial(3720, ['Pt_DT'])
# save_eostab_to_vtk(eosmat, '3720')
|
# -*- coding: utf-8 -*-
import pytest
from docopt import DocoptExit
def test_help_run():
from folklore_cli.cmds.help import run
cmd = run(['serve'])
assert cmd == 'serve'
def test_validation():
from folklore_cli.cmds.help import run
with pytest.raises(DocoptExit) as e:
run([])
assert 'Usage' in str(e.value)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-03-28 12:23
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('filter', '0016_auto_20180328_0759'),
]
operations = [
migrations.AlterModelOptions(
name='boolfilterfield',
options={'verbose_name': 'Fields BOOLEAN', 'verbose_name_plural': 'Fields BOOLEAN'},
),
migrations.AlterModelOptions(
name='choicefilterfield',
options={'verbose_name': 'Fields SELECTIVE', 'verbose_name_plural': 'Fields SELECTIVE'},
),
migrations.AlterModelOptions(
name='filterfield',
options={'ordering': ('order',), 'verbose_name': 'ALL FILTERS----------------------------------------------------', 'verbose_name_plural': 'ALL FILTERS----------------------------------------------------'},
),
migrations.AlterModelOptions(
name='placeboolfilterfield',
options={'verbose_name': 'Place fields BOOLEAN', 'verbose_name_plural': 'Place fields BOOLEAN'},
),
migrations.AlterModelOptions(
name='placechoicefilterfield',
options={'verbose_name': 'Place fields SELECTIVE', 'verbose_name_plural': 'Place fields SELECTIVE'},
),
migrations.AlterModelOptions(
name='placefilterfield',
options={'verbose_name': 'PLACE FIELDS--------------------------------------------------', 'verbose_name_plural': 'PLACE FIELDS--------------------------------------------------'},
),
migrations.AlterModelOptions(
name='placetextfilterfield',
options={'verbose_name': 'Place fields TEXTUAL', 'verbose_name_plural': 'Place fields TEXTUAL'},
),
migrations.AlterModelOptions(
name='textfilterfield',
options={'verbose_name': 'Fields TEXTUAL', 'verbose_name_plural': 'Fields TEXTUAL'},
),
]
|
import datetime
import requests
from dataclasses import dataclass
from typing import Optional
from TemporaryStorage.providers import Provider, File, HostedFile
@dataclass
class ProviderInstance(Provider):
def __provider_init__(self):
self.provider = 'x0.at'
self.max_file_size = 1024
self.min_retention = 10
self.max_retention = 360
self.base_url = 'https://x0.at'
def __calc_retention_date__(self, file: File) -> datetime.datetime:
retention = int(self.min_retention + (
self.max_retention - self.min_retention
) * (1 - (file.file_size / self.max_file_size)) ** 2)
if retention < self.min_retention:
retention = self.min_retention
return datetime.datetime.utcnow() + datetime.timedelta(days=retention)
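# Worked example of the retention curve above (added; assumes the quadratic decay
# as written, with min_retention=10, max_retention=360 days and max_file_size=1024):
#   an empty file keeps ~360 days, one at half the size limit ~97 days,
#   and one at the full limit the minimum 10 days.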
def check_file(self, file: File) -> bool:
if file.file_size > self.max_file_size:
return False
return True
def upload(self, file: File) -> Optional[HostedFile]:
req = requests.post(self.base_url,
files={"file": open(file.path, 'rb')})
if req.status_code != 200:
return
return HostedFile(
provider=self.provider,
url=req.text.split('\n')[0],
retention_to=self.__calc_retention_date__(file)
)
|
import os
import pytest
directory = os.path.dirname(__file__)
@pytest.fixture
def http_mock(requests_mock):
text = open(os.path.join(directory, "data/conference_6.json")).read()
requests_mock.get("http://statsapi.web.nhl.com/api/v1/conferences/6", text=text)
text = open(os.path.join(directory, "data/division_18.json")).read()
requests_mock.get("http://statsapi.web.nhl.com/api/v1/divisions/18", text=text)
text = open(os.path.join(directory, "data/franchise_24.json")).read()
requests_mock.get("http://statsapi.web.nhl.com/api/v1/franchises/24", text=text)
text = open(os.path.join(directory, "data/game_2017030415.json")).read()
requests_mock.get("http://statsapi.web.nhl.com/api/v1/game/2017030415/feed/live", text=text)
text = open(os.path.join(directory, "data/player_8471214.json")).read()
requests_mock.get("http://statsapi.web.nhl.com/api/v1/people/8471214", text=text)
text = open(os.path.join(directory, "data/team_15.json")).read()
requests_mock.get("http://statsapi.web.nhl.com/api/v1/teams/15", text=text)
text = open(os.path.join(directory, "data/teams.json")).read()
requests_mock.get("http://statsapi.web.nhl.com/api/v1/teams", text=text)
text = open(os.path.join(directory, "data/venue_5094.json")).read()
requests_mock.get("http://statsapi.web.nhl.com/api/v1/venues/5094", text=text)
|
from __future__ import unicode_literals
from frappe import _
import frappe
from frappe.model.document import Document
from datetime import date
from frappe import _
from six import string_types
from frappe.utils import date_diff
from frappe.core.doctype.communication.email import make
class EmployeeSkillMap(Document):
pass
def validate_expiry_date():
docName=frappe.get_all("Employee Skill Map")
for v in docName:
docList=frappe.db.get_list("Employee Skill Map",filters={'name':v.name},fields={'*'})
for row in docList:
documentList=frappe.db.get_list("Employee Skill",filters={'parenttype':'Employee Skill Map','parent':row.name},fields={'*'})
for val in documentList:
new_date=str(date.today())
expire_date=date_diff(val.expiration_date,new_date)
if expire_date==2:
userList=[]
message="Employee Skill '"+val.skill+"' will be expired in 2 days."
user=frappe.db.get_value('Employee',{'name':row.employee},'user_id')
userList.append(user)
manager=get_department_manager('Employee',row.employee)
if manager not in userList:
userList.append(manager)
q1=frappe.db.sql("""
select u.email
from tabUser u,`tabHas Role` r where
u.name = r.parent and r.role = 'HR User'
and u.enabled = 1
""")
if q1:
for q in q1:
for user in q:
if user not in userList:
userList.append(user)
for user in userList:
make(
subject = row.name,
recipients = user,
communication_medium = "Email",
content = message,
send_email = True
)
def get_department_manager(doctype,name):
department=frappe.db.get_value(doctype,{'name':name},'Department')
if department:
employee_id= frappe.db.get_value('Department',{'name':department},'department_manager')
if employee_id:
return frappe.db.get_value(doctype,{'name':employee_id},'user_id')
|
###########################################################################
# Imports
###########################################################################
# Standard library imports
import os
# Local imports
from code.__init__ import print_hello_world
###########################################################################
# Code
###########################################################################
ext = os.getcwd()
###########################################################################
# Main Code
###########################################################################
if __name__ == '__main__':
print_hello_world(f'I am located at {ext}')
|
import sys
import time
class Logger:
def __init__(self, logfile):
self.logfile = logfile
def message(self, s):
lt = time.gmtime()
s = "{0:02d}.{1:02d}.{2:04d} {3:02d}:{4:02d}:{5:02d} UTC: {6}\n".format(lt.tm_mday, lt.tm_mon, lt.tm_year,
lt.tm_hour, lt.tm_min, lt.tm_sec, s)
sys.stderr.write(s)
sys.stderr.flush()
open(self.logfile, "at").write(s)
|
#!/usr/bin/env python
"""
Code to load the policy learned from imitation learning and calculate reward
Example usage:
python3 run_imitation.py Humanoid-v2-epoch30 Humanoid-v2 --render \
--num_rollouts 20
Author of this script and included expert policies: Jonathan Ho (hoj@openai.com)
"""
import os
import pickle
import numpy as np
import tf_util
import gym
import tensorflow as tf
from tensorflow.python.saved_model import tag_constants
MODEL_DIR = 'saved_models'
def load_policy_from_file(sess, filename):
graph = tf.get_default_graph()
tf.saved_model.loader.load(
sess,
[tag_constants.SERVING],
filename
)
input_placeholder = graph.get_tensor_by_name('input_placeholder:0')
output = graph.get_tensor_by_name('fully_connected_2/BiasAdd:0')
return load_policy_from_session(input_placeholder, output)
def load_policy_from_session(input_placeholder, output):
return tf_util.function([input_placeholder], output)
def run_simulator(policy_fn, envname, num_rollouts, max_timesteps=None, render=False):
env = gym.make(envname)
max_steps = max_timesteps or env.spec.timestep_limit
returns = []
observations = []
actions = []
for i in range(num_rollouts):
# print('iter', i)
obs = env.reset()
done = False
totalr = 0.
steps = 0
while not done:
action = policy_fn(obs[None,:])
observations.append(obs)
actions.append(action)
obs, r, done, _ = env.step(action)
totalr += r
steps += 1
if render:
env.render()
if steps % 100 == 0: print("%i/%i" % (steps, max_steps))
if steps >= max_steps:
break
returns.append(totalr)
return_mean = np.mean(returns)
return_std = np.std(returns)
print("return mean: ", return_mean)
print("return_std: ", return_std)
return return_mean, return_std, np.array(observations)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('imitation_policy_file', type=str)
parser.add_argument('--envname', type=str)
parser.add_argument('--render', action='store_true')
parser.add_argument("--max_timesteps", type=int)
parser.add_argument('--num_rollouts', type=int, default=40)
args = parser.parse_args()
if args.envname is None:
args.envname = "-".join(args.imitation_policy_file.split("-")[:2])
with tf.Session() as sess:
print('loading and building imitation policy')
policy_fn = load_policy_from_file(sess,
os.path.join(MODEL_DIR, args.imitation_policy_file))
print('loaded and built')
_ = run_simulator(policy_fn, args.envname, args.num_rollouts,
args.max_timesteps, args.render)
|
from os import system
import psycopg2
from operation.connector import conn
from operation.validation.userValidation import (
employeeNameValidation as employee_name_validation)
from statement.sqlmainstatement import get_delete_data_statement
def delete_data(employee_name):
try:
connect = conn()
db_cursor = connect.cursor()
print("Are you sure want to delete ", employee_name,
" profile ??(y/n) -> ", end='')
delete_choice = str(input())
if delete_choice in ('y', 'Y'):
sql_query = get_delete_data_statement(employee_name)
db_cursor.execute(sql_query)
connect.commit()
print('OPERATION SUCCESSFULLY . . . .')
else:
print("Aborted To Delete ", employee_name, " Profile")
db_cursor.close()
connect.close()
except(psycopg2.DatabaseError) as error:
print(error)
except(psycopg2.DataError) as error:
print(error)
def delete_data_main():
system('clear')
print('\t DELETE DATA MENU \t\n')
employee_name = str(input('Enter Employee Name : '))
employee_profile_status = employee_name_validation(employee_name)
if employee_profile_status is True:
delete_data(employee_name)
|
from region import *
class PseudoRegion(Region):
"""
PseudoRegion commands include: APERTURE, CUTV, DENP, DENS, DISP, DUMMY, DVAR, EDGE, GRID,
OUTPUT, REFP, REF2, RESET, RKICK, ROTATE, TAPER, TILT, TRANSPORT, BACKGROUND, BFIELD, ENDB, ! or &
"""
def __init__(self, **kwargs):
pass
def __str__(self):
return '[A PseudoRegion can be either an APERTURE, CUTV, DENP, DENS, DISP, DUMMY, DVAR, EDGE, GRID, \
OUTPUT, REFP, REF2, RESET, RKICK, ROTATE, TAPER, TILT, TRANSPORT, BACKGROUND, BFIELD, ENDB, ! or &]'
def __repr__(self):
return self.__str__()
|
## tensor-based operations examples
from py3plex.core import multinet
from py3plex.core import random_generators
## initiate an instance of a random graph
ER_multilayer = random_generators.random_multilayer_ER(500,8,0.05,directed=False)
## some simple visualization
visualization_params = {"display":True}
ER_multilayer.visualize_matrix(visualization_params)
some_nodes = [node for node in ER_multilayer.get_nodes()][0:5]
some_edges = [node for node in ER_multilayer.get_edges()][0:5]
## random node is accessed as follows
print(ER_multilayer[some_nodes[0]])
## and random edge as
print(ER_multilayer[some_edges[0][0]][some_edges[0][1]])
|
"""Add support for Django 1.4+ safe datetimes.
https://docs.djangoproject.com/en/1.4/topics/i18n/timezones/
"""
# TODO: this whole file may no longer be needed, since Django has this tooling built in
from datetime import datetime
try:
from django.conf import settings
from django.utils.timezone import now, utc
except ImportError:
def now():
return datetime.now()
def smart_datetime(*args):
value = datetime(*args)
return tz_aware(value)
def tz_aware(d):
value = d
if settings.USE_TZ:
value = d.replace(tzinfo=utc)
return value
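# Illustrative usage (added; assumes a configured Django settings module):
#
#     value = smart_datetime(2020, 1, 1)       # UTC-aware when settings.USE_TZ is True
#     value = tz_aware(datetime(2020, 1, 1))   # attach UTC to an existing naive datetime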
|
from randluck.strategies import lucky_num
def test_get_lucky_num_from_birth_year():
birth_year = 1997
num = lucky_num.get_lucky_num_from_birth_year(birth_year)
assert num == 5430
|
import disnake as discord
from disnake.ext import commands
from datetime import datetime
from api.server import base, main
class OnMessagEdit(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_message_edit(self, before, after):
if base.guild(before.guild)[6] is not None:
if not before.author.bot:
embed = discord.Embed(description = main.get_lang(before.guild, "MESSAGE_EDIT"), timestamp = datetime.utcnow(), color = 0x60afff)
embed.add_field(name = main.get_lang(before.guild, "MESSAGE_EDIT_OLD"), value=f'```{before.content}```', inline = False)
embed.add_field(name = main.get_lang(before.guild, "MESSAGE_EDIT_NEW"), value=f'```{after.content}```', inline = False)
embed.add_field(name = main.get_lang(before.guild, "AUTHOR"), value=before.author.mention, inline = True)
embed.add_field(name = main.get_lang(before.guild, "CHANNEL"), value=before.channel.mention, inline = True)
embed.set_footer(text = main.get_lang(before.guild, "MESSAGE_ID").format(before.id))
await self.client.get_channel(int(base.guild(before.guild)[6])).send(embed = embed)
def setup(client):
client.add_cog(OnMessagEdit(client))
|
"""In this file, we define cascada's variable management system"""
from typing import Any, Union
import vartypes as vt
class CscType():
"""A generic cascada type"""
def __init__(self, nom, constructeur):
self.nom = nom
self.constructeur = constructeur
def __call__(self, *args, **kwargs):
return self.constructeur(*args, **kwargs)
def __str__(self):
return self.nom
def __repr__(self):
return self.__str__()
class CscVar():
"""A generic cascada variable"""
def __init__(self, type, valeur):
self.type = type
self.valeur = valeur
def __str__(self):
return "{} {}".format(self.type, self.valeur)
def __repr__(self):
return self.__str__()
class VarMgmtError(Exception):
"""An execption was encountered while managing the variables"""
pass
class VarJar():
"""The "jar" that cointains the variables"""
vartypes = {"int64": CscType("int64", vt.mk_int64), "uint64": CscType("uint64", vt.mk_uint64),
"int32": CscType("int32", vt.mk_int32), "uint32": CscType("uint32", vt.mk_uint32),
"uint8": CscType("uint8", vt.mk_uint8), "string": CscType("string", str),
"float": CscType("float", vt.mk_cfloat), "double": CscType("double", vt.mk_cdouble)}
def __init__(self):
self.bocal = {}
def ajouter_variable(self, nom: str, var: CscVar) -> None:
"""Adds a variable to the jar
Params:
nom: the name of the variable to add
var: its value
"""
try:
self.bocal[nom] = var
except BaseException:
raise VarMgmtError()
def lier_variables(self, lnom: str, rnom: str) -> None:
"""Binds two variables: rnom now points to the variable pointed to by lnom
Params:
lnom: the name of the variable that will be pointed to by both lnom and rnom
rnom: the reference that is going to be changed
Note:
Both the variables lnom and rnom have to exist beforehand and have to be of the same
type."""
try:
rv = self.bocal[rnom]
lv = self.bocal[lnom]
except BaseException:
raise VarMgmtError()
if rv.type.nom != lv.type.nom:
raise VarMgmtError
self.bocal[rnom] = self.bocal[lnom]
def acceder_variable(self, nom: str) -> CscVar:
"""Returns the variable named nom
Params:
nom: the name of the variable that should be returned
"""
try:
a = self.bocal[nom]
except BaseException:
raise VarMgmtError()
return a
def existance_variable(self, nom_variable: str) -> bool:
"""Checks whether the variable named nom_variable is defined
Params:
nom_variable: the name of the variable
Returns:
True if the variable is defined, False if not
"""
return nom_variable in self.bocal
def changer_valeur_variable(self, nom_variable: str, nouvelle_valeur: Any) -> None:
"""Changes the value of the variable named nom_variable, sets it to nouvelle_valeur
Params:
nom_variable: the name of the variable
nouvelle_valeur: its new value
"""
a = self.acceder_variable(nom_variable)
a.valeur = a.type.constructeur(nouvelle_valeur)
@staticmethod
def existance_type(nom_type: str) -> bool:
"""Checks whether the variable type nom_type is defined
Params:
nom_type: the name of the type
"""
return nom_type in VarJar.vartypes
@staticmethod
def recuperer_type(nom_type: str) -> Union[CscType, None]:
"""Returns the CscType object matching the type name nom_type
Params:
nom_type: the name of the variable type
Returns:
CscType|None: the CscType matching nom_type or None if it does not exist
"""
if VarJar.existance_type(nom_type):
return VarJar.vartypes[nom_type]
else:
return None
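# Illustrative sketch (added; not part of the original module). It relies on the
# `vartypes` module already imported above; the variable names are made up.
if __name__ == '__main__':
    jar = VarJar()
    int32 = VarJar.recuperer_type('int32')
    jar.ajouter_variable('x', CscVar(int32, int32(0)))
    jar.changer_valeur_variable('x', 41)
    print(jar.acceder_variable('x'))     # prints something like "int32 41"
    print(jar.existance_variable('y'))   # -> False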
|
from cardiogrammer import create_app
app = create_app('production')
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 15 01:55:22 2020
@author: balajiramesh
"""
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 00:25:12 2020
@author: balajiramesh
Raw : 16,319230 2,641562
Within study timeline: 14393806 2247749
Within study area and timeline: 7892752 1246896
After removing washout period: 7816138 1233913
After removing missing data: 7,813,866 and 1,233,600 OP and IP ED visit records
"""
import pandas as pd
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
from datetime import timedelta, date,datetime
from dateutil import parser
import glob
import sys
sys.path.insert(1, r'Z:\Balaji\GRAScripts\dhs_scripts')
from recalculate_svi import recalculateSVI
#%%functions
def filter_mortality(df):
pat_sta=df.PAT_STATUS.copy()
pat_sta=pd.to_numeric(pat_sta,errors="coerce")
return pat_sta.isin([20,40,41,42]).astype('int') #status code for died
def get_sp_outcomes(sp,Dis_cat):
global sp_outcomes
return sp.merge(sp_outcomes.loc[:,['RECORD_ID','op',Dis_cat]],on=['RECORD_ID','op'],how='left')[Dis_cat].values
#%%read ip op data
INPUT_IPOP_DIR=r'Z:\Balaji\DSHS ED visit data(PII)\CleanedMergedJoined'
#read_op
op=pd.read_pickle(INPUT_IPOP_DIR+'\\op')
op=op.loc[:,['RECORD_ID','STMT_PERIOD_FROM','PAT_ADDR_CENSUS_BLOCK_GROUP','PAT_AGE_YEARS','SEX_CODE','RACE','PAT_STATUS','ETHNICITY','PAT_ZIP','LCODE']]
op['op']=True
#sp=pd.read_pickle(INPUT_IPOP_DIR+r'\op')
#read_ip
ip=pd.read_pickle(INPUT_IPOP_DIR+'\\ip')
ip=ip.loc[:,['RECORD_ID','STMT_PERIOD_FROM','PAT_ADDR_CENSUS_BLOCK_GROUP','PAT_AGE_YEARS','SEX_CODE','RACE','PAT_STATUS','ETHNICITY','PAT_ZIP','LCODE']]
ip['op']=False
#merge Ip and OP
op=pd.concat([op,ip])
sp=op
del op,ip
#read op/ip outcomes df
sp_outcomes=pd.read_csv(INPUT_IPOP_DIR+'\\ip_op_outcomes.csv')
#read flood ratio data
flood_data=pd.read_csv(r'Z:/Balaji/indundation_harvey/FloodRatioJoinedAll_v1/FloodInund_AllJoined_v1.csv')
#read svi data
SVI_df_raw=pd.read_csv(r'Z:/Balaji/SVI_Raw/TEXAS.csv')
SVI_df_raw.FIPS=pd.to_numeric(SVI_df_raw.FIPS)
#read population data
demos=pd.read_csv(r'Z:/Balaji/Census_data_texas/population/ACS_17_5YR_DP05_with_ann.csv',low_memory=False,skiprows=1)
demos.Id2=demos.Id2.astype("Int64")
#read study area counties
#county_to_filter=pd.read_csv('Z:/Balaji/counties_evacu_order.csv').GEOID.to_list()
county_to_filter=pd.read_csv('Z:\Balaji\DSHS ED visit data(PII)\contiesInStudyArea.csv').County_FIPS.to_list()
#%%read the categories file
outcome_cats=pd.read_csv('Z:/Balaji/GRAScripts/dhs_scripts/categories.csv')
outcome_cats.fillna('',inplace=True)
#%%predefine variable
flood_cats_in=1
floodr_use="DFO_R200" #['DFO_R200','DFO_R100','LIST_R20','DFO_R20','DFOuLIST_R20']
nullAsZero="True" #null flood ratios are changed to 0
floodZeroSep="True" # zeros are considered as seperate class
flood_data_zip=None
interv_dates=[20170825, 20170913, 20171014,20180701,20181001] #lower bound excluded
washout_period=[20170819,20170825] #including the dates specified
interv_dates_cats=['flood','PostFlood1','PostFlood2','NextYear1','NextYear2']
Dis_cat="ALL"
#%%cleaing for age, gender and race and create census tract
#age
sp.loc[:,'PAT_AGE_YEARS']=pd.to_numeric(sp.PAT_AGE_YEARS,errors="coerce")
sp.loc[:,'PAT_AGE_YEARS']=sp.loc[:,'PAT_AGE_YEARS'].astype('float')
#bin ages
#sp.loc[:,'PAT_AGE_YEARS']=pd.cut(sp.PAT_AGE_YEARS,bins=[0,1,4,11,16,25,64,150],include_lowest=True,labels=(0,1,4,11,16,25,64))
#gender
sp.loc[~sp.SEX_CODE.isin(["M","F"]),'SEX_CODE']=np.nan
sp.SEX_CODE=sp.SEX_CODE.astype('category').cat.reorder_categories(['M','F'],ordered=False)
#ethinicity
sp.loc[:,'ETHNICITY']=pd.to_numeric(sp.ETHNICITY,errors="coerce")
sp.loc[~sp.ETHNICITY.isin([1,2]),'ETHNICITY']=np.nan
sp.ETHNICITY=sp.ETHNICITY.astype('category').cat.reorder_categories([2,1],ordered=False)
sp.ETHNICITY.cat.rename_categories({2:'Non_Hispanic',1:'Hispanic'},inplace=True)
#race
sp.loc[:,'RACE']=pd.to_numeric(sp.RACE,errors="coerce")
sp.loc[(sp.RACE<=0) | (sp.RACE>5),'RACE']=np.nan
sp.loc[sp.RACE<=2,'RACE']=5
sp.RACE=sp.RACE.astype('category').cat.reorder_categories([4,3,5],ordered=False)
sp.RACE.cat.rename_categories({3:'black',4:'white',5:'other'},inplace=True)
#age
sp=sp[sp.PAT_AGE_YEARS<119]
#create tract id from block group id
sp.loc[:,'PAT_ADDR_CENSUS_TRACT']=(sp.PAT_ADDR_CENSUS_BLOCK_GROUP//10)
#%%filter records for counties in study area or from zip codes
sp=sp[(sp.PAT_ADDR_CENSUS_TRACT//1000000).isin(county_to_filter)].copy()
#%%keep only the dates we requested for
#remove records before 2016
sp=sp.loc[(~pd.isna(sp.STMT_PERIOD_FROM))&(~pd.isna(sp.PAT_ADDR_CENSUS_BLOCK_GROUP))]
sp=sp[((sp.STMT_PERIOD_FROM > 20160700) & (sp.STMT_PERIOD_FROM< 20161232))\
| ((sp.STMT_PERIOD_FROM > 20170400) & (sp.STMT_PERIOD_FROM< 20171232))\
| ((sp.STMT_PERIOD_FROM > 20180700) & (sp.STMT_PERIOD_FROM< 20181232))]
#remove data in washout period
sp= sp[~((sp.STMT_PERIOD_FROM >= washout_period[0]) & (sp.STMT_PERIOD_FROM <= washout_period[1]))]
#%% merge population and economy
demos_subset=demos.iloc[:,[1,3]]
demos_subset.columns=["PAT_ADDR_CENSUS_TRACT","Population"]
sp=sp.merge(demos_subset,on="PAT_ADDR_CENSUS_TRACT",how='left')
sp=sp.loc[sp.Population>0,]
#%% merge SVI after recategorization
svi=recalculateSVI(SVI_df_raw[SVI_df_raw.FIPS.isin(sp.PAT_ADDR_CENSUS_TRACT.unique())]).loc[:,["FIPS",'SVI','RPL_THEMES_1',"RPL_THEMES_2","RPL_THEMES_3","RPL_THEMES_4"]]
sp=sp.merge(svi,left_on="PAT_ADDR_CENSUS_TRACT",right_on="FIPS",how='left').drop("FIPS",axis=1)
sp['SVI_Cat']=pd.cut(sp.SVI,bins=np.arange(0,1.1,1/4),include_lowest=True,labels=[1,2,3,4])
#do same for the for cats
for i in ['1','2','3','4']:
sp['SVI_Cat_T'+i]=pd.cut(sp['RPL_THEMES_'+i],bins=np.arange(0,1.1,1/4),include_lowest=True,labels=[1,2,3,4])
#%%filter SVI cat for stratified analysis
#sp=sp[sp.SVI_Cat==4]
#%%merge flood ratio
flood_join_field='PAT_ADDR_CENSUS_TRACT'
if flood_data_zip is not None:
flood_data=flood_data_zip
flood_join_field='PAT_ZIP'
FLOOD_QUANTILES=["NO","FLood_1"]
floodr=flood_data.copy()
floodr.GEOID=pd.to_numeric(floodr.GEOID).astype("Int64")
floodr=floodr.loc[:,['GEOID']+[floodr_use]]
floodr.columns=['GEOID','floodr']
sp=sp.merge(floodr,left_on=flood_join_field,right_on='GEOID',how='left')
#make tracts with null as zero flooding
if nullAsZero == "True": sp.loc[pd.isna(sp.floodr),'floodr']=0.0
#categorize floods as per quantiles
tractsfloodr=sp.loc[~sp.duplicated(flood_join_field),[flood_join_field,'floodr']]
if floodZeroSep == "True":
#tractsfloodr.floodr= tractsfloodr.floodr.round(4)
s=tractsfloodr.loc[tractsfloodr.floodr>0,'floodr']
flood_bins=s.quantile(np.arange(0,1.1,1/(len(FLOOD_QUANTILES)-1))).to_numpy()
flood_bins[0]=1e-6
flood_bins=np.append([0],flood_bins)
else:
s=tractsfloodr.loc[tractsfloodr.floodr>-1,'floodr']
flood_bins=s.quantile(np.arange(0,1.1,1/len(FLOOD_QUANTILES))).to_numpy()
# adjust if some bincenters were zero
for i in range(1,len(FLOOD_QUANTILES)):
flood_bins[i]=i*1e-6 if flood_bins[i]==0.0 else flood_bins[i]
sp['floodr_cat']=pd.cut(sp.floodr,bins=flood_bins,right=True,include_lowest=True,labels=FLOOD_QUANTILES)
sp=sp.drop("GEOID",axis=1)
#%%calculating total visits for offset
visits_per_tract=sp.groupby(['PAT_ADDR_CENSUS_TRACT','STMT_PERIOD_FROM'])\
    .size().reset_index().rename(columns={0:'TotalVisits'})
sp=sp.merge(visits_per_tract,on=['PAT_ADDR_CENSUS_TRACT','STMT_PERIOD_FROM'],how='left')
#%%pat age category based on SVI theme 2 <=17,18-64,>=65
sp['AGE_cat']=pd.cut(sp.PAT_AGE_YEARS,bins=[-1,5,12,17,45,64,200],labels=['lte5','6-12','13-17','18-45','46-64','gt64']).cat.reorder_categories(['lte5','6-12','13-17','18-45','46-64','gt64'])
#sp['AGE_cat']=pd.cut(sp.PAT_AGE_YEARS,bins=[-1,1,5,12,17,45,64,200],labels=['lte1','2-5','6-12','13-17','18-45','46-64','gt64']).cat.reorder_categories(['lte1','2-5','6-12','13-17','18-45','46-64','gt64'])
#%%function for looping
def run():
#%%filter records for specific outcome
df=sp#[sp.SVI_Cat=='SVI_filter'] #--------------Edit here for stratified model
if Dis_cat=="DEATH":df.loc[:,'Outcome']=filter_mortality(sp)
if Dis_cat=="ALL":df.loc[:,'Outcome']=1
if Dis_cat in outcome_cats.category.to_list():df.loc[:,'Outcome']=get_sp_outcomes(sp, Dis_cat)
#%% bringing in intervention
df.loc[:,'Time']=pd.cut(df.STMT_PERIOD_FROM,\
bins=[0]+interv_dates+[20190101],\
labels=['control']+[str(i) for i in interv_dates_cats]).cat.as_unordered()
#set after 2018 as control
#df.loc[df.STMT_PERIOD_FROM>20180100,'Time']="control" #if Dis_cat!="Psychiatric" else np.nan
df=df.loc[~pd.isna(df.Time),]
#take only control period
#df=df[df.Time=='control']
#%%controling for year month and week of the day
df['year']=(df.STMT_PERIOD_FROM.astype('int32')//1e4).astype('category')
df['month']=(df.STMT_PERIOD_FROM.astype('int32')//1e2%100).astype('category')
df['weekday']=pd.to_datetime(df.STMT_PERIOD_FROM.astype('str'),format='%Y%m%d').dt.dayofweek.astype('category')
#%% save cross tab
#counts_outcome=pd.DataFrame(df.Outcome.value_counts())
outcomes_recs=df.loc[(df.Outcome>0)&(~pd.isna(df.loc[:,['floodr_cat','Time','year','month','weekday' ,'PAT_AGE_YEARS',
'SEX_CODE','RACE','ETHNICITY','SVI_Cat']]).any(axis=1)),]
counts_outcome=pd.crosstab(outcomes_recs.floodr_cat,outcomes_recs.Time)
counts_outcome.to_csv(Dis_cat+"_aux"+".csv")
print(counts_outcome)
del outcomes_recs
#%%running the model
    if Dis_cat!="ALL":
        offset=np.log(df.TotalVisits)
    else:
        offset=None
    #if Dis_cat=="ALL":offset=np.log(df.Population)
#change floodr into 0-100
df.floodr=df.floodr*100
formula='Outcome'+' ~ '+' floodr_cat * Time '+' + year + month + weekday' + ' + RACE + SEX_CODE + PAT_AGE_YEARS + ETHNICITY'#' + op '
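    #Poisson GEE clustered on the flood join field (tract or ZIP); the floodr_cat*Time interaction is the flood-by-period effect, and exponentiated coefficients are rate ratios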
model = smf.gee(formula=formula,groups=df[flood_join_field], data=df,offset=offset,missing='drop',family=sm.families.Poisson(link=sm.families.links.log()))
results=model.fit()
print(results.summary())
print(np.exp(results.params))
# print(np.exp(results.conf_int()))
#%% creating result dataframe tables
results_as_html = results.summary().tables[1].as_html()
reg_table=pd.read_html(results_as_html, header=0, index_col=0)[0].reset_index()
reg_table.loc[:,'coef']=np.exp(reg_table.coef)
reg_table.loc[:,['[0.025', '0.975]']]=np.exp(reg_table.loc[:,['[0.025', '0.975]']])
reg_table=reg_table.loc[~(reg_table['index'].str.contains('month')
| reg_table['index'].str.contains('weekday')
#| reg_table['index'].str.contains('year')
#| reg_table['index'].str.contains('PAT_AGE_YEARS'))
),]
    reg_table['index']=reg_table['index'].str.replace(r"\[T.",'_',regex=True).str.replace(r'\]','',regex=True)
reg_table['model']='base'
reg_table_dev=pd.read_html(results.summary().tables[0].as_html())[0]
# counts_outcome.loc["flood_bins",'Outcome']=str(flood_bins)
#return reg_table
#%%write the output
reg_table.to_csv(Dis_cat+"_reg"+".csv")
#reg_table_dev.to_csv(Dis_cat+"_dev"+".csv")
|
import os
import time
import numpy as np
import scipy.io as scio
import os.path as osp
from functools import partial
from .misc import img_exts, prog_map
from ..imagesize import imsize
def load_synthtext(img_dir, ann_dir=None, classes=None, nproc=10, box_key='wordBB'):
if classes is not None:
        print('load_synthtext loads all objects as `text`; the classes argument is ignored')
assert osp.isdir(img_dir), f'The {img_dir} is not an existing dir!'
assert box_key in ['wordBB', 'charBB']
print('Starting loading SynthText dataset information.')
start_time = time.time()
if ann_dir is not None:
assert osp.isfile(ann_dir) and ann_dir.endswith('.mat'), \
        f'Invalid ann_dir for SynthText {ann_dir}'
_contents = parse_synthtext_mat(ann_dir, box_key)
else:
_contents = []
for root, _, files in os.walk(img_dir):
root = root.replace(img_dir, '', 1)
for f in files:
if osp.splitext(f)[-1] not in img_exts:
continue
filename = osp.join(root, f)
                filename = filename.lstrip('/')
bboxes = np.zeros((0, 8), dtype=np.float32)
labels = np.zeros((0, ), dtype=np.int64)
ann = dict(bboxes=bboxes, labels=labels)
_contents.append(dict(filename=filename, ann=ann))
_load_func = partial(_merge_img_size, root=img_dir)
contents = prog_map(_load_func, _contents, nproc)
end_time = time.time()
    print(f'Finished loading SynthText: got {len(contents)} images, ',
          f'using {end_time-start_time:.3f}s.')
return contents, ('text', )
def _merge_img_size(content, root):
imgfile = osp.join(root, content['filename'])
if not osp.exists(imgfile):
        print(f'image {imgfile} does not exist, discarding this image information')
return None
width, height = imsize(imgfile)
content.update(dict(width=width, height=height))
return content
def parse_synthtext_mat(matfile, box_key):
data = scio.loadmat(matfile)
all_bboxes = data[box_key][0]
imnames = data['imnames'][0]
contents = []
for imgfile, bboxes in zip(imnames, all_bboxes):
bboxes = bboxes.astype(np.float32)
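        # SynthText stores each image's word boxes as (2, 4, num_boxes); transpose to (num_boxes, 4, 2) and flatten to 8-value polygons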
if bboxes.ndim == 2:
bboxes = bboxes[..., None]
bboxes = bboxes.transpose(2, 1, 0)
bboxes = bboxes.reshape(-1, 8)
labels = np.zeros((len(bboxes), ), dtype=np.int64)
ann = dict(bboxes=bboxes, labels=labels)
contents.append(dict(filename=imgfile[0], ann=ann))
return contents
|
import RPi.GPIO as GPIO
import time
def main():
GPIO.setmode(GPIO.BCM)
GPIO.setup(21, GPIO.OUT)
while True:
GPIO.output(21, GPIO.HIGH)
time.sleep(1)
print('ON')
GPIO.output(21, GPIO.LOW)
time.sleep(1)
print('OFF')
try:
main()
except KeyboardInterrupt:
GPIO.cleanup()
|
# -*- coding: utf-8 -*-
import hashlib
import time
import inflection
from ...exceptions.orm import ModelNotFound
from ...query.expression import QueryExpression
from ..collection import Collection
import orator.orm.model
from .relation import Relation
from .result import Result
class BelongsToMany(Relation):
_table = None
_other_key = None
_foreign_key = None
_relation_name = None
_pivot_columns = []
_pivot_wheres = []
def __init__(self, query, parent, table, foreign_key, other_key, relation_name=None):
"""
:param query: A Builder instance
:type query: Builder
:param parent: The parent model
:type parent: Model
:param table: The pivot table
:type table: str
:param foreign_key: The foreign key
:type foreign_key: str
:param other_key: The other key
:type other_key: str
:param relation_name: The relation name
:type relation_name: str
"""
self._table = table
self._other_key = other_key
self._foreign_key = foreign_key
self._relation_name = relation_name
self._pivot_columns = []
self._pivot_wheres = []
super(BelongsToMany, self).__init__(query, parent)
def get_results(self):
"""
Get the results of the relationship.
"""
return self.get()
def where_pivot(self, column, operator=None, value=None, boolean='and'):
"""
Set a where clause for a pivot table column.
:param column: The column of the where clause, can also be a QueryBuilder instance for sub where
:type column: str|Builder
:param operator: The operator of the where clause
:type operator: str
:param value: The value of the where clause
:type value: mixed
:param boolean: The boolean of the where clause
:type boolean: str
:return: self
:rtype: self
"""
self._pivot_wheres.append([column, operator, value, boolean])
return self._query.where('%s.%s' % (self._table, column), operator, value, boolean)
def or_where_pivot(self, column, operator=None, value=None):
"""
Set an or where clause for a pivot table column.
:param column: The column of the where clause, can also be a QueryBuilder instance for sub where
:type column: str|Builder
:param operator: The operator of the where clause
:type operator: str
:param value: The value of the where clause
:type value: mixed
:return: self
:rtype: BelongsToMany
"""
return self.where_pivot(column, operator, value, 'or')
def first(self, columns=None):
"""
Execute the query and get the first result.
:type columns: list
"""
self._query.take(1)
results = self.get(columns)
if len(results) > 0:
return results.first()
return
def first_or_fail(self, columns=None):
"""
Execute the query and get the first result or raise an exception.
:type columns: list
:raises: ModelNotFound
"""
model = self.first(columns)
if model is not None:
return model
raise ModelNotFound(self._parent.__class__)
def get(self, columns=None):
"""
Execute the query as a "select" statement.
:type columns: list
:rtype: orator.Collection
"""
if columns is None:
columns = ['*']
if self._query.get_query().columns:
columns = []
select = self._get_select_columns(columns)
models = self._query.add_select(*select).get_models()
self._hydrate_pivot_relation(models)
if len(models) > 0:
models = self._query.eager_load_relations(models)
return self._related.new_collection(models)
def _hydrate_pivot_relation(self, models):
"""
Hydrate the pivot table relationship on the models.
:type models: list
"""
for model in models:
pivot = self.new_existing_pivot(self._clean_pivot_attributes(model))
model.set_relation('pivot', pivot)
def _clean_pivot_attributes(self, model):
"""
Get the pivot attributes from a model.
:type model: orator.Model
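        Attributes prefixed with 'pivot_' are returned (prefix stripped) and removed from the model.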
"""
values = {}
delete_keys = []
for key, value in model.get_attributes().items():
if key.find('pivot_') == 0:
values[key[6:]] = value
delete_keys.append(key)
for key in delete_keys:
delattr(model, key)
return values
def add_constraints(self):
"""
Set the base constraints on the relation query.
:rtype: None
"""
self._set_join()
if BelongsToMany._constraints:
self._set_where()
def get_relation_count_query(self, query, parent):
"""
Add the constraints for a relationship count query.
:type query: orator.orm.Builder
:type parent: orator.orm.Builder
:rtype: orator.orm.Builder
"""
if parent.get_query().from__ == query.get_query().from__:
return self.get_relation_count_query_for_self_join(query, parent)
self._set_join(query)
return super(BelongsToMany, self).get_relation_count_query(query, parent)
def get_relation_count_query_for_self_join(self, query, parent):
"""
Add the constraints for a relationship count query on the same table.
:type query: orator.orm.Builder
:type parent: orator.orm.Builder
:rtype: orator.orm.Builder
"""
query.select(QueryExpression('COUNT(*)'))
table_prefix = self._query.get_query().get_connection().get_table_prefix()
hash_ = self.get_relation_count_hash()
query.from_('%s AS %s%s' % (self._table, table_prefix, hash_))
key = self.wrap(self.get_qualified_parent_key_name())
return query.where('%s.%s' % (hash_, self._foreign_key), '=', QueryExpression(key))
def get_relation_count_hash(self):
"""
Get a relationship join table hash.
:rtype: str
"""
return 'self_%s' % (hashlib.md5(str(time.time()).encode()).hexdigest())
def _get_select_columns(self, columns=None):
"""
Set the select clause for the relation query.
:param columns: The columns
:type columns: list
:rtype: list
"""
if columns == ['*'] or columns is None:
columns = ['%s.*' % self._related.get_table()]
return columns + self._get_aliased_pivot_columns()
def _get_aliased_pivot_columns(self):
"""
Get the pivot columns for the relation.
:rtype: list
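        Each column is selected as 'table.column AS pivot_column' so _clean_pivot_attributes can split it off later.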
"""
defaults = [self._foreign_key, self._other_key]
columns = []
for column in defaults + self._pivot_columns:
value = '%s.%s AS pivot_%s' % (self._table, column, column)
if value not in columns:
columns.append('%s.%s AS pivot_%s' % (self._table, column, column))
return columns
def _has_pivot_column(self, column):
"""
Determine whether the given column is defined as a pivot column.
:param column: The column to check
:type column: str
:rtype: bool
"""
return column in self._pivot_columns
def _set_join(self, query=None):
"""
Set the join clause for the relation query.
:param query: The query builder
:type query: orator.orm.Builder
:return: self
:rtype: BelongsToMany
"""
if not query:
query = self._query
base_table = self._related.get_table()
key = '%s.%s' % (base_table, self._related.get_key_name())
query.join(self._table, key, '=', self.get_other_key())
return self
def _set_where(self):
"""
Set the where clause for the relation query.
:return: self
:rtype: BelongsToMany
"""
foreign = self.get_foreign_key()
self._query.where(foreign, '=', self._parent.get_key())
return self
def add_eager_constraints(self, models):
"""
Set the constraints for an eager load of the relation.
:type models: list
"""
self._query.where_in(self.get_foreign_key(), self.get_keys(models))
def init_relation(self, models, relation):
"""
Initialize the relation on a set of models.
:type models: list
:type relation: str
"""
for model in models:
model.set_relation(relation, Result(self._related.new_collection(), self, model))
return models
def match(self, models, results, relation):
"""
Match the eagerly loaded results to their parents.
:type models: list
:type results: Collection
:type relation: str
"""
dictionary = self._build_dictionary(results)
for model in models:
key = model.get_key()
if key in dictionary:
collection = Result(self._related.new_collection(dictionary[key]), self, model)
else:
collection = Result(self._related.new_collection(), self, model)
model.set_relation(relation, collection)
return models
def _build_dictionary(self, results):
"""
Build model dictionary keyed by the relation's foreign key.
:param results: The results
:type results: Collection
:rtype: dict
"""
foreign = self._foreign_key
dictionary = {}
for result in results:
key = getattr(result.pivot, foreign)
if key not in dictionary:
dictionary[key] = []
dictionary[key].append(result)
return dictionary
def touch(self):
"""
Touch all of the related models of the relationship.
"""
key = self.get_related().get_key_name()
columns = self.get_related_fresh_update()
ids = self.get_related_ids()
if len(ids) > 0:
self.get_related().new_query().where_in(key, ids).update(columns)
def get_related_ids(self):
"""
Get all of the IDs for the related models.
:rtype: list
"""
related = self.get_related()
full_key = related.get_qualified_key_name()
return self.get_query().select(full_key).lists(related.get_key_name())
def save(self, model, joining=None, touch=True):
"""
Save a new model and attach it to the parent model.
:type model: orator.Model
:type joining: dict
:type touch: bool
:rtype: orator.Model
"""
if joining is None:
joining = {}
model.save({'touch': False})
self.attach(model.get_key(), joining, touch)
return model
def save_many(self, models, joinings=None):
"""
Save a list of new models and attach them to the parent model
:type models: list
:type joinings: dict
:rtype: list
"""
if joinings is None:
joinings = {}
for key, model in enumerate(models):
self.save(model, joinings.get(key), False)
self.touch_if_touching()
return models
def find_or_new(self, id, columns=None):
"""
Find a model by its primary key or return new instance of the related model.
:param id: The primary key
:type id: mixed
:param columns: The columns to retrieve
:type columns: list
:rtype: Collection or Model
"""
instance = self._query.find(id, columns)
if instance is None:
instance = self.get_related().new_instance()
return instance
def first_or_new(self, _attributes=None, **attributes):
"""
Get the first related model record matching the attributes or instantiate it.
:param attributes: The attributes
:type attributes: dict
:rtype: Model
"""
if _attributes is not None:
attributes.update(_attributes)
instance = self._query.where(attributes).first()
if instance is None:
instance = self._related.new_instance()
return instance
def first_or_create(self, _attributes=None, _joining=None, _touch=True, **attributes):
"""
Get the first related model record matching the attributes or create it.
:param attributes: The attributes
:type attributes: dict
:rtype: Model
"""
if _attributes is not None:
attributes.update(_attributes)
instance = self._query.where(attributes).first()
if instance is None:
instance = self.create(attributes, _joining or {}, _touch)
return instance
def update_or_create(self, attributes, values=None, joining=None, touch=True):
"""
Create or update a related record matching the attributes, and fill it with values.
:param attributes: The attributes
:type attributes: dict
:param values: The values
:type values: dict
:rtype: Model
"""
if values is None:
values = {}
instance = self._query.where(attributes).first()
if instance is None:
return self.create(values, joining, touch)
instance.fill(**values)
instance.save({'touch': False})
return instance
def create(self, _attributes=None, _joining=None, _touch=True, **attributes):
"""
Create a new instance of the related model.
:param attributes: The attributes
:type attributes: dict
:rtype: orator.orm.Model
"""
if _attributes is not None:
attributes.update(_attributes)
instance = self._related.new_instance(attributes)
instance.save({'touch': False})
self.attach(instance.get_key(), _joining, _touch)
return instance
def create_many(self, records, joinings=None):
"""
Create a list of new instances of the related model.
"""
if joinings is None:
joinings = []
instances = []
for key, record in enumerate(records):
            instances.append(self.create(record, joinings[key], False))
self.touch_if_touching()
return instances
def sync(self, ids, detaching=True):
"""
Sync the intermediate tables with a list of IDs or collection of models
"""
changes = {
'attached': [],
'detached': [],
'updated': []
}
if isinstance(ids, Collection):
ids = ids.model_keys()
current = self._new_pivot_query().lists(self._other_key).all()
records = self._format_sync_list(ids)
detach = [x for x in current if x not in records.keys()]
if detaching and len(detach) > 0:
self.detach(detach)
changes['detached'] = detach
changes.update(self._attach_new(records, current, False))
if len(changes['attached']) or len(changes['updated']):
self.touch_if_touching()
return changes
def _format_sync_list(self, records):
"""
Format the sync list so that it is keyed by ID.
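        e.g. [1, {2: {'active': True}}] becomes {1: {}, 2: {'active': True}}.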
"""
results = {}
for attributes in records:
if not isinstance(attributes, dict):
id, attributes = attributes, {}
else:
id = list(attributes.keys())[0]
attributes = attributes[id]
results[id] = attributes
return results
def _attach_new(self, records, current, touch=True):
"""
Attach all of the IDs that aren't in the current dict.
"""
changes = {
'attached': [],
'updated': []
}
for id, attributes in records.items():
if id not in current:
self.attach(id, attributes, touch)
changes['attached'].append(id)
elif len(attributes) > 0 and self.update_existing_pivot(id, attributes, touch):
changes['updated'].append(id)
return changes
def update_existing_pivot(self, id, attributes, touch=True):
"""
Update an existing pivot record on the table.
"""
if self.updated_at() in self._pivot_columns:
            attributes = self._set_timestamps_on_attach(attributes, True)
updated = self.new_pivot_statement_for_id(id).update(attributes)
if touch:
self.touch_if_touching()
return updated
def attach(self, id, attributes=None, touch=True):
"""
Attach a model to the parent.
"""
if isinstance(id, orator.orm.Model):
id = id.get_key()
query = self.new_pivot_statement()
if not isinstance(id, list):
id = [id]
query.insert(self._create_attach_records(id, attributes))
if touch:
self.touch_if_touching()
def _create_attach_records(self, ids, attributes):
"""
Create a list of records to insert into the pivot table.
"""
records = []
timed = (self._has_pivot_column(self.created_at())
or self._has_pivot_column(self.updated_at()))
for key, value in enumerate(ids):
records.append(self._attacher(key, value, attributes, timed))
return records
def _attacher(self, key, value, attributes, timed):
"""
Create a full attachment record payload.
"""
id, extra = self._get_attach_id(key, value, attributes)
record = self._create_attach_record(id, timed)
if extra:
record.update(extra)
return record
def _get_attach_id(self, key, value, attributes):
"""
Get the attach record ID and extra attributes.
"""
if isinstance(value, dict):
key = list(value.keys())[0]
attributes.update(value[key])
return [key, attributes]
return value, attributes
def _create_attach_record(self, id, timed):
"""
        Create a new pivot attachment record.
"""
record = {}
record[self._foreign_key] = self._parent.get_key()
record[self._other_key] = id
if timed:
record = self._set_timestamps_on_attach(record)
return record
def _set_timestamps_on_attach(self, record, exists=False):
"""
        Set the creation and update timestamps on an attach record.
"""
fresh = self._parent.fresh_timestamp()
if not exists and self._has_pivot_column(self.created_at()):
record[self.created_at()] = fresh
if self._has_pivot_column(self.updated_at()):
record[self.updated_at()] = fresh
return record
def detach(self, ids=None, touch=True):
"""
Detach models from the relationship.
"""
if isinstance(ids, orator.orm.model.Model):
ids = ids.get_key()
if ids is None:
ids = []
query = self._new_pivot_query()
if not isinstance(ids, list):
ids = [ids]
if len(ids) > 0:
query.where_in(self._other_key, ids)
if touch:
self.touch_if_touching()
results = query.delete()
return results
def touch_if_touching(self):
"""
Touch if the parent model is being touched.
"""
if self._touching_parent():
self.get_parent().touch()
if self.get_parent().touches(self._relation_name):
self.touch()
def _touching_parent(self):
"""
Determine if we should touch the parent on sync.
"""
return self.get_related().touches(self._guess_inverse_relation())
def _guess_inverse_relation(self):
return inflection.camelize(inflection.pluralize(self.get_parent().__class__.__name__))
def _new_pivot_query(self):
"""
Create a new query builder for the pivot table.
:rtype: orator.orm.Builder
"""
query = self.new_pivot_statement()
for where_args in self._pivot_wheres:
query.where(*where_args)
return query.where(self._foreign_key, self._parent.get_key())
def new_pivot_statement(self):
"""
Get a new plain query builder for the pivot table.
"""
return self._query.get_query().new_query().from_(self._table)
def new_pivot_statement_for_id(self, id):
"""
Get a new pivot statement for a given "other" id.
"""
return self._new_pivot_query().where(self._other_key, id)
def new_pivot(self, attributes=None, exists=False):
"""
Create a new pivot model instance.
"""
pivot = self._related.new_pivot(self._parent, attributes, self._table, exists)
return pivot.set_pivot_keys(self._foreign_key, self._other_key)
def new_existing_pivot(self, attributes):
"""
Create a new existing pivot model instance.
"""
return self.new_pivot(attributes, True)
def with_pivot(self, *columns):
"""
Set the columns on the pivot table to retrieve.
"""
columns = list(columns)
self._pivot_columns += columns
return self
def with_timestamps(self, created_at=None, updated_at=None):
"""
Specify that the pivot table has creation and update columns.
"""
if not created_at:
created_at = self.created_at()
if not updated_at:
updated_at = self.updated_at()
return self.with_pivot(created_at, updated_at)
def get_related_fresh_update(self):
"""
        Get the update payload for the related model's updated at column.
"""
return {self._related.get_updated_at_column(): self._related.fresh_timestamp()}
def get_has_compare_key(self):
"""
Get the key for comparing against the parent key in "has" query.
"""
return self.get_foreign_key()
def get_foreign_key(self):
return '%s.%s' % (self._table, self._foreign_key)
def get_other_key(self):
return '%s.%s' % (self._table, self._other_key)
def get_table(self):
return self._table
def get_relation_name(self):
return self._relation_name
def _new_instance(self, model):
relation = BelongsToMany(
self.new_query(),
model,
self._table,
self._foreign_key,
self._other_key,
self._relation_name
)
relation.with_pivot(*self._pivot_columns)
return relation
|
import pandas as pd
import numpy as np
import stat_tools as st
import matplotlib.pyplot as plt
import datetime as dt
import os, glob
GHI_path2 = '~/ldata/GHI2/'
GHI_path = '~/ldata/GHI/'
sensors = np.arange(1,11)
for sensor in sensors:
with np.load(GHI_path2+'GHI_'+str(sensor)+'.npz') as data:
ty, y = data['timestamp'], data['ghi']
ty += 4*3600;
mk = ty>=(dt.datetime(2018,9,29)-dt.datetime(2018,1,1)).total_seconds()
ty[mk] += 3600
try:
if len(ty)>1:
np.savez(GHI_path+'GHI_'+str(sensor), timestamp=ty,ghi=y);
except:
pass
|
# Copyright 2019 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from string import ascii_lowercase
import pytest
from google.cloud.bigquery import Dataset, SchemaField, Table
from streamsets.testframework.markers import gcp, sdc_min_version
from streamsets.testframework.utils import get_random_string
from streamsets.testframework.decorators import stub
logger = logging.getLogger(__name__)
DESTINATION_STAGE_NAME = 'com_streamsets_pipeline_stage_bigquery_destination_BigQueryDTarget'
pytestmark = [pytest.mark.category('nonstandard')]
@stub
@pytest.mark.parametrize('stage_attributes', [{'credentials_provider': 'JSON'}])
def test_credentials_file_content_in_json(sdc_builder, sdc_executor, stage_attributes):
pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'credentials_provider': 'JSON_PROVIDER'}])
def test_credentials_file_path_in_json(sdc_builder, sdc_executor, stage_attributes):
pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'credentials_provider': 'DEFAULT_PROVIDER'},
{'credentials_provider': 'JSON'},
{'credentials_provider': 'JSON_PROVIDER'}])
def test_credentials_provider(sdc_builder, sdc_executor, stage_attributes):
pass
@stub
def test_dataset(sdc_builder, sdc_executor):
pass
@stub
@pytest.mark.parametrize('stage_attributes', [{'ignore_invalid_column': False}, {'ignore_invalid_column': True}])
def test_ignore_invalid_column(sdc_builder, sdc_executor, stage_attributes):
pass
@stub
def test_insert_id_expression(sdc_builder, sdc_executor):
pass
@gcp
@sdc_min_version("3.11.0")
@pytest.mark.parametrize('stage_attributes', [{'on_record_error': 'DISCARD'},
{'on_record_error': 'STOP_PIPELINE'},
{'on_record_error': 'TO_ERROR'}])
def test_on_record_error(sdc_builder, sdc_executor, gcp, stage_attributes):
"""Test that BigQuery API does not return a NullPointerException if asked for an empty table name
Pipeline:
dev_raw_data_source >> [google_bigquery, wiretap]
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
# Dev Raw Data Source
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
data = {'table': ''}
dev_raw_data_source.set_attributes(data_format='JSON',
raw_data=json.dumps(data),
stop_after_first_batch=True)
dataset = get_random_string(ascii_lowercase, 10)
table_name = '${record:value(\'/table\')}'
google_bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME, type='destination')
google_bigquery.set_attributes(dataset=dataset,
table_name=table_name,
stage_on_record_error=stage_attributes['on_record_error'])
wiretap = pipeline_builder.add_wiretap()
# Implement pipeline topology
dev_raw_data_source >> [google_bigquery, wiretap.destination]
pipeline = pipeline_builder.build().configure_for_environment(gcp)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_finished()
except Exception as e:
if stage_attributes['on_record_error'] == 'STOP_PIPELINE':
logger.info("Verify that pipeline transitioned to RUN_ERROR state...")
status = sdc_executor.get_pipeline_status(pipeline).response.json().get('status')
assert status == 'RUN_ERROR'
assert 'BIGQUERY_18' in e.response['message']
finally:
if stage_attributes['on_record_error'] == 'TO_ERROR':
logger.info("Verify that destination stage produced 1 error record...")
# Verify that we have exactly one record
assert len(wiretap.error_records) == 1
assert wiretap.error_records[0].header['errorCode'] == 'BIGQUERY_18'
status = sdc_executor.get_pipeline_status(pipeline).response.json().get('status')
assert status == 'FINISHED'
@stub
def test_preconditions(sdc_builder, sdc_executor):
pass
@stub
def test_project_id(sdc_builder, sdc_executor):
pass
@stub
def test_required_fields(sdc_builder, sdc_executor):
pass
@stub
def test_table_cache_size(sdc_builder, sdc_executor):
pass
@stub
def test_table_name(sdc_builder, sdc_executor):
pass
|
# Generated by Django 3.2 on 2021-05-03 17:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('WalletTransition', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='transition',
name='Done',
            field=models.BooleanField(default=False, verbose_name='تراکنش انجام شده'),
),
]
|
#!/usr/bin/env python3
import os
import sys
import argparse
import pkgutil
from importlib import import_module
import robosat_pink.tools
def main():
parser = argparse.ArgumentParser(prog="./rsp")
subparser = parser.add_subparsers(title="RoboSat.pink tools", metavar="")
try:
module = import_module("robosat_pink.tools.{}".format(sys.argv[1]))
module.add_parser(subparser)
except:
path = os.path.dirname(robosat_pink.tools.__file__)
for tool in [name for _, name, _ in pkgutil.iter_modules([path]) if name != "__main__"]:
module = import_module("robosat_pink.tools.{}".format(tool))
module.add_parser(subparser)
subparser.required = True
args = parser.parse_args()
args.func(args)
if __name__ == "__main__":
main()
|
"""
Coding - Decoding simulation of an image
========================================
This example shows a simulation of the transmission of an image as a
binary message through a gaussian white noise channel with an LDPC coding and
decoding system.
"""
# Author: Hicham Janati (hicham.janati@inria.fr)
#
# License: BSD (3-clause)
import numpy as np
from pyldpc import make_ldpc, ldpc_images
from pyldpc.utils_img import gray2bin, rgb2bin
from matplotlib import pyplot as plt
from PIL import Image
from time import time
##################################################################
# Let's see the image we are going to be working with
eye = Image.open("data/eye.png")
# convert it to grayscale and keep one channel
eye = np.asarray(eye.convert('LA'))[:, :, 0]
# Convert it to a binary matrix
eye_bin = gray2bin(eye)
print("Eye shape: (%s, %s)" % eye.shape)
print("Binary Eye shape: (%s, %s, %s)" % eye_bin.shape)
n = 200
d_v = 3
d_c = 4
seed = 42
##################################################################
# First we create a small LDPC code, i.e. a pair of decoding and coding matrices
# H and G. H is a regular parity-check matrix with d_v ones per column
# and d_c ones per row
H, G = make_ldpc(n, d_v, d_c, seed=seed, systematic=True, sparse=True)
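# make_ldpc returns the parity-check matrix H and the coding matrix G of shape (n, k);
# each length-k message block is encoded into a length-n codeword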
##################################################################
# Now we simulate the transmission with Gaussian white noise
# and recover the original image via belief-propagation.
snr = 8
eye_coded, eye_noisy = ldpc_images.encode_img(G, eye_bin, snr, seed=seed)
print("Coded eye shape", eye_coded.shape)
t = time()
eye_decoded = ldpc_images.decode_img(G, H, eye_coded, snr, eye_bin.shape)
t = time() - t
print("Eye | Decoding time: ", t)
error_decoded_eye = abs(eye - eye_decoded).mean()
error_noisy_eye = abs(eye_noisy - eye).mean()
##################################################################
# With RGB images, we proceed similarly
print("\n\n")
tiger = np.asarray(Image.open("data/tiger.jpg"))
# Convert it to a binary matrix
tiger_bin = rgb2bin(tiger)
print("Tiger shape: (%s, %s, %s)" % tiger.shape)
print("Tiger Binary shape: (%s, %s, %s)" % tiger_bin.shape)
tiger_coded, tiger_noisy = ldpc_images.encode_img(G, tiger_bin, snr, seed=seed)
print("Coded Tiger shape", tiger_coded.shape)
t = time()
tiger_decoded = ldpc_images.decode_img(G, H, tiger_coded, snr, tiger_bin.shape)
t = time() - t
print("Tiger | Decoding time: ", t)
error_decoded_tiger = abs(tiger - tiger_decoded).mean()
error_noisy_tiger = abs(tiger_noisy - tiger).mean()
titles_eye = ["Original", "Noisy | Err = %.3f %%" % error_noisy_eye,
"Decoded | Err = %.3f %%" % error_decoded_eye]
titles_tiger = ["Original", "Noisy | Err = %.3f %%" % error_noisy_tiger,
"Decoded | Err = %.3f %%" % error_decoded_tiger]
all_imgs = [[eye, eye_noisy, eye_decoded], [tiger, tiger_noisy, tiger_decoded]]
f, axes = plt.subplots(2, 3, figsize=(18, 12))
for ax_row, titles, img_list, cmap in zip(axes, [titles_eye, titles_tiger],
all_imgs, ["gray", None]):
for ax, data, title in zip(ax_row, img_list, titles):
ax.imshow(data, cmap=cmap)
ax.set_title(title, fontsize=20)
ax.set_xticks([])
ax.set_yticks([])
plt.tight_layout()
plt.show()
|
#! /usr/bin/python3
"""A helper program to test cartesian goals for the JACO and MICO arms."""
import roslib; roslib.load_manifest('kinova_arm')
import rospy
import numpy as np
import actionlib
import kinova_msgs.msg
import std_msgs.msg
from sensor_msgs.msg import JointState
import geometry_msgs.msg
import sys
import math
""" Global variable """
class KinovaController(object):
def __init__(self):
try:
rospy.init_node('j2n6s300_node')
except:
pass
self.current_joint_pose = None
self.arm_joint_number = 6
self.prefix = 'j2n6s300_'
self.currentJointCommand = [0]*7
self.currentCartesianCommand = [0.212322831154, -0.257197618484, 0.509646713734, 1.63771402836, 1.11316478252, 0.134094119072] # default home in unit mq
rospy.Subscriber('/j2n6s300_driver/out/joint_state', JointState, self._sub_callback_joint_state)
def _sub_callback_joint_state(self, data):
self.current_joint_pose = data
# print(self.current_joint_pose)
def joint_angle_client(self, angle_set):
"""Send a joint angle goal to the action server."""
action_address = '/j2n6s300_driver/joints_action/joint_angles'
client = actionlib.SimpleActionClient(action_address,
kinova_msgs.msg.ArmJointAnglesAction)
client.wait_for_server()
goal = kinova_msgs.msg.ArmJointAnglesGoal()
goal.angles.joint1 = angle_set[0]
goal.angles.joint2 = angle_set[1]
goal.angles.joint3 = angle_set[2]
goal.angles.joint4 = angle_set[3]
goal.angles.joint5 = angle_set[4]
goal.angles.joint6 = angle_set[5]
goal.angles.joint7 = angle_set[6]
client.send_goal(goal)
def cartesian_pose_client(self, position, orientation):
action_address = '/j2n6s300_driver/pose_action/tool_pose'
client = actionlib.SimpleActionClient(action_address, kinova_msgs.msg.ArmPoseAction)
client.wait_for_server()
goal = kinova_msgs.msg.ArmPoseGoal()
goal.pose.header = std_msgs.msg.Header(frame_id=('j2n6s300_link_base'))
goal.pose.pose.position = geometry_msgs.msg.Point(
x=position[0], y=position[1], z=position[2])
goal.pose.pose.orientation = geometry_msgs.msg.Quaternion(
x=orientation[0], y=orientation[1], z=orientation[2], w=orientation[3])
client.send_goal(goal)
def EulerXYZ2Quaternion(self, EulerXYZ_):
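        # Convert Euler XYZ angles (radians) to a quaternion, returned in [x, y, z, w] order.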
tx_, ty_, tz_ = EulerXYZ_[0:3]
sx = math.sin(0.5 * tx_)
cx = math.cos(0.5 * tx_)
sy = math.sin(0.5 * ty_)
cy = math.cos(0.5 * ty_)
sz = math.sin(0.5 * tz_)
cz = math.cos(0.5 * tz_)
qx_ = sx * cy * cz + cx * sy * sz
qy_ = -sx * cy * sz + cx * sy * cz
qz_ = sx * sy * cz + cx * cy * sz
qw_ = -sx * sy * sz + cx * cy * cz
Q_ = [qx_, qy_, qz_, qw_]
return Q_
def getcurrentCartesianCommand(self):
# wait to get current position
topic_address = '/j2n6s300_driver/out/cartesian_command'
rospy.Subscriber(topic_address, kinova_msgs.msg.KinovaPose, self.setcurrentCartesianCommand)
rospy.wait_for_message(topic_address, kinova_msgs.msg.KinovaPose)
def setcurrentCartesianCommand(self, feedback):
global currentCartesianCommand
currentCartesianCommand_str_list = str(feedback).split("\n")
for index in range(0,len(currentCartesianCommand_str_list)):
temp_str=currentCartesianCommand_str_list[index].split(": ")
self.currentCartesianCommand[index] = float(temp_str[1])
def getcurrentJointCommand(self):
# wait to get current position
topic_address = '/j2n6s300_driver/out/joint_command'
rospy.Subscriber(topic_address, kinova_msgs.msg.JointAngles, self.setcurrentJointCommand)
rospy.wait_for_message(topic_address, kinova_msgs.msg.JointAngles)
def setcurrentJointCommand(self, feedback):
currentJointCommand_str_list = str(feedback).split("\n")
for index in range(0,len(currentJointCommand_str_list)):
temp_str=currentJointCommand_str_list[index].split(": ")
self.currentJointCommand[index] = float(temp_str[1])
def unitParser_cartesian(self, pose_value_, relative_):
""" Argument unit """
global currentCartesianCommand
position_ = pose_value_[:3]
orientation_ = pose_value_[3:]
for i in range(0,3):
if relative_:
position_[i] = pose_value_[i] + self.currentCartesianCommand[i]
else:
position_[i] = pose_value_[i]
if relative_:
orientation_rad_list = self.currentCartesianCommand[3:]
orientation_rad = [orientation_[i] + orientation_rad_list[i] for i in range(0,3)]
else:
orientation_rad = orientation_
orientation_q = self.EulerXYZ2Quaternion(orientation_rad)
pose_mq_ = position_ + orientation_q
return pose_mq_
def unitParser_joint(self, joint_value, relative_):
""" Argument unit """
global currentJointCommand
joint_degree_command = list(map(math.degrees, joint_value))
# get absolute value
if relative_:
joint_degree_absolute_ = [joint_degree_command[i] + self.currentJointCommand[i] for i in range(0, len(joint_value))]
else:
joint_degree_absolute_ = joint_degree_command
joint_degree = joint_degree_absolute_
return joint_degree
def cartesian_movement(self, input_array, relative):
self.getcurrentCartesianCommand()
pose_mq = self.unitParser_cartesian(input_array, relative)
try:
poses = [float(n) for n in pose_mq]
self.cartesian_pose_client(poses[:3], poses[3:])
except rospy.ROSInterruptException:
print("program interrupted before completion")
def joint_movement(self, input_array, relative):
# get Current finger position if relative position
self.getcurrentJointCommand()
joint_degree = self.unitParser_joint(input_array, relative)
positions = [0]*7
try:
for i in range(0, self.arm_joint_number):
positions[i] = joint_degree[i]
self.joint_angle_client(positions)
except rospy.ROSInterruptException:
print('program interrupted before completion')
def move_home(self):
self.getcurrentJointCommand()
joint_degree = self.unitParser_joint([4.80750942, 2.92150663, 1, -2.085, 1.4470525, 7.60178156], False)
positions = [0]*7
try:
for i in range(0, self.arm_joint_number):
positions[i] = joint_degree[i]
self.joint_angle_client(positions)
except rospy.ROSInterruptException:
print('program interrupted before completion')
def get_current_position(self):
        if self.current_joint_pose is None:
print('No joint pose read!')
return
print(self.current_joint_pose.position)
|
from pytest import fixture
from starlette.status import (
HTTP_200_OK,
HTTP_204_NO_CONTENT,
HTTP_401_UNAUTHORIZED,
HTTP_404_NOT_FOUND,
HTTP_409_CONFLICT,
)
from app.core.schemas import AccessTokenPayload, UserRead
user_dict = {"email": "email@domain.com"}
update_payload = {"email": "new@email.com"}
@fixture
def user(users_repository):
return users_repository.create(user_dict)
@fixture
def user_jwt(jwt_service, user):
jwt_payload = AccessTokenPayload(
user_id=user.id,
roles=user.roles,
exp=AccessTokenPayload.calc_exp(1),
sid="123456",
)
return jwt_service.generate_token(jwt_payload.dict())
def read_self_request(client, jwt):
return client.get("/api/v1/users/me", headers={"Authorization": f"Bearer {jwt}"})
@fixture
def read_self_response(client, user_jwt):
yield read_self_request(client, user_jwt)
def test_read_self_should_return_status_200_for_valid_jwt(read_self_response):
assert read_self_response.status_code == HTTP_200_OK
def test_read_self_should_return_json(read_self_response):
assert read_self_response.headers["Content-Type"] == "application/json"
def test_read_self_should_return_user_schema(read_self_response):
assert UserRead(**read_self_response.json())
def test_read_self_should_return_same_user(read_self_response, user):
assert UserRead(**read_self_response.json()) == UserRead.from_orm(user)
def test_read_self_should_return_401_for_invalid_jwt(client):
response = read_self_request(client, 123)
assert response.status_code == HTTP_401_UNAUTHORIZED
def test_read_self_should_return_404_if_user_does_not_exist(client, jwt_service):
jwt_payload = AccessTokenPayload(
user_id=1, roles=[], exp=AccessTokenPayload.calc_exp(1), sid="123456"
)
jwt = jwt_service.generate_token(jwt_payload.dict())
response = read_self_request(client, jwt)
assert response.status_code == HTTP_404_NOT_FOUND
def update_self_request(client, jwt, payload=update_payload):
return client.put(
"/api/v1/users/me", headers={"Authorization": f"Bearer {jwt}"}, json=payload,
)
@fixture
def update_self_response(client, user_jwt):
yield update_self_request(client, user_jwt)
def test_update_self_should_return_status_200_for_valid_jwt(update_self_response):
assert update_self_response.status_code == HTTP_200_OK
def test_update_self_should_return_json(update_self_response):
assert update_self_response.headers["Content-Type"] == "application/json"
def test_update_self_should_return_user_schema(update_self_response):
assert UserRead(**update_self_response.json())
def test_update_self_should_return_same_user(update_self_response, user):
user.email = update_payload["email"]
assert UserRead(**update_self_response.json()) == UserRead.from_orm(user)
def test_update_self_should_follow_schema(client, user_jwt):
update_payload["roles"] = ["ABC", "123"]
response = update_self_request(client, user_jwt, update_payload)
assert UserRead(**response.json()).roles == []
def test_update_self_should_return_401_for_invalid_jwt(client):
response = update_self_request(client, 123)
assert response.status_code == HTTP_401_UNAUTHORIZED
def test_update_self_should_return_404_if_user_does_not_exist(client, jwt_service):
jwt_payload = AccessTokenPayload(
user_id=1, roles=[], exp=AccessTokenPayload.calc_exp(1), sid="123456"
)
jwt = jwt_service.generate_token(jwt_payload.dict())
response = update_self_request(client, jwt)
assert response.status_code == HTTP_404_NOT_FOUND
def test_update_self_should_return_409_if_data_conflicts(
client, jwt_service, user, users_repository
):
users_repository.create(update_payload)
jwt_payload = AccessTokenPayload(
user_id=user.id,
roles=user.roles,
exp=AccessTokenPayload.calc_exp(1),
sid="123456",
)
jwt = jwt_service.generate_token(jwt_payload.dict())
response = update_self_request(client, jwt)
assert response.status_code == HTTP_409_CONFLICT
def delete_self_request(client, jwt):
return client.delete(
"/api/v1/users/me", headers={"Authorization": f"Bearer {jwt}"},
)
@fixture
def delete_self_response(client, user_jwt, users_repository):
yield delete_self_request(client, user_jwt)
def test_delete_self_should_return_status_204_for_valid_jwt(delete_self_response):
assert delete_self_response.status_code == HTTP_204_NO_CONTENT
def test_delete_self_should_return_json(delete_self_response):
assert delete_self_response.headers["Content-Type"] == "application/json"
def test_delete_self_should_delete_user(delete_self_response, users_repository):
assert len(users_repository.find_all()) == 0
def test_delete_self_should_return_401_for_invalid_jwt(client):
response = delete_self_request(client, 123)
assert response.status_code == HTTP_401_UNAUTHORIZED
def test_delete_self_should_return_404_if_user_does_not_exist(client, jwt_service):
jwt_payload = AccessTokenPayload(
user_id=1, roles=[], exp=AccessTokenPayload.calc_exp(1), sid="123456"
)
jwt = jwt_service.generate_token(jwt_payload.dict())
response = delete_self_request(client, jwt)
assert response.status_code == HTTP_404_NOT_FOUND
|
def setup():
size(600, 500)
smooth()
background(255)
strokeWeight(30)
noLoop()
def draw():
stroke(20, 100)
i = range(1, 8)
for i in range(1, 5):
line(i*2*50, 200, 150 + (2*i-1)*50, 300)
line(i*2*50 + 100, 200, 50 + (2*i-1)*50, 300)
|
import matplotlib.pyplot as plt
import math
distance = 100
def draw_robot(robot, error):
line_length = 2
o = math.sin(robot.theta) * line_length + robot.y
a = math.cos(robot.theta) * line_length + robot.x
if error:
plt.plot([robot.x], [robot.y], 'ro', markersize=6)
else:
plt.plot([robot.x], [robot.y], 'ro', markersize=10)
plt.plot([robot.x, a], [robot.y, o], 'r-', linewidth=4)
def draw_particles(particles, error):
for particle in particles:
line_length = 2
o = math.sin(particle.theta) * line_length + particle.y
a = math.cos(particle.theta) * line_length + particle.x
if error:
plt.plot([particle.x], [particle.y], 'bo', markersize=6)
else:
plt.plot([particle.x], [particle.y], 'bo', markersize=10)
plt.plot([particle.x, a], [particle.y, o], 'b-', linewidth=4)
def draw_poles(poles):
for pole in poles:
plt.plot([pole.x], [pole.y], 'gs', markersize=15)
def plot(
robot,
particles=None,
poles=None,
j=None,
autorun=False,
time=1,
error=False):
plt.figure(figsize=[8, 8])
if j is not None:
plt.title(str(j))
plt.grid(linestyle='--')
plt.yticks([0, int(distance / 4), int(distance / 2),
int(distance * 3 / 4), distance])
plt.xticks([0, int(distance / 4), int(distance / 2),
int(distance * 3 / 4), distance])
if particles is not None:
draw_particles(particles, error)
if poles is not None:
draw_poles(poles)
draw_robot(robot, error)
plt.xlim([0, distance])
plt.ylim([0, distance])
if error:
plt.xlim([-distance * 0.2, distance * 1.2])
plt.ylim([-distance * 0.2, distance * 1.2])
if autorun:
if j == 0:
# Not sure why this is needed but it is.
plt.pause(time)
plt.show(block=False)
plt.pause(time)
plt.close()
else:
plt.show()
def print_particle_error(robot, particles):
weights = []
for particle in particles:
weights += [particle.weight]
best_particle = weights.index(max(weights))
diff_x = round(abs(robot.x - particles[best_particle].x), 1)
diff_y = round(abs(robot.y - particles[best_particle].y), 1)
diff_pos = round(diff_x + diff_y, 2)
diff_theta = round(abs(robot.theta - particles[best_particle].theta), 2)
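    # wrap the heading error into [0, pi] so orientations on either side of +/-pi are not over-penalized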
if diff_theta > math.pi:
diff_theta = round(abs(diff_theta - math.pi * 2), 2)
print("Error: [" + str(diff_pos) + ", " + str(diff_theta) + "]")
print("Weight Sum: " + str(round(sum(weights), 2)))
print("Max Weight: " + str(round(particles[best_particle].weight, 2)))
if (diff_pos < 3) and (diff_theta < 0.5):
print("Converged!")
|
import sublime, sublime_plugin
import time
import datetime
def log_error(ex, command):
error_msg = 'Error in ' + command + ': ' + str(ex)
print error_msg
sublime.status_message(error_msg)
class SubliMapCommand(sublime_plugin.TextCommand):
def run(self, edit):
self.edit = edit
self.view.window().show_input_panel('Map lambda t, i:', '', self.map_text, None, None)
def map_text(self, inp):
try:
edit = self.view.begin_edit()
replacer = eval('lambda t, i: str(' + inp + ')')
for idx, region in enumerate(self.view.sel()):
txt = self.view.substr(region)
replacement = replacer(txt, idx)
                self.view.replace(edit, region, replacement)
except Exception as e:
log_error(e, 'SubliMap')
finally:
self.view.end_edit(edit)
class SubliReduceCommand(sublime_plugin.TextCommand):
def run(self, edit):
self.edit = edit
self.view.window().show_input_panel('Reduce lambda x, y:', '', self.reduce_text, None, None)
def reduce_text(self, inp):
try:
edit = self.view.begin_edit()
reducer = eval('lambda x, y: ' + inp)
result = reduce(reducer, map(lambda x: self.view.substr(x), self.view.sel()))
sublime.status_message("Result: " + str(result))
map(lambda x: self.view.erase(edit, x), self.view.sel())
self.view.replace(edit, self.view.sel()[0], str(result))
except Exception as e:
log_error(e, 'SubliReduce')
finally:
self.view.end_edit(edit)
|
from envirophat import motion
from time import sleep
import pantilthat
import math
import pigpio
import keyboard
def track(init_heading, i, motor, prev):
acc = motion.accelerometer()
heading = (motion.heading() + i) % 360
# handle tilt
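    # map the accelerometer x reading (assumed to be roughly -1..1 g) onto a -90..90 degree tilt command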
tilt = math.floor(90 * acc[0])
if tilt > 90:
tilt = 90
elif tilt < -90:
tilt = -90
if prev[0] is None or abs(tilt-prev[0]) > 3:
motor.tilt(tilt)
else:
tilt = prev[0]
# handle pan
heading = heading - init_heading
if heading < -90:
heading = -90
elif heading > 90:
heading = 90
if prev[1] is None or abs(heading - prev[1]) > .5:
motor.pan(heading)
else:
heading = prev[1]
return (tilt, heading)
motor = None
tracking_eh = False  # camera tracking starts off; toggled with the spacebar handler
def main():
# init cam motor
    global motor, tracking_eh
motor = pantilthat.PanTilt()
# set up wheel motors
ESC = 4
pi = pigpio.pi()
pi.set_servo_pulsewidth(ESC, 0)
max_val = 2000
min_val = 700
# arming the motors
print("Connect the battery and press enter")
input()
pi.set_servo_pulsewidth(ESC, 0)
sleep(1)
pi.set_servo_pulsewidth(ESC, max_val)
sleep(1)
pi.set_servo_pulsewidth(ESC, min_val)
sleep(1)
print("Arming done")
# cam control functions
def toggle_tracking(_):
global tracking_eh
tracking_eh = not tracking_eh
def move_right(_):
global motor
motor.pan((motor.get_servo_one() + 10) if motor.get_servo_one() < 80 else 90)
def move_left(_):
global motor
motor.pan((motor.get_servo_one() - 10) if motor.get_servo_one() > -80 else -90)
def move_up(_):
global motor
motor.tilt((motor.get_servo_two() + 10) if motor.get_servo_two() < 80 else 90)
def move_down(_):
global motor
motor.tilt((motor.get_servo_two() - 10) if motor.get_servo_two() > -80 else -90)
def go_fast(_):
pi.set_servo_pulsewidth(ESC, max_val)
def so_slow(_):
pi.set_servo_pulsewidth(ESC, min_val)
def stop_motors(_):
pi.set_servo_pulsewidth(ESC, 0)
# cam controls
keyboard.on_press_key("w", move_up)
keyboard.on_press_key("s", move_down)
keyboard.on_press_key("a", move_right)
keyboard.on_press_key("d", move_left)
keyboard.on_press_key(" ", toggle_tracking)
# drive controls
keyboard.on_press_key("up", go_fast)
keyboard.on_press_key("down", so_slow)
for key in ["up", "down"]:
keyboard.on_release_key(key, stop_motors)
# main bot loop
prev = (None, None)
while True:
# handle init heading
if not tracking_eh:
init_heading = motion.heading()
i = 0
if init_heading > 270:
init_heading -= 90
i = -90
elif init_heading < 90:
init_heading += 90
i = 90
else:
prev = track(init_heading, i, motor, prev)
tracking_eh = False
if __name__ == "__main__":
main()
|
import pygame
from pygame.locals import *
from sgc.widgets._locals import *
from sgc.widgets.base_widget import Simple
class MyBasicWidget(Simple):
_default_size = (100,100)
class MyWidget(Simple):
_default_size = (100,100)
_available_images = ("over",)
_extra_images = {"thing": ((0.3, 0), (1, -4))}
_can_focus = True
_settings_default = {"label": "Text", "label_col": (255,255,255)}
_on = False
def _draw_base(self):
for img, col in zip(self._available_images,
(Color("red"), Color("green"))):
self._images[img].fill(Color("black"))
pygame.draw.circle(self._images[img], col,
(self.rect.w//2, self.rect.h//2), self.rect.w//2)
def _draw_thing(self, image, size):
image.fill(Color("darkolivegreen4"))
def _draw_final(self):
text = Font["widget"].render(self._settings["label"], True,
self._settings["label_col"])
x = self.rect.w//2 - text.get_width()//2
y = self.rect.h//2 - text.get_height()//2
for img in self._available_images:
self._images[img].blit(text, (x,y))
def update(self, time):
self._images["thing"].rect.centerx = (pygame.mouse.get_pos()[0] -
self.rect_abs.x)
def _event(self, event):
if event.type == MOUSEBUTTONDOWN:
if event.button == 1:
self._on = not self._on
self.on_click()
self._switch("over" if self._on else "image")
else:
self._images["thing"]._show = not self._images["thing"]._show
def _config(self, **kwargs):
if "init" in kwargs:
self._images["thing"].rect.y = 2
for key in ("label", "label_col"):
if key in kwargs:
self._settings[key] = kwargs[key]
def on_click(self):
pygame.event.post(self._create_event("click", on=self._on))
def _focus_enter(self, focus):
if focus == 1:
self._draw_rect = True
self._switch()
def _focus_exit(self):
self._draw_rect = False
self._switch()
if __name__ == "__main__":
import sgc
from sgc.locals import *
pygame.display.init()
pygame.font.init()
screen = sgc.surface.Screen((640,480))
clock = pygame.time.Clock()
# Basic widget, inherits behaviours from Simple
widget = MyBasicWidget((200,100), pos=(10,50),
label="Free label", label_side="top")
widget.add()
other = MyWidget(pos=(200,250), label="Custom")
other.add(0)
running = True
while running:
time = clock.tick(20)
for event in pygame.event.get():
if event.type == GUI:
print("State: ", event.on)
sgc.event(event)
if event.type == QUIT:
running = False
screen.fill((0,0,100))
sgc.update(time)
pygame.display.flip()
|
from typing import Union
__all__ = [
'num',
]
num = Union[int, float]
|
# coding=utf-8
import peewee as pw
import redis as redis_
from config import PG_HOST, PG_USER, PG_PWD
db = pw.PostgresqlDatabase('demo', **{'host': PG_HOST, 'user': PG_USER, 'password': PG_PWD})
redis = redis_.Redis()
|
#!/usr/bin/python3
import os
import sys
import operator
import numpy as np
"""
Script to collect the F1 scores of the output files in the provided directory, using the conlleval.pl script in the
output directory. Afterwards the provided directory will contain a file named f1.scores with the sorted scores of the
different files, plus their mean and std.
"""
def collect_results(dir):
"""
    Collects the results in a directory (output files, e.g. random.txt), writing them in a file
named f1.scores in the same directory.
"""
dir = dir.rstrip("/") + "/" # make sure the / is there without having two of them
f1dir = dir + "f1/"
if not os.path.exists(f1dir):
os.makedirs(f1dir)
# list of strings of the form "parameters: f1"
res = dict()
# measure performance of each file and retrieve f1 score, writing it to a file in the f1 directory
for file in os.listdir(dir):
if file.endswith(".txt"):
input_name = dir + file
output_name = f1dir + file
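            # conlleval.pl prints the overall scores on its second line; keep only the trailing value (FB1)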
cmd = "../output/conlleval.pl < %s | sed '2q;d' | grep -oE '[^ ]+$' > %s" % (input_name, output_name)
os.system(cmd)
# collect different f1 scores in the same file
for file in sorted(os.listdir(f1dir)):
if file.endswith(".txt"):
with open(f1dir + file, "r") as input:
name = file
for line in input:
res[name] = float(line.strip("\n"))
values = np.array([v for v in res.values()])
mean, std = values.mean(), values.std()
res = sorted(res.items(), key=operator.itemgetter(1), reverse=True)
with open(dir + "f1.scores", "w") as output:
output.write("MEAN: %f, STD: %f\n" % (mean, std))
for pair in res:
output.write(pair[0] + " " + str(pair[1]) + "\n")
output.close()
os.system("rm -r %s" % f1dir)
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: \n./collect_results path \nWhere path is a directory containing .txt files with predictions; "
"this script will collect the F1 score of each prediction file and report them in a file called "
"f1.scores; the evaluation script conlleval.pl in the output directory is used.")
exit()
else:
# get to directory and for each sub directory collect results
dir = sys.argv[1]
collect_results(dir)
|
#!/usr/bin/env python
import importlib
import os
import sys
import copy
import numpy as np
from BaseDriver import LabberDriver
from sequence_builtin import CPMG, PulseTrain, Rabi, SpinLocking, iSWAP_Cplr
from sequence_rb import SingleQubit_RB, TwoQubit_RB
from sequence import SequenceToWaveforms
# dictionary with built-in sequences
SEQUENCES = {'Rabi': Rabi,
'CP/CPMG': CPMG,
'Pulse train': PulseTrain,
'1-QB Randomized Benchmarking': SingleQubit_RB,
'2-QB Randomized Benchmarking': TwoQubit_RB,
'Spin-locking' : SpinLocking,
'iSWAP gate using coupler' : iSWAP_Cplr,
'Custom': type(None)}
class Driver(LabberDriver):
"""This class implements a multi-qubit pulse generator."""
def performOpen(self, options={}):
"""Perform the operation of opening the instrument connection."""
# init variables
self.sequence = None
self.sequence_to_waveforms = SequenceToWaveforms(1)
self.waveforms = {}
# always create a sequence at startup
name = self.getValue('Sequence')
self.sendValueToOther('Sequence', name)
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
"""Perform the Set Value instrument operation."""
# only do something here if changing the sequence type
if quant.name == 'Sequence':
# create new sequence if sequence type changed
new_type = SEQUENCES[value]
if not isinstance(self.sequence, new_type):
# create a new sequence object
if value == 'Custom':
# for custom python files
path = self.getValue('Custom Python file')
(path, modName) = os.path.split(path)
sys.path.append(path)
modName = modName.split('.py')[0] # strip suffix
mod = importlib.import_module(modName)
# the custom sequence class has to be named
# 'CustomSequence'
if not isinstance(self.sequence, mod.CustomSequence):
self.sequence = mod.CustomSequence(1)
else:
# standard built-in sequence
self.sequence = new_type(1)
elif (quant.name == 'Custom Python file' and
self.getValue('Sequence') == 'Custom'):
# for custom python files
path = self.getValue('Custom Python file')
(path, modName) = os.path.split(path)
modName = modName.split('.py')[0] # strip suffix
sys.path.append(path)
mod = importlib.import_module(modName)
# the custom sequence class has to be named 'CustomSequence'
if not isinstance(self.sequence, mod.CustomSequence):
self.sequence = mod.CustomSequence(1)
return value
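    # A file selected via 'Custom Python file' is expected to define a class named
    # exactly 'CustomSequence' whose constructor takes the number of qubits, mirroring
    # the built-in sequences above. A minimal sketch (assuming the base class lives in
    # sequence.py next to this driver; the actual base-class API is not shown here):
    #
    #     from sequence import Sequence
    #
    #     class CustomSequence(Sequence):
    #         def generate_sequence(self, config):
    #             pass  # populate pulses/gates from the driver config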
def performGetValue(self, quant, options={}):
"""Perform the Get Value instrument operation."""
# ignore if no sequence
if self.sequence is None:
return quant.getValue()
# check type of quantity
if (quant.name.startswith('Voltage, QB') or
quant.name.startswith('Single-shot, QB')):
# perform demodulation, check if config is updated
if self.isConfigUpdated():
                # update sequence object with current driver configuration
config = self.instrCfg.getValuesDict()
self.sequence.set_parameters(config)
self.sequence_to_waveforms.set_parameters(config)
# get qubit index and waveforms
n = int(quant.name.split(', QB')[1]) - 1
demod_iq = self.getValue('Demodulation - IQ')
            # the demodulation reference is needed in both modes
            ref = self.getValue('Demodulation - Reference')
            if demod_iq:
                signal_i = self.getValue('Demodulation - Input I')
                signal_q = self.getValue('Demodulation - Input Q')
            else:
                signal = self.getValue('Demodulation - Input')
# perform demodulation
if demod_iq:
value = self.sequence_to_waveforms.readout.demodulate_iq(
n, signal_i, signal_q, ref)
else:
value = self.sequence_to_waveforms.readout.demodulate(
n, signal, ref)
# average values if not single-shot
if not quant.name.startswith('Single-shot, QB'):
value = np.mean(value)
elif quant.isVector():
# traces, check if waveform needs to be re-calculated
if self.isConfigUpdated():
                # update sequence object with current driver configuration
config = self.instrCfg.getValuesDict()
self.sequence.set_parameters(config)
self.sequence_to_waveforms.set_parameters(config)
# check if calculating multiple sequences, for randomization
if config.get('Output multiple sequences', False):
# create multiple randomizations, store in memory
n_call = int(config.get('Number of multiple sequences', 1))
calls = []
for n in range(n_call):
config['Randomize'] += 1
calls.append(
copy.deepcopy(
self.sequence_to_waveforms.get_waveforms(
self.sequence.get_sequence(config))))
# after all calls are done, convert output to matrix form
self.waveforms = dict()
n_qubit = self.sequence.n_qubit
# Align RB waveforms to end
align_RB_to_end = config.get('Align RB waveforms to end', False)
# start with xy, z and gate waveforms, list of data
for key in ['xy', 'z', 'gate']:
# get size of longest waveform
self.waveforms[key] = []
for n in range(n_qubit):
length = max([len(call[key][n]) for call in calls])
# build matrix
datatype = calls[0][key][n].dtype
data = np.zeros((n_call, length), dtype=datatype)
for m, call in enumerate(calls):
if align_RB_to_end:
data[m][-len(call[key][n]):] = call[key][n]
else:
data[m][:len(call[key][n])] = call[key][n]
self.waveforms[key].append(data)
# same for readout waveforms
for key in ['readout_trig', 'readout_iq']:
length = max([len(call[key]) for call in calls])
datatype = calls[0][key].dtype
data = np.zeros((n_call, length), dtype=datatype)
for m, call in enumerate(calls):
if align_RB_to_end:
data[m][-len(call[key]):] = call[key]
else:
data[m][:len(call[key])] = call[key]
self.waveforms[key] = data
else:
                    # normal operation, calculate waveforms
self.waveforms = self.sequence_to_waveforms.get_waveforms(
self.sequence.get_sequence(config))
# get correct data from waveforms stored in memory
value = self.getWaveformFromMemory(quant)
else:
# for all other cases, do nothing
value = quant.getValue()
return value
def getWaveformFromMemory(self, quant):
"""Return data from already calculated waveforms."""
# check which data to return
if quant.name[-1] in ('1', '2', '3', '4', '5', '6', '7', '8', '9'):
# get name and number of qubit waveform asked for
name = quant.name[:-1]
n = int(quant.name[-1]) - 1
# get correct vector
if name == 'Trace - I':
if self.getValue('Swap IQ'):
value = self.waveforms['xy'][n].imag
else:
value = self.waveforms['xy'][n].real
elif name == 'Trace - Q':
if self.getValue('Swap IQ'):
value = self.waveforms['xy'][n].real
else:
value = self.waveforms['xy'][n].imag
elif name == 'Trace - Z':
value = self.waveforms['z'][n]
elif name == 'Trace - G':
value = self.waveforms['gate'][n]
elif quant.name == 'Trace - Readout trig':
value = self.waveforms['readout_trig']
elif quant.name == 'Trace - Readout I':
value = self.waveforms['readout_iq'].real
elif quant.name == 'Trace - Readout Q':
value = self.waveforms['readout_iq'].imag
# return data as dict with sampling information
dt = 1 / self.sequence_to_waveforms.sample_rate
value = quant.getTraceDict(value, dt=dt)
return value
if __name__ == '__main__':
pass
|
"""
plotting utilities; best to import * to use utilities in this module for full functionality
"""
#####Imports#####
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import linregress
import numpy as np
#####Defaults#####
mpl.rcParams['axes.formatter.useoffset'] = False
VERY_SMALL_SIZE = 8
SMALL_SIZE = 12
MEDIUM_SIZE = 16
BIGGER_SIZE = 24
plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=SMALL_SIZE)     # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
plt.rc('figure', titlesize=SMALL_SIZE)   # fontsize of the figure title
cycle = list(plt.rcParams['axes.prop_cycle'].by_key().values())[0]
def generate_work_distribution_plots(work_dict,
BAR_dict,
out_prefix,
ligands_per_plot = 4,
width = 8.5,
height_unit = 5,
legend=False):
"""
make a series of plots of the forward/backward work distributions and BAR values (with errors) for each phase
arguments
work_dict : dict
dict of output type qmlify.analysis.fully_aggregate_work_dict
BAR_dict : dict
dict of output type qmlify.analysis.compute_BAR
out_prefix : str
string that is the prefix to lig{i}to{j}.pdf
ligands_per_plot : int, default 4
number of columns (each representing a different ligand, complex and solvent) in each image
width : float, default 8.5
width of each image in inches
height_unit : float, default 5
height of each image in inches
legend : bool, default False
whether to show legend of each plot
"""
unique_ligands = sorted(list(work_dict.keys()))
full_divisions = len(unique_ligands)//ligands_per_plot
remainder = len(unique_ligands) % ligands_per_plot
num_plots = full_divisions + 1 if remainder !=0 else full_divisions
print(f"generating {num_plots} plots...")
ligand_counter = 0
for plot_idx in range(num_plots): #iterate over all plots
fig = plt.figure(figsize=(width, height_unit))
start = ligand_counter
        end = start + remainder if (remainder != 0 and plot_idx == num_plots - 1) else start + ligands_per_plot
print(f"plotting ligands: {start} through {end}")
for ligand_idx, ligand in enumerate(unique_ligands[start:end]):
forward_solvent_work_hists = work_dict[ligand]['solvent']['forward']
backward_solvent_work_hists = work_dict[ligand]['solvent']['backward']
forward_complex_work_hists = work_dict[ligand]['complex']['forward']
backward_complex_work_hists = work_dict[ligand]['complex']['backward']
            # determine xlims
abs_min = min([np.min(entry) for entry in [forward_solvent_work_hists, -backward_solvent_work_hists, forward_complex_work_hists, -backward_complex_work_hists]])
abs_max = max([np.max(entry) for entry in [forward_solvent_work_hists, -backward_solvent_work_hists, forward_complex_work_hists, -backward_complex_work_hists]])
#BAR results:
complex_BARs = BAR_dict[ligand]['complex']
solvent_BARs = BAR_dict[ligand]['solvent']
#complex
ax_complex = fig.add_subplot(2, ligands_per_plot, ligand_idx+1)
ax_complex.set_title(f"ligand {ligand}: complex")
ax_complex.get_yaxis().set_ticks([])
ax_complex.get_xaxis().set_ticklabels([])
offset = complex_BARs[0][0]
rel_min = abs_min - offset - 5
rel_max = abs_max - offset + 5
ax_complex.set_xlim(rel_min, rel_max)
for entry in complex_BARs:
dg, ddg = entry
ax_complex.axvline(dg - offset, color='k')
ax_complex.axvline(dg - offset + ddg, color = 'gray', ls = '--')
ax_complex.axvline(dg - offset - ddg, color = 'gray', ls = '--')
counter=0
for entry in forward_complex_work_hists:
if counter==0 and ligand_idx==0:
label = 'complex: forward'
else:
label = None
sns.distplot(entry - offset, color = cycle[0], ax=ax_complex, label=label)
counter+=1
counter=0
for entry in backward_complex_work_hists:
if counter==0 and ligand_idx==0:
label = 'complex: backward'
else:
label = None
sns.distplot(-entry - offset, color = cycle[1], ax=ax_complex, label=label)
counter+=1
if legend: plt.legend()
#solvent
ax_solvent = fig.add_subplot(2, ligands_per_plot, ligand_idx+ligands_per_plot+1)
ax_solvent.set_title(f"ligand {ligand}: solvent")
ax_solvent.get_yaxis().set_ticks([])
ax_solvent.set_xlabel(f"work [kT]")
ax_solvent.set_xlim(rel_min, rel_max)
for entry in solvent_BARs:
dg, ddg = entry
ax_solvent.axvline(dg - offset, color='k')
ax_solvent.axvline(dg - offset + ddg, color = 'gray', ls = '--')
ax_solvent.axvline(dg - offset - ddg, color = 'gray', ls = '--')
counter=0
for entry in forward_solvent_work_hists:
if counter==0 and ligand_idx==0:
label = 'solvent: forward'
else:
label = None
sns.distplot(entry - offset, color = cycle[0], ax=ax_solvent, label=label)
counter+=1
counter=0
for entry in backward_solvent_work_hists:
if counter==0 and ligand_idx==0:
label = 'solvent: backward'
else:
label = None
sns.distplot(-entry - offset, color = cycle[1], ax=ax_solvent, label=label)
counter+=1
if legend: plt.legend()
plt.tight_layout()
fig.savefig(f"{out_prefix}.lig{start}to{end}.pdf")
ligand_counter += ligands_per_plot
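# Example usage (hypothetical variables; the dicts must follow the qmlify.analysis
# formats described in the docstring above):
#
#     generate_work_distribution_plots(work_dict, BAR_dict, out_prefix='works',
#                                      ligands_per_plot=4, legend=True)
#
# writes one PDF per group of ligands, e.g. works.lig0to4.pdf.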
def plot_calibration(solvent_dict=None,
complex_dict=None,
timestep_in_fs = 2.0,
fig_width=8.5,
fig_height=7.5,
suptitle = None,
plot_name = "calibration.pdf"):
"""
make a calibration plot for the work standard deviation w.r.t. annealing time; write plot to disk
    NOTE : each dict is of the form returned by `qmlify.analysis.extract_work_calibrations`
arguments
solvent_dict : dict, default None
dict of [annealing_steps: work_array]
complex_dict : dict, default None
dict of [annealing_steps : work_array]
timestep_in_fs : float, default 2.0
timestep size in fs
fig_width : float, default 8.5
figure width in inches
fig_height : float, default 7.5
figure height in inches
suptitle : str, default None
the sup title of the plot
plot_name : str
name to write to (end in .png or .pdf)
"""
from qmlify.analysis import bootstrap, compute_CIs
fig = plt.figure(figsize=(fig_width, fig_height))
plot_solvent=True if solvent_dict is not None else False
plot_complex=True if complex_dict is not None else False
if plot_solvent and plot_complex:
complex_ax = fig.add_subplot(2,2,1)
solvent_ax = fig.add_subplot(2,2,3)
plotter_ax = fig.add_subplot(1,2,2)
elif plot_solvent and not plot_complex:
solvent_ax = fig.add_subplot(1,2,1)
plotter_ax = fig.add_subplot(1,2,2)
elif plot_complex and not plot_solvent:
complex_ax = fig.add_subplot(1,2,1)
plotter_ax = fig.add_subplot(1,2,2)
    # define an offset (use the solvent works when available, otherwise the complex works)
    if plot_solvent:
        solvent_keys = list(solvent_dict.keys())
        offset = np.mean(solvent_dict[solvent_keys[0]])
    else:
        offset = np.mean(list(complex_dict.values())[0])
#set xlims
mins, maxs = [], []
if plot_solvent:
mins += [np.min(val) for val in solvent_dict.values()]
maxs += [np.max(val) for val in solvent_dict.values()]
if plot_complex:
mins += [np.min(val) for val in complex_dict.values()]
maxs += [np.max(val) for val in complex_dict.values()]
xlims = (min(mins), max(maxs))
#set labels..
if plot_solvent:
solvent_ax.set_xlabel(f"work [kT]")
solvent_ax.get_yaxis().set_ticks([])
if plot_solvent:
color_counter = 0
for annealing_step in solvent_dict.keys():
sns.distplot(solvent_dict[annealing_step] - offset, color = cycle[color_counter], label = f"{annealing_step*timestep_in_fs/1000} ps", ax=solvent_ax)
color_counter +=1
solvent_ax.legend()
solvent_ax.set_xlim(xlims[0] - offset - 5, xlims[1] - offset + 5)
solvent_ax.set_title(f"solvent")
solvent_ax.set_ylabel("$P(work)$")
if plot_complex: complex_ax.xaxis.set_ticklabels([])
if plot_complex:
complex_ax.get_yaxis().set_ticks([])
color_counter = 0
for annealing_step in complex_dict.keys():
sns.distplot(complex_dict[annealing_step] - offset, color = cycle[color_counter], label = f"{annealing_step*timestep_in_fs/1000} ps", ax=complex_ax)
color_counter +=1
complex_ax.legend()
complex_ax.set_xlim(xlims[0] - offset - 5, xlims[1] - offset + 5)
complex_ax.set_title(f"complex")
complex_ax.set_ylabel("$P(work)$")
#plotter ax
if plot_solvent:
work_stddev = [np.std(vals) for vals in solvent_dict.values()]
bounds = [compute_CIs(bootstrap(val, np.std, num_resamples=10000), alpha=0.95) for val in solvent_dict.values()]
for idx in range(len(solvent_dict)):
label = 'solvent' if idx==0 else None
y = work_stddev[idx]
fix_bounds = np.array(bounds[idx])
fix_bounds[0] = y - fix_bounds[0]
fix_bounds[1] = fix_bounds[1] - y
plotter_ax.errorbar(list(solvent_dict.keys())[idx]*timestep_in_fs/1000,
y,
ls='None',
marker = 'o',
color = cycle[idx],
yerr = fix_bounds.reshape(2,1),
alpha=0.5,
markersize=10,
elinewidth=3,
label=label)
if idx==0: plotter_ax.legend()
plotter_ax.set_xscale('log')
plotter_ax.set_xlabel(f"annealing time [ps]")
plotter_ax.set_ylabel(f"work standard deviation [kT]")
if plot_complex:
work_stddev = [np.std(vals) for vals in complex_dict.values()]
bounds = [compute_CIs(bootstrap(val, np.std, num_resamples=10000), alpha=0.95) for val in complex_dict.values()]
for idx in range(len(complex_dict)):
label = 'complex' if idx==0 else None
y = work_stddev[idx]
fix_bounds = np.array(bounds[idx])
fix_bounds[0] = y - fix_bounds[0]
fix_bounds[1] = fix_bounds[1] - y
plotter_ax.errorbar(list(complex_dict.keys())[idx]*timestep_in_fs/1000,
y,
ls='None',
marker = '^',
color = cycle[idx],
yerr = fix_bounds.reshape(2,1),
alpha=0.5,
markersize=10,
elinewidth=3,
label = label)
if idx==0: plotter_ax.legend()
plotter_ax.set_xscale('log')
plotter_ax.set_xlabel(f"annealing time [ps]")
plotter_ax.set_ylabel(f"work standard deviation [kT]")
plt.tight_layout()
if suptitle is not None:
fig.suptitle(suptitle)
fig.savefig(plot_name)
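# Example usage (hypothetical work arrays keyed by the number of annealing steps):
#
#     plot_calibration(solvent_dict={500: solvent_works_500, 5000: solvent_works_5000},
#                      complex_dict={500: complex_works_500, 5000: complex_works_5000},
#                      timestep_in_fs=2.0, suptitle='ligand 0', plot_name='calibration.pdf')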
|
"""
https://leetcode.com/problems/sort-array-by-parity/
Given an array A of non-negative integers, return an array consisting of all the even elements of A, followed by all the odd elements of A.
You may return any answer array that satisfies this condition.
Example 1:
Input: [3,1,2,4]
Output: [2,4,3,1]
The outputs [4,2,3,1], [2,4,1,3], and [4,2,1,3] would also be accepted.
Note:
1 <= A.length <= 5000
0 <= A[i] <= 5000
"""
from typing import List


# time complexity: O(n); space complexity: O(n) for the result array
class Solution:
def sortArrayByParity(self, A: List[int]) -> List[int]:
result = [0] * len(A)
front = 0
back = len(A) - 1
for i in A:
if i % 2 == 0:
result[front] = i
front += 1
else:
result[back] = i
back -= 1
return result
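# Example (evens are written from the front, odds from the back, so any valid
# even-first ordering can come out):
#     Solution().sortArrayByParity([3, 1, 2, 4])  # -> [2, 4, 1, 3]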
|
# Generated by Django 3.0.5 on 2020-06-10 04:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sg', '0004_auto_20200602_2018'),
]
operations = [
migrations.AddField(
model_name='writing',
name='category',
            field=models.CharField(default='', max_length=50, verbose_name='카테고리'),
),
]
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.special_math_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def _random_pd_matrix(n, rng):
"""Random postive definite matrix."""
temp = rng.randn(n, n)
return temp.dot(temp.T)
class CholeskySolveTest(tf.test.TestCase):
_use_gpu = False
def setUp(self):
self.rng = np.random.RandomState(0)
def test_works_with_five_different_random_pos_def_matrices(self):
with self.test_session():
for n in range(1, 6):
for np_type, atol in [(np.float32, 0.05), (np.float64, 1e-5)]:
# Create 2 x n x n matrix
array = np.array(
[_random_pd_matrix(n, self.rng), _random_pd_matrix(n, self.rng)]
).astype(np_type)
chol = tf.cholesky(array)
for k in range(1, 3):
rhs = self.rng.randn(2, n, k).astype(np_type)
x = tf.cholesky_solve(chol, rhs)
self.assertAllClose(rhs, tf.matmul(array, x).eval(), atol=atol)
class CholeskySolveGpuTest(CholeskySolveTest):
_use_gpu = True
class EyeTest(tf.test.TestCase):
def test_non_batch_2x2(self):
num_rows = 2
dtype = np.float32
np_eye = np.eye(num_rows).astype(dtype)
with self.test_session():
eye = tf.eye(num_rows, dtype=dtype)
self.assertAllEqual((num_rows, num_rows), eye.get_shape())
self.assertAllEqual(np_eye, eye.eval())
def test_non_batch_2x3(self):
num_rows = 2
num_columns = 3
dtype = np.float32
np_eye = np.eye(num_rows, num_columns).astype(dtype)
with self.test_session():
eye = tf.eye(num_rows, num_columns=num_columns, dtype=dtype)
self.assertAllEqual((num_rows, num_columns), eye.get_shape())
self.assertAllEqual(np_eye, eye.eval())
def test_1x3_batch_4x4(self):
num_rows = 4
batch_shape = [1, 3]
dtype = np.float32
np_eye = np.eye(num_rows).astype(dtype)
with self.test_session():
eye = tf.eye(num_rows, batch_shape=batch_shape, dtype=dtype)
self.assertAllEqual(batch_shape + [num_rows, num_rows], eye.get_shape())
eye_v = eye.eval()
for i in range(batch_shape[0]):
for j in range(batch_shape[1]):
self.assertAllEqual(np_eye, eye_v[i, j, :, :])
def test_1x3_batch_4x4_dynamic(self):
num_rows = 4
batch_shape = [1, 3]
dtype = np.float32
np_eye = np.eye(num_rows).astype(dtype)
with self.test_session():
num_rows_ph = tf.placeholder(tf.int32)
batch_shape_ph = tf.placeholder(tf.int32)
eye = tf.eye(
num_rows_ph,
batch_shape=batch_shape_ph,
dtype=dtype)
eye_v = eye.eval(
feed_dict={
num_rows_ph: num_rows,
batch_shape_ph: batch_shape})
for i in range(batch_shape[0]):
for j in range(batch_shape[1]):
self.assertAllEqual(np_eye, eye_v[i, j, :, :])
def test_1x3_batch_5x4(self):
num_rows = 5
num_columns = 4
batch_shape = [1, 3]
dtype = np.float32
np_eye = np.eye(num_rows, num_columns).astype(dtype)
with self.test_session():
eye = tf.eye(
num_rows,
num_columns=num_columns,
batch_shape=batch_shape,
dtype=dtype)
self.assertAllEqual(
batch_shape + [num_rows, num_columns],
eye.get_shape())
eye_v = eye.eval()
for i in range(batch_shape[0]):
for j in range(batch_shape[1]):
self.assertAllEqual(np_eye, eye_v[i, j, :, :])
def test_1x3_batch_5x4_dynamic(self):
num_rows = 5
num_columns = 4
batch_shape = [1, 3]
dtype = np.float32
np_eye = np.eye(num_rows, num_columns).astype(dtype)
with self.test_session():
num_rows_ph = tf.placeholder(tf.int32)
num_columns_ph = tf.placeholder(tf.int32)
batch_shape_ph = tf.placeholder(tf.int32)
eye = tf.eye(
num_rows_ph,
num_columns=num_columns_ph,
batch_shape=batch_shape_ph,
dtype=dtype)
eye_v = eye.eval(
feed_dict={
num_rows_ph: num_rows,
num_columns_ph: num_columns,
batch_shape_ph: batch_shape})
for i in range(batch_shape[0]):
for j in range(batch_shape[1]):
self.assertAllEqual(np_eye, eye_v[i, j, :, :])
def test_non_batch_0x0(self):
num_rows = 0
dtype = np.int64
np_eye = np.eye(num_rows).astype(dtype)
with self.test_session():
eye = tf.eye(num_rows, dtype=dtype)
self.assertAllEqual((num_rows, num_rows), eye.get_shape())
self.assertAllEqual(np_eye, eye.eval())
def test_non_batch_2x0(self):
num_rows = 2
num_columns = 0
dtype = np.int64
np_eye = np.eye(num_rows, num_columns).astype(dtype)
with self.test_session():
eye = tf.eye(num_rows, num_columns=num_columns, dtype=dtype)
self.assertAllEqual((num_rows, num_columns), eye.get_shape())
self.assertAllEqual(np_eye, eye.eval())
def test_non_batch_0x2(self):
num_rows = 0
num_columns = 2
dtype = np.int64
np_eye = np.eye(num_rows, num_columns).astype(dtype)
with self.test_session():
eye = tf.eye(num_rows, num_columns=num_columns, dtype=dtype)
self.assertAllEqual((num_rows, num_columns), eye.get_shape())
self.assertAllEqual(np_eye, eye.eval())
def test_1x3_batch_0x0(self):
num_rows = 0
batch_shape = [1, 3]
dtype = np.float32
np_eye = np.eye(num_rows).astype(dtype)
with self.test_session():
eye = tf.eye(num_rows, batch_shape=batch_shape, dtype=dtype)
self.assertAllEqual((1, 3, 0, 0), eye.get_shape())
eye_v = eye.eval()
for i in range(batch_shape[0]):
for j in range(batch_shape[1]):
self.assertAllEqual(np_eye, eye_v[i, j, :, :])
def test_1x3_batch_2x0(self):
num_rows = 2
num_columns = 0
batch_shape = [1, 3]
dtype = np.float32
np_eye = np.eye(num_rows, num_columns).astype(dtype)
with self.test_session():
eye = tf.eye(
num_rows,
num_columns=num_columns,
batch_shape=batch_shape,
dtype=dtype)
self.assertAllEqual(
batch_shape + [num_rows, num_columns],
eye.get_shape())
eye_v = eye.eval()
for i in range(batch_shape[0]):
for j in range(batch_shape[1]):
self.assertAllEqual(np_eye, eye_v[i, j, :, :])
def test_1x3_batch_0x2(self):
num_rows = 0
num_columns = 2
batch_shape = [1, 3]
dtype = np.float32
np_eye = np.eye(num_rows, num_columns).astype(dtype)
with self.test_session():
eye = tf.eye(
num_rows,
num_columns=num_columns,
batch_shape=batch_shape,
dtype=dtype)
self.assertAllEqual(
batch_shape + [num_rows, num_columns],
eye.get_shape())
eye_v = eye.eval()
for i in range(batch_shape[0]):
for j in range(batch_shape[1]):
self.assertAllEqual(np_eye, eye_v[i, j, :, :])
if __name__ == '__main__':
tf.test.main()
|
###############################################################################################################
# CHALLENGE: 014
# TITLE: Temperature Converter
# LESSON: 07
# EXERCISE: Write a program that reads a temperature entered in ºC and converts it to ºF.
###############################################################################################################
temp = int(input("Enter the temperature in ºC: "))
f = (1.8 * temp) + 32  # Fahrenheit
k = temp + 273  # Kelvin
print(" Celsius: {:.1f} ºC \n Fahrenheit: {:.0f} ºF \n Kelvin: {:.0f} K.".format(temp, f, k))
|
from flask import Blueprint, render_template, request, url_for, redirect
from cruddy.model import Events
from flask_login import login_required
from cruddy.query import *
app_events = Blueprint('events', __name__,
url_prefix='/events',
template_folder='templates/',
static_folder='static',
static_url_path='assets')
def events_all():
table = Events.query.all()
json_ready = [item.read() for item in table]
return json_ready
def event_by_id(eventid):
return Events.query.filter_by(eventID=eventid).first()
@app_events.route('/edit/')
@login_required
def edit():
if not current_user.email:
return redirect(url_for('crud.crud_login'))
admin = "admin@admin.admin"
control = current_user.email
if control == admin:
return render_template("editevents.html", table=events_all())
else:
return redirect(url_for('crud.crud_login'))
@app_events.route('/create/', methods=["POST"])
def create():
if request.form:
po=Events(
request.form.get("name"),
request.form.get("date"),
request.form.get("description")
)
po.create()
return render_template("editevents.html", table=events_all())
@app_events.route('/read/', methods=["POST"])
def read():
table = []
if request.form:
eventid = request.form.get("eventid")
po = event_by_id(eventid)
if po is not None:
table = [po.read()]
return render_template("editevents.html", table=table)
@app_events.route('/update/', methods=["POST"])
def update():
if request.form:
eventid = request.form.get("eventid")
date = request.form.get("date")
name = request.form.get("name")
description = request.form.get("description")
po = event_by_id(eventid)
if po is not None:
po.update(date, name, description)
return render_template("editevents.html", table=events_all())
@app_events.route('/delete/', methods=["POST"])
def delete():
if request.form:
eventid = request.form.get("eventid")
po = event_by_id(eventid)
if po is not None:
po.delete()
return render_template("editevents.html", table=events_all())
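# To serve these endpoints, the blueprint is registered on the main Flask application,
# e.g. (hypothetical application module):
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(app_events)  # routes become /events/edit/, /events/create/, ...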
|
# YouTube: https://youtu.be/27pMZOoPRzk
# Post: https://caffeinealgorithm.com/blog/20210924/if-elif-e-else-em-python/
'''
if: when its condition is true,
the code inside the if block is executed.
elif ("else if"): when its condition is true (elif is only checked if the if condition was false),
the code inside the elif block is executed.
else: no condition (only runs when both the if and elif conditions are false),
the code inside the else block is executed.
'''
x = 30
if x == 10:
    print('The value of x is equal to 10.')
elif x == 20:
    print('The value of x is equal to 20.')
else:
    print('The value of x is different from 10 and from 20.')
# The value of x is different from 10 and from 20.
|
import pydash
class HookBootstrap():
def __init__(self, context):
self._iniitializers = {'battery':self._init_battery, 'location': self._init_location}
self._validators = {'battery': self._validate_battery, 'location': self._validate_location}
self._api_config = context.api_configuration
async def load_worker_context(self, uuid, worker_record):
await self.download_maps()
        if not worker_record:
            worker_record = {}
            print('Cannot find worker ({}) context file. Generating default worker context'.format(uuid))
worker_record['uuid'] = uuid
worker_record['name'] = 'VirtualWorker({})'.format(uuid)
worker_record['type_specific'] = {}
for item in self._iniitializers:
worker_record = await self._iniitializers[item](worker_record)
return worker_record
else:
return await self._patch(worker_record)
return None
async def _init_battery(self, worker_record):
type_specific = worker_record['type_specific']
pydash.set_(type_specific, 'battery',
{
'battery_level': 75,
'charging_status': 0
})
return worker_record
async def _init_location(self, worker_record):
stations = await self._api_config.get_stations()
for s in stations:
map = await self._api_config.get_maps(s['map'])
if map:
type_specific = worker_record['type_specific']
if 'location' not in type_specific:
type_specific['location'] = {}
pydash.set_(type_specific['location'], 'map', s['map'])
pydash.set_(type_specific['location'], 'pose2d', s['pose'])
return worker_record
assert(False)
async def _validate_battery(self, worker_record):
if pydash.get(worker_record['type_specific'], 'battery') is None:
return {'result': False, 'message': 'battery information is not exist'}
return {'result': True}
async def _patch(self, worker_record):
for item in self._validators:
check = await self._validators[item](worker_record)
if check['result'] is False:
worker_record = await self._iniitializers[item](worker_record)
print('validation failed while path() {}:{}'.format(check['message'], {'updated_worker': worker_record}))
return worker_record
async def _validate_location(self, worker_record):
pose = pydash.get(worker_record['type_specific']['location'], 'pose2d')
map = pydash.get(worker_record['type_specific']['location'], 'map')
if pose is None:
return {'result': False, 'message': 'pose information is not loaded correctly'}
if map is None:
return {'result': False, 'message': 'map information is not loaded correctly'}
return {'result': True}
async def download_maps(self):
try:
map_list = await self._api_config.get_maps()
def cb(m):
return pydash.get(m, 'site')
map_list = pydash.filter_(map_list, cb)
            if len(map_list) == 0:
print('there are no maps on site configuration')
return False
except Exception as err:
print('failed to download maps')
return False
return True
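# A freshly generated worker record (no prior context file) comes out of
# load_worker_context shaped roughly like the following, with the defaults set by the
# initializers above and the map/pose taken from the first station whose map exists:
#
#     {
#         'uuid': '<uuid>',
#         'name': 'VirtualWorker(<uuid>)',
#         'type_specific': {
#             'battery': {'battery_level': 75, 'charging_status': 0},
#             'location': {'map': '<station map id>', 'pose2d': '<station pose>'},
#         },
#     }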
|
import datetime
import time
class Time(object):
def __init__(self):
self.init_time = time.time()
self.cosmos_time = 0 # Cosmos time
self.microsecond = 0
self.second = 0
self.minute = 0
self.hour = 0
self.date = 0
self.month = 0
self.year = 0
def now(self):
now = datetime.datetime.now()
self.cosmos_time = time.time() - self.init_time
self.microsecond = now.microsecond
self.second = now.second
self.minute = now.minute
self.hour = now.hour
self.date = now.day
self.month = now.month
self.year = now.year
return self
@staticmethod
def calc_time(decorated):
def wrapper(*args, **kwargs):
start_time = time.time()
var = decorated(*args, **kwargs)
if hasattr(args[0], 'log') and args[0].log:
log = args[0].log
log.info(f"Done. [{round(time.time() - start_time, 3)}s].\n\n")
else:
print(f"Done. [{round(time.time() - start_time, 3)}s].\n\n")
return var
return wrapper
def time(self):
return time.time() - self.init_time
def round_time(self):
return round(self.time(), 3)
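# Time.calc_time is meant to decorate instance methods: it times the call and reports
# via the instance's `log` attribute when one is set, otherwise via print().
# Hypothetical sketch:
#
#     class Job:
#         log = None  # or an object exposing .info(), e.g. a logging.Logger
#
#         @Time.calc_time
#         def run(self):
#             ...
#
#     Job().run()  # prints something like "Done. [0.001s]."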
|
class AriaException(Exception):
pass
class ProviderError(AriaException):
pass
class ProviderNotReady(ProviderError):
pass
class EmptyPlaylist(AriaException):
pass
|
DEBUG = False
USERNAME = 'hikaru'
CHANNEL = 'random'
VOCAB = {
'RANDOM': ['random', ':troll:', ':trollface:'],
'PASS': ['pass', 'skip'],
'RESIGN': ['resign', 'give up'],
'VOTE': ['vote', 'move', 'play'],
'VOTES': ['votes', 'moves', 'voted', 'chance'],
'CAPTURES': ['captures'],
'SHOW': ['show', 'board'],
'YES': ['yes', 'yeah', 'ya', 'y', 'ja', 'please', 'ok', 'yep'],
'NO': ['no', 'nope', 'n', 'nee', "don't", 'cancel'],
}
RESPONSES = {
'RESIGN_CONFIRMATION': [
'Are you sure you want to resign?',
'Sure?',
],
'RESIGN_CANCELLED': [
'Ok.',
'Resignation cancelled.',
],
'UNKNOWN': [
"I don't know.",
'What do you mean?',
"That doesn't make any sense.",
"I'm just a bot.",
],
}
# How often to play moves. See `man crontab` for format information.
if DEBUG:
CRON = '*/2 * * * *' # Every two minutes.
else:
CRON = '0 9-18 * * 1-5' # Hourly between 9:00 and 18:00 on weekdays.
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ExternalIntegrationArgs', 'ExternalIntegration']
@pulumi.input_type
class ExternalIntegrationArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ExternalIntegration resource.
:param pulumi.Input[str] name: The name of this integration
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of this integration
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class _ExternalIntegrationState:
def __init__(__self__, *,
external_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
signalfx_aws_account: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ExternalIntegration resources.
:param pulumi.Input[str] external_id: The external ID to use with your IAM role and with `aws.Integration`.
:param pulumi.Input[str] name: The name of this integration
:param pulumi.Input[str] signalfx_aws_account: The AWS Account ARN to use with your policies/roles, provided by SignalFx.
"""
if external_id is not None:
pulumi.set(__self__, "external_id", external_id)
if name is not None:
pulumi.set(__self__, "name", name)
if signalfx_aws_account is not None:
pulumi.set(__self__, "signalfx_aws_account", signalfx_aws_account)
@property
@pulumi.getter(name="externalId")
def external_id(self) -> Optional[pulumi.Input[str]]:
"""
The external ID to use with your IAM role and with `aws.Integration`.
"""
return pulumi.get(self, "external_id")
@external_id.setter
def external_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "external_id", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of this integration
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="signalfxAwsAccount")
def signalfx_aws_account(self) -> Optional[pulumi.Input[str]]:
"""
The AWS Account ARN to use with your policies/roles, provided by SignalFx.
"""
return pulumi.get(self, "signalfx_aws_account")
@signalfx_aws_account.setter
def signalfx_aws_account(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "signalfx_aws_account", value)
class ExternalIntegration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
SignalFx AWS CloudWatch integrations using Role ARNs. For help with this integration see [Connect to AWS CloudWatch](https://docs.signalfx.com/en/latest/integrations/amazon-web-services.html#connect-to-aws).
> **NOTE** When managing integrations you'll need to use an admin token to authenticate the SignalFx provider.
> **WARNING** This resource implements a part of a workflow. You must use it with `aws.Integration`. Check with SignalFx support for your realm's AWS account id.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
import pulumi_signalfx as signalfx
aws_myteam_extern = signalfx.aws.ExternalIntegration("awsMyteamExtern")
signalfx_assume_policy = pulumi.Output.all(aws_myteam_extern.signalfx_aws_account, aws_myteam_extern.external_id).apply(lambda signalfx_aws_account, external_id: aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
actions=["sts:AssumeRole"],
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
type="AWS",
identifiers=[signalfx_aws_account],
)],
conditions=[aws.iam.GetPolicyDocumentStatementConditionArgs(
test="StringEquals",
variable="sts:ExternalId",
values=[external_id],
)],
)]))
aws_sfx_role = aws.iam.Role("awsSfxRole",
description="signalfx integration to read out data and send it to signalfxs aws account",
assume_role_policy=signalfx_assume_policy.json)
aws_read_permissions = aws.iam.Policy("awsReadPermissions",
description="farts",
policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"dynamodb:ListTables",
"dynamodb:DescribeTable",
"dynamodb:ListTagsOfResource",
"ec2:DescribeInstances",
"ec2:DescribeInstanceStatus",
"ec2:DescribeVolumes",
"ec2:DescribeReservedInstances",
"ec2:DescribeReservedInstancesModifications",
"ec2:DescribeTags",
"organizations:DescribeOrganization",
"cloudwatch:ListMetrics",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:DescribeAlarms",
"sqs:ListQueues",
"sqs:GetQueueAttributes",
"sqs:ListQueueTags",
"elasticmapreduce:ListClusters",
"elasticmapreduce:DescribeCluster",
"kinesis:ListShards",
"kinesis:ListStreams",
"kinesis:DescribeStream",
"kinesis:ListTagsForStream",
"rds:DescribeDBInstances",
"rds:ListTagsForResource",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTags",
"elasticache:describeCacheClusters",
"redshift:DescribeClusters",
"lambda:GetAlias",
"lambda:ListFunctions",
"lambda:ListTags",
"autoscaling:DescribeAutoScalingGroups",
"s3:ListAllMyBuckets",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetBucketTagging",
"ecs:ListServices",
"ecs:ListTasks",
"ecs:DescribeTasks",
"ecs:DescribeServices",
"ecs:ListClusters",
"ecs:DescribeClusters",
"ecs:ListTaskDefinitions",
"ecs:ListTagsForResource",
"apigateway:GET",
"cloudfront:ListDistributions",
"cloudfront:ListTagsForResource",
"tag:GetResources",
"es:ListDomainNames",
"es:DescribeElasticsearchDomain"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
\"\"\")
sfx_read_attach = aws.iam.RolePolicyAttachment("sfx-read-attach",
role=aws_sfx_role.name,
policy_arn=aws_read_permissions.arn)
aws_myteam = signalfx.aws.Integration("awsMyteam",
enabled=True,
integration_id=aws_myteam_extern.id,
external_id=aws_myteam_extern.external_id,
role_arn=aws_sfx_role.arn,
regions=["us-east-1"],
poll_rate=300,
import_cloud_watch=True,
enable_aws_usage=True)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] name: The name of this integration
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[ExternalIntegrationArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
SignalFx AWS CloudWatch integrations using Role ARNs. For help with this integration see [Connect to AWS CloudWatch](https://docs.signalfx.com/en/latest/integrations/amazon-web-services.html#connect-to-aws).
> **NOTE** When managing integrations you'll need to use an admin token to authenticate the SignalFx provider.
> **WARNING** This resource implements a part of a workflow. You must use it with `aws.Integration`. Check with SignalFx support for your realm's AWS account id.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
import pulumi_signalfx as signalfx
aws_myteam_extern = signalfx.aws.ExternalIntegration("awsMyteamExtern")
signalfx_assume_policy = pulumi.Output.all(aws_myteam_extern.signalfx_aws_account, aws_myteam_extern.external_id).apply(lambda signalfx_aws_account, external_id: aws.iam.get_policy_document(statements=[aws.iam.GetPolicyDocumentStatementArgs(
actions=["sts:AssumeRole"],
principals=[aws.iam.GetPolicyDocumentStatementPrincipalArgs(
type="AWS",
identifiers=[signalfx_aws_account],
)],
conditions=[aws.iam.GetPolicyDocumentStatementConditionArgs(
test="StringEquals",
variable="sts:ExternalId",
values=[external_id],
)],
)]))
aws_sfx_role = aws.iam.Role("awsSfxRole",
description="signalfx integration to read out data and send it to signalfxs aws account",
assume_role_policy=signalfx_assume_policy.json)
aws_read_permissions = aws.iam.Policy("awsReadPermissions",
description="farts",
policy=\"\"\"{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"dynamodb:ListTables",
"dynamodb:DescribeTable",
"dynamodb:ListTagsOfResource",
"ec2:DescribeInstances",
"ec2:DescribeInstanceStatus",
"ec2:DescribeVolumes",
"ec2:DescribeReservedInstances",
"ec2:DescribeReservedInstancesModifications",
"ec2:DescribeTags",
"organizations:DescribeOrganization",
"cloudwatch:ListMetrics",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:DescribeAlarms",
"sqs:ListQueues",
"sqs:GetQueueAttributes",
"sqs:ListQueueTags",
"elasticmapreduce:ListClusters",
"elasticmapreduce:DescribeCluster",
"kinesis:ListShards",
"kinesis:ListStreams",
"kinesis:DescribeStream",
"kinesis:ListTagsForStream",
"rds:DescribeDBInstances",
"rds:ListTagsForResource",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTags",
"elasticache:describeCacheClusters",
"redshift:DescribeClusters",
"lambda:GetAlias",
"lambda:ListFunctions",
"lambda:ListTags",
"autoscaling:DescribeAutoScalingGroups",
"s3:ListAllMyBuckets",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetBucketTagging",
"ecs:ListServices",
"ecs:ListTasks",
"ecs:DescribeTasks",
"ecs:DescribeServices",
"ecs:ListClusters",
"ecs:DescribeClusters",
"ecs:ListTaskDefinitions",
"ecs:ListTagsForResource",
"apigateway:GET",
"cloudfront:ListDistributions",
"cloudfront:ListTagsForResource",
"tag:GetResources",
"es:ListDomainNames",
"es:DescribeElasticsearchDomain"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
\"\"\")
sfx_read_attach = aws.iam.RolePolicyAttachment("sfx-read-attach",
role=aws_sfx_role.name,
policy_arn=aws_read_permissions.arn)
aws_myteam = signalfx.aws.Integration("awsMyteam",
enabled=True,
integration_id=aws_myteam_extern.id,
external_id=aws_myteam_extern.external_id,
role_arn=aws_sfx_role.arn,
regions=["us-east-1"],
poll_rate=300,
import_cloud_watch=True,
enable_aws_usage=True)
```
:param str resource_name: The name of the resource.
:param ExternalIntegrationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ExternalIntegrationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ExternalIntegrationArgs.__new__(ExternalIntegrationArgs)
__props__.__dict__["name"] = name
__props__.__dict__["external_id"] = None
__props__.__dict__["signalfx_aws_account"] = None
super(ExternalIntegration, __self__).__init__(
'signalfx:aws/externalIntegration:ExternalIntegration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
external_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
signalfx_aws_account: Optional[pulumi.Input[str]] = None) -> 'ExternalIntegration':
"""
Get an existing ExternalIntegration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] external_id: The external ID to use with your IAM role and with `aws.Integration`.
:param pulumi.Input[str] name: The name of this integration
:param pulumi.Input[str] signalfx_aws_account: The AWS Account ARN to use with your policies/roles, provided by SignalFx.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ExternalIntegrationState.__new__(_ExternalIntegrationState)
__props__.__dict__["external_id"] = external_id
__props__.__dict__["name"] = name
__props__.__dict__["signalfx_aws_account"] = signalfx_aws_account
return ExternalIntegration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="externalId")
def external_id(self) -> pulumi.Output[str]:
"""
The external ID to use with your IAM role and with `aws.Integration`.
"""
return pulumi.get(self, "external_id")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of this integration
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="signalfxAwsAccount")
def signalfx_aws_account(self) -> pulumi.Output[str]:
"""
The AWS Account ARN to use with your policies/roles, provided by SignalFx.
"""
return pulumi.get(self, "signalfx_aws_account")
|
import shutil
from config import *
def clean_derived():
"""delete the derived files, leave the original movies"""
# TODO: add ENCODED_DIR to config.py
paths = [CLIP_DIR, IMG_DIR, RAW_AUDIO_DIR, AUDIO_DIR, ENCODE_DIR]
for derived_path in paths:
print("Deleting:", derived_path)
shutil.rmtree(derived_path, ignore_errors=True)
if __name__ == "__main__":
clean_derived()
|
from easytello import tello
my_drone = tello.Tello()
#my_drone.streamon()
my_drone.takeoff()
for i in range(4):
#my_drone.forward(1)
my_drone.cw(90)
my_drone.land()
# my_drone.streamoff()
|
#!/usr/bin/env python3.9
# Copyright Pit Kleyersburg <pitkley@googlemail.com>
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified or distributed
# except according to those terms.
import json
import sys
from dataclasses import dataclass
from enum import Enum, auto
from html.parser import HTMLParser
from pathlib import Path
from typing import Optional
@dataclass
class ServiceOperation:
name: str
    summary: Optional[str]
documentation_url: Optional[str]
@dataclass
class ServiceIndex:
name: str
version: str
alternative_names: list[str]
operations: list[ServiceOperation]
def final_index(self) -> tuple[str, list[str], dict[str, tuple[Optional[str], Optional[str]]]]:
operation_index = {}
for operation in self.operations:
operation_index[operation.name] = (operation.summary, operation.documentation_url)
return self.version, self.alternative_names, operation_index
class SummarizationHTMLParser(HTMLParser):
class State(Enum):
INITIAL = auto()
IN_SUMMARY = auto()
DONE = auto()
def __init__(self):
super().__init__()
self.state = self.State.INITIAL
self.summary = ""
def handle_starttag(self, tag, attrs):
if self.state == self.State.INITIAL and tag == "p":
self.state = self.State.IN_SUMMARY
# Reset the summary in case we had read any initial text (i.e. the documentation provided is not HTML).
self.summary = ""
def handle_data(self, data):
if self.state == self.State.INITIAL or self.state == self.State.IN_SUMMARY:
self.summary += data
def handle_endtag(self, tag):
if tag == "p":
self.state = self.State.DONE
def summarize_documentation(documentation: Optional[str]) -> Optional[str]:
if not documentation:
return None
html_parser = SummarizationHTMLParser()
html_parser.feed(documentation)
summary = html_parser.summary.strip()
if not summary:
return None
return summary
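# For botocore-style HTML documentation the parser keeps only the first paragraph,
# while plain-text documentation (no <p> tags) is returned stripped as-is, e.g.:
#
#     summarize_documentation("<p>Creates a bucket.</p><p>Further details.</p>")
#     # -> "Creates a bucket."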
def process_service(service_path: Path) -> Optional[ServiceIndex]:
latest_api_version = max(service_path.iterdir(), key=lambda path: path.name)
service_file = latest_api_version.joinpath("service-2.json")
if not service_file.is_file():
return None
with service_file.open("r") as fh:
service = json.load(fh)
service_name = service_path.name
if "operations" not in service:
return None
operations = []
for operation_name, operation in service["operations"].items():
documentation = operation.get("documentation", None)
documentation_summary = summarize_documentation(documentation)
operations.append(ServiceOperation(
name=operation_name,
summary=documentation_summary,
documentation_url=operation.get("documentationUrl", None),
))
alternative_names = []
if "metadata" in service:
for key in (
"serviceAbbreviation",
"serviceFullName",
"serviceId",
"signingName",
):
if key in service["metadata"]:
alternative_names.append(service["metadata"][key])
return ServiceIndex(
name=service_name,
version=latest_api_version.name,
alternative_names=alternative_names,
operations=operations,
)
def main(
botocore_data_root: Path,
*,
export_as_json: bool,
):
services = {}
for service_path in botocore_data_root.iterdir():
if not service_path.is_dir():
continue
        service_index = process_service(service_path)
        if service_index is None:
            continue  # skip services without a service-2.json or without operations
        services[service_index.name] = service_index
# Persist the final-index to the extension
index_file = Path("json-indices/api.json") if export_as_json else Path("extension/index/api.js")
index_file.parent.mkdir(exist_ok=True)
with index_file.open("w") as fh:
if not export_as_json:
fh.write("// Content retrieved from: https://github.com/boto/botocore/\n")
fh.write("// It is licensed under Apache-2.0, copyright Amazon.com, Inc. or its affiliates.\n")
fh.write("var apiSearchIndex={\n")
else:
fh.write("{\n")
service_count = len(services.keys())
for index, (service_name, service) in enumerate(sorted(services.items()), start=1):
fh.write(f" \"{service_name}\":")
json.dump(service.final_index(), fh, sort_keys=True)
if not export_as_json or index != service_count:
fh.write(",")
fh.write("\n")
fh.write("}")
if not export_as_json:
fh.write(";")
if __name__ == '__main__':
export_as_json = "--export-as-json" in sys.argv
main(
Path("index-sources/botocore/botocore/data/"),
export_as_json=export_as_json,
)
|
import random
import mock
import pytest
from statue.cache import Cache
from statue.commands_filter import CommandsFilter
from statue.commands_map_builder import CommandsMapBuilder
from statue.config.configuration import Configuration
from statue.context import Context
from statue.exceptions import CacheError, CommandsMapBuilderError
from tests.constants import (
COMMAND1,
COMMAND2,
COMMAND3,
CONTEXT1,
CONTEXT2,
CONTEXT3,
CONTEXT_HELP_STRING1,
CONTEXT_HELP_STRING2,
CONTEXT_HELP_STRING3,
SOURCE1,
SOURCE2,
SOURCE3,
)
# Successful tests
def test_commands_map_builder_with_default_settings(tmp_path):
sources_list = [tmp_path / SOURCE1, tmp_path / SOURCE2, tmp_path / SOURCE3]
configuration = mock.Mock()
configuration.sources_repository.sources_list = sources_list
commands_map_builder = CommandsMapBuilder(configuration=configuration)
commands_map = commands_map_builder.build()
assert commands_map == configuration.build_commands_map.return_value
configuration.build_commands_map.assert_called_once_with(
sources=sources_list, commands_filter=CommandsFilter()
)
def test_commands_map_builder_with_specified_sources(tmp_path):
sources_list = [tmp_path / SOURCE1, tmp_path / SOURCE2, tmp_path / SOURCE3]
configuration = mock.Mock()
commands_map_builder = CommandsMapBuilder(
specified_sources=sources_list, configuration=configuration
)
commands_map = commands_map_builder.build()
assert commands_map == configuration.build_commands_map.return_value
configuration.build_commands_map.assert_called_once_with(
sources=sources_list, commands_filter=CommandsFilter()
)
def test_commands_map_builder_with_allowed_commands(tmp_path):
sources_list = [tmp_path / SOURCE1, tmp_path / SOURCE2, tmp_path / SOURCE3]
allowed_commands = [COMMAND1, COMMAND2, COMMAND3]
configuration = mock.Mock()
configuration.sources_repository.sources_list = sources_list
commands_map_builder = CommandsMapBuilder(
specified_sources=sources_list,
configuration=configuration,
allowed_commands=allowed_commands,
)
commands_map = commands_map_builder.build()
assert commands_map == configuration.build_commands_map.return_value
configuration.build_commands_map.assert_called_once_with(
sources=sources_list,
commands_filter=CommandsFilter(allowed_commands=allowed_commands),
)
def test_commands_map_builder_with_denied_commands(tmp_path):
sources_list = [tmp_path / SOURCE1, tmp_path / SOURCE2, tmp_path / SOURCE3]
denied_commands = [COMMAND1, COMMAND2, COMMAND3]
configuration = mock.Mock()
configuration.sources_repository.sources_list = sources_list
commands_map_builder = CommandsMapBuilder(
specified_sources=sources_list,
configuration=configuration,
denied_commands=denied_commands,
)
commands_map = commands_map_builder.build()
assert commands_map == configuration.build_commands_map.return_value
configuration.build_commands_map.assert_called_once_with(
sources=sources_list,
commands_filter=CommandsFilter(denied_commands=denied_commands),
)
def test_commands_map_builder_with_contexts(tmp_path):
sources_list = [tmp_path / SOURCE1, tmp_path / SOURCE2, tmp_path / SOURCE3]
contexts = [
Context(name=CONTEXT1, help=CONTEXT_HELP_STRING1),
Context(name=CONTEXT2, help=CONTEXT_HELP_STRING2),
Context(name=CONTEXT3, help=CONTEXT_HELP_STRING3),
]
configuration = mock.Mock()
configuration.sources_repository.sources_list = sources_list
commands_map_builder = CommandsMapBuilder(
specified_sources=sources_list, configuration=configuration, contexts=contexts
)
commands_map = commands_map_builder.build()
assert commands_map == configuration.build_commands_map.return_value
configuration.build_commands_map.assert_called_once_with(
sources=sources_list, commands_filter=CommandsFilter(contexts=contexts)
)
def test_commands_map_builder_on_previous_evaluation():
previous = 3
configuration = mock.Mock()
commands_map_builder = CommandsMapBuilder(
configuration=configuration, previous=previous
)
commands_map = commands_map_builder.build()
assert commands_map == configuration.cache.get_evaluation.return_value.commands_map
configuration.cache.get_evaluation.assert_called_once_with(previous - 1)
def test_commands_map_builder_on_failed_evaluation():
configuration = mock.Mock()
commands_map_builder = CommandsMapBuilder(configuration=configuration, failed=True)
commands_map = commands_map_builder.build()
assert commands_map == configuration.cache.recent_failed_evaluation.commands_map
def test_commands_map_builder_with_failed_only():
configuration = mock.Mock()
commands_map_builder = CommandsMapBuilder(
configuration=configuration, failed_only=True
)
commands_map = commands_map_builder.build()
assert (
commands_map
== configuration.cache.recent_failed_evaluation.failure_evaluation.commands_map
)
def test_commands_map_builder_with_previous_and_failed_only():
previous = 3
configuration = mock.Mock()
commands_map_builder = CommandsMapBuilder(
configuration=configuration, failed_only=True, previous=previous
)
commands_map = commands_map_builder.build()
expected_commands_map = (
configuration.cache.get_evaluation.return_value.failure_evaluation.commands_map
)
assert commands_map == expected_commands_map
configuration.cache.get_evaluation.assert_called_once_with(previous - 1)
# Exception tests
def test_commands_map_builder_cannot_be_set_with_both_failed_and_previous():
commands_map_builder = CommandsMapBuilder(
configuration=mock.Mock(), previous=random.randint(0, 5), failed=True
)
with pytest.raises(
CommandsMapBuilderError,
match='^"failed" and "previous" cannot both be set when building commands map$',
):
commands_map_builder.build()
def test_commands_map_builder_fail_due_to_empty_sources(tmp_path):
configuration = mock.Mock()
configuration.sources_repository.sources_list = []
commands_map_builder = CommandsMapBuilder(configuration=configuration)
with pytest.raises(
CommandsMapBuilderError,
match="^No source was specified and no Sources section in configuration.$",
):
commands_map_builder.build()
def test_commands_map_builder_on_previous_evaluation_fails_due_to_cache_error():
previous = 3
error_message = "This is a message"
configuration = mock.Mock()
commands_map_builder = CommandsMapBuilder(
configuration=configuration, previous=previous
)
configuration.cache.get_evaluation.side_effect = CacheError(error_message)
with pytest.raises(CommandsMapBuilderError, match=f"^{error_message}$"):
commands_map_builder.build()
configuration.cache.get_evaluation.assert_called_once_with(previous - 1)
def test_commands_map_builder_on_failed_evaluation_fails_due_to_cache_error():
message = "This is a message"
configuration = Configuration(cache=Cache(10))
commands_map_builder = CommandsMapBuilder(configuration=configuration, failed=True)
with mock.patch.object(
Cache, "recent_failed_evaluation", new_callable=mock.PropertyMock
) as recent_failed_evaluation:
recent_failed_evaluation.side_effect = CacheError(message)
with pytest.raises(CommandsMapBuilderError, match=f"^{message}$"):
commands_map_builder.build()
def test_commands_map_builder_on_failed_old_evaluation_fails_due_to_cache_error():
message = "This is a message"
configuration = Configuration(cache=Cache(10))
commands_map_builder = CommandsMapBuilder(
configuration=configuration, failed_only=True
)
with mock.patch.object(
Cache, "recent_failed_evaluation", new_callable=mock.PropertyMock
) as recent_failed_evaluation:
recent_failed_evaluation.side_effect = CacheError(message)
with pytest.raises(CommandsMapBuilderError, match=f"^{message}$"):
commands_map_builder.build()
|
import json
from types import SimpleNamespace
import relogic.utils.crash_on_ipy
from relogic.logickit.base.constants import MIXSENT_TASK
from relogic.logickit.dataflow import TASK_TO_DATAFLOW_CLASS_MAP, MixSentDataFlow
from relogic.logickit.tokenizer.tokenizer_roberta_xlm import RobertaXLMTokenizer
config = SimpleNamespace(
**{
"buckets": [(0, 100), (100, 250), (250, 512)],
"max_seq_length": 512
})
tokenizers = {
"xlmr": RobertaXLMTokenizer.from_pretrained("xlmr.large.v0")
}
dataflow: MixSentDataFlow = TASK_TO_DATAFLOW_CLASS_MAP[MIXSENT_TASK](
task_name=MIXSENT_TASK,
config=config,
tokenizers=tokenizers,
label_mapping=None)
examples = [
{"text_a": ["EFE", "-", "Cantabria", "Madrid", ",", "23", "may", "(", "EFE", ")", "."],
"text_b": ["De", "este", "modo", ",", "podr\u00edan", "ser", "unos", "1.500", "los", "milicianos",
"que", "se", "han", "rendido", "en", "los", "tres", "\u00faltimos", "d\u00edas", ",",
"mientras", "se", "supone", "que", "los", "mil", "componentes", "restantes", "del", "ESL",
"tratan", "de", "llegar", "a", "Israel", "."],
"text_c": ["-", "Cantabria", "Madrid", ",", "23", "d\u00edas", ",", "mientras", "se", "supone", "que",
"los", "mil", "componentes", "restantes", "del", "ESL", "tratan", "de", "llegar", "a", "Israel"],
"span_a": [1, 6], "span_b": [18, 35], "span_c_a": [0, 5], "span_c_b": [5, 22]},
{"text_a": ["El", "entrenador", "de", "la", "Real", "Sociedad", "Javier", "Clemente", "dijo", "hoy", "que", "el",
"club", "guipuzcoano", "no", "tiene", "opciones", "de", "acceder", "al", "mercado", "espa\u00f1ol",
",", "ya", "que", "a", "su", "juicio", "\"", "es", "imposible", "\"", "fichar", "jugadores", "de", "\"",
"cierto", "nivel", "\"", ",", "por", "lo", "que", "afirm\u00f3", "que", "esa", "idea", "\"", "hay", "que",
"quit\u00e1rsela", "de", "la", "cabeza", "\"", "."],
"text_b": ["Este", "portavoz", "sindical", "dijo", "tras", "la", "desconvocatoria", "de", "la", "huelga", "que",
"este", "preacuerdo", "\"", "puede", "ser", "perjudicial", "y", "regresivo", "para", "las", "condiciones",
"laborales", "\"", "de", "los", "trabajadores", ",", "si", "bien", "el", "sindicato", "respet\u00f3", "su",
"decisi\u00f3n", "."],
"text_c": ["club", "guipuzcoano", "no", "tiene", "opciones", "de", "acceder", "al", "mercado", "espa\u00f1ol", ",",
"ya", "que", "a", "su", "juicio", "\"", "es", "imposible", "\"", "fichar", "jugadores", "de", "\"",
"cierto", "nivel", "\"", "sindical", "dijo", "tras", "la", "desconvocatoria", "de", "la", "huelga",
"que", "este", "preacuerdo", "\"", "puede", "ser", "perjudicial", "y", "regresivo", "para"],
"span_a": [12, 39], "span_b": [2, 20], "span_c_a": [0, 27], "span_c_b": [27, 45]},
{"text_a": ["Todos", "estos", "temas", "se", "tratar\u00e1n", "en", "la", "Comisi\u00f3n", "Mixta", "RENFE-Junta",
"de", "Comunidades", ",", "creada", "para", "negociar", "todos", "los", "problemas", "que", "existen",
"al", "respecto", "en", "la", "Comunidad", "Aut\u00f3noma", "y", "de", "la", "cual", "formar\u00e1",
"parte", "el", "Ayuntamiento", "de", "Alc\u00e1zar", "como", "observador", "."],
"text_b": ["Este", "reconocido", "profesional", ",", "con", "estudios", "en", "Nueva", "York", ",", "Tokio",
"y", "Buenos", "Aires", ",", "tiene", "una", "amplia", "experiencia", "en", "el", "dise\u00f1o",
"de", "espacios", "multidisciplinares", "y", "con", "fines", "culturales", "y", "entre", "otros",
"proyectos", "trabaja", "en", "la", "actualidad", "en", "el", "dise\u00f1o", "de", "un", "centro",
"cultural", "para", "Filadelfia", ",", "que", "albergar\u00e1", "a", "la", "orquesta", "titular",
"de", "esa", "ciudad", "."],
"text_c": ["al", "respecto", "en", "la", "Comunidad", "Aut\u00f3noma", "y", "de", "la", "cual", "formar\u00e1",
"parte", "el", "Ayuntamiento", "de", "Alc\u00e1zar", "como", "observador", ".", "experiencia", "en",
"el", "dise\u00f1o", "de", "espacios", "multidisciplinares", "y", "con", "fines", "culturales", "y",
"entre", "otros", "proyectos", "trabaja", "en", "la", "actualidad", "en", "el", "dise\u00f1o", "de",
"un", "centro", "cultural", "para", "Filadelfia"],
"span_a": [21, 40], "span_b": [18, 46], "span_c_a": [0, 19], "span_c_b": [19, 47]},
{"text_a": ["Posibilidad", "de", "alg\u00fan", "banco", "de", "niebla", "en", "el", "sureste", "."],
"text_b": ["Temperaturas", "en", "ascenso", "ligero", "en", "el", "\u00e1rea", "del", "cant\u00e1brico",
"oriental", ",", "La", "Rioja", ",", "Navarra", ",", "Arag\u00f3n", "y", "en", "Canarias", "y",
"sin", "cambios", "en", "el", "resto", "."],
"text_c": ["de", "niebla", "en", "el", "sureste", "en", "ascenso", "ligero", "en", "el", "\u00e1rea", "del",
"cant\u00e1brico", "oriental", ",", "La", "Rioja"],
"span_a": [4, 9], "span_b": [1, 13], "span_c_a": [0, 5], "span_c_b": [5, 17]}]
dataflow.update_with_jsons(examples)
def check_example(example):
a_tokens = example.a_tokens
b_tokens = example.b_tokens
c_tokens = example.c_tokens
span_a_selected_index = example.span_a_selected_index
span_b_selected_index = example.span_b_selected_index
span_c_a_selected_index = example.span_c_a_selected_index
span_c_b_selected_index = example.span_c_b_selected_index
assert((len(span_a_selected_index) + len(span_b_selected_index)) ==
(len(span_c_a_selected_index) + len(span_c_b_selected_index)))
assert(a_tokens[span_a_selected_index[0]] == c_tokens[span_c_a_selected_index[0]])
assert (b_tokens[span_b_selected_index[0]] == c_tokens[span_c_b_selected_index[0]])
for mb in dataflow.get_minibatches(minibatch_size=2):
mb.generate_input("cpu", True)
for example in mb.examples:
check_example(example)
print("Passed.")
|
import asyncio
import logging
from datetime import datetime
from discord.ext import commands
logger = logging.getLogger(__name__)
class Assign(commands.Cog):
"""A cog to assign specific roles."""
def __init__(self, bot):
self.bot = bot
@commands.command(name="assign")
    async def assign_trusted_role(self, ctx):
smp_role_id = 489176009120415744
trusted_role_id = 705274433723564083
guild = ctx.guild
all_members = guild.members
print(len(all_members))
for member in all_members:
member_join_date = member.joined_at
            today = datetime.utcnow()  # member.joined_at is a naive UTC datetime, so compare in UTC
days_present = today-member_join_date
roles_id = [role.id for role in member.roles]
if smp_role_id in roles_id:
if days_present.days > 28:
trusted_role = guild.get_role(trusted_role_id)
await member.add_roles(trusted_role)
logger.info(f"{str(member)} is given the {trusted_role.name} role.")
await asyncio.sleep(1)
def setup(bot):
bot.add_cog(Assign(bot))
logger.info("Assign Cog loaded.")
|
from pipeline.logger import setup_logger
from pipeline.utils import load_config
from torch.utils.data import DataLoader
import argparse
import os
def main():
parser = argparse.ArgumentParser()
parser.add_argument("config_path")
args = parser.parse_args()
config = load_config(args.config_path)
train_data_loader = DataLoader(
config.train_dataset,
batch_size=config.batch_size,
shuffle=True,
pin_memory=True,
num_workers=config.num_workers)
val_data_loader = DataLoader(
config.val_dataset,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_workers)
model = config.model
model_save_path = config.model_save_path
os.makedirs(model_save_path, exist_ok=True)
logger_path = os.path.join(model_save_path, "log.txt")
setup_logger(out_file=logger_path)
trainer = config.trainer_cls(
model=model,
train_data_loader=train_data_loader,
val_data_loader=val_data_loader,
epoch_count=config.epoch_count,
optimizer=config.optimizer,
scheduler=config.scheduler,
loss_calculator=config.loss_calculator,
metric_calculator=config.metric_calculator,
print_frequency=config.print_frequency
)
trainer.run()
if __name__ == "__main__":
main()
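# A hypothetical config sketch (assumption, not the project's real config format) showing
# the attributes main() expects the object returned by load_config() to expose; the
# concrete values below are placeholders.
def example_config():
    from types import SimpleNamespace
    return SimpleNamespace(
        train_dataset=None,             # torch Dataset used for training
        val_dataset=None,               # torch Dataset used for validation
        batch_size=32,
        num_workers=4,
        model=None,                     # torch.nn.Module to train
        model_save_path="runs/example",
        trainer_cls=None,               # class accepting the keyword args used in main()
        epoch_count=10,
        optimizer=None,
        scheduler=None,
        loss_calculator=None,
        metric_calculator=None,
        print_frequency=50,
    )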
|
from app import db
from app.models.base_model import BaseEntity
category_page = db.Table(
'category_page',
db.Column('category_id', db.Integer, db.ForeignKey('category.id')),
db.Column('page_id', db.Integer, db.ForeignKey('page.id'))
)
# relationship required for the adjacency list (self-referential many-to-many
# relationship)
category_category = db.Table(
'category_category',
db.Column('super_id', db.Integer, db.ForeignKey('category.id')),
db.Column('sub_id', db.Integer, db.ForeignKey('category.id'))
)
class Category(db.Model, BaseEntity):
"""
Categories for pages similar to mediawiki categories.
https://www.mediawiki.org/wiki/Help:Categories
"""
__tablename__ = 'category'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(200), unique=True)
pages = db.relationship('Page', secondary=category_page,
backref=db.backref('categories'))
sub_categories = db.relationship(
'Category', secondary=category_category,
primaryjoin=id == category_category.c.super_id,
secondaryjoin=id == category_category.c.sub_id,
backref='super_categories')
def __init__(self, name):
self.name = name
def has_parent_category(self):
return len(self.super_categories) > 0
def __str__(self):
return "Category: %s" % self.name
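# Illustrative sketch (assumption: run inside an application context; not called anywhere
# in the app) of how the self-referential category_category table behaves.
def _example_category_tree():
    programming = Category('Programming')
    python = Category('Python')
    programming.sub_categories.append(python)
    db.session.add_all([programming, python])
    db.session.commit()
    # The backref exposes the inverse direction of the adjacency list.
    assert python.super_categories == [programming]
    assert python.has_parent_category()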
|
import unittest
import os
from main import query, preprocess, matching_sim
class MainTesting(unittest.TestCase):
    def test_query_nofile(self):
"""
Testing query method
"""
result = query("filename")
expected_result = []
        self.assertEqual(len(expected_result), len(result))
def test_preprocess(self):
"""
Test that the function works as expected within the main
"""
path = preprocess("2016_BC003122_ CC_L.npy")
self.assertEqual(
"../test_images_kaggle/processed_images/lcc/2016_BC003122_ CC_L.npy", path
)
def test_matching_sim(self):
"""
Test that this calls upon the model or returns 69.69 currently.
"""
self.assertEqual(matching_sim(query("filename")), 69.69)
if __name__ == "__main__":
unittest.main()
|
# Write a program that reads any year and shows whether it is a leap year
# my answer - I couldn't get it to work, it kept giving an error
#ano = int(input('Que ano quer analisar? Coloque 0 para analisar o ano atual:'))
#bissexto = ano % 4
#bissexto2 = ano % 100
#bissexto3 = ano % 400
#total = bissexto + bissexto2 + bissexto3
#if total == 0:
# print('O ano {} é BISSEXTO'.format(ano))
#else:
# print('O ano {} NÃO é BISSEXTO'.format(ano))
# Gustavo's answer
from datetime import date
ano = int(input('Que ano quer analisar? Coloque 0 para o ano atual: '))
if ano == 0:
    ano = date.today().year  # this statement gets the current year configured on the machine
if ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:
    print('O ano {} é BISSEXTO'.format(ano))
else:
    print('O ano {} NÃO é BISSEXTO'.format(ano))
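# Worked examples of the rule above (divisible by 4 and not by 100, or divisible by 400):
# 2024 -> leap year, 1900 -> not a leap year (divisible by 100 but not by 400),
# 2000 -> leap year. The standard library offers the same check as calendar.isleap(ano).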
|
#!/usr/bin/env python3
import rospy
from nav_msgs.msg import Odometry
import rogata_library as rgt
import numpy as np
def odom_callback( odom, argv):
agent = argv[0]
rogata = argv[1]
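    # Assumed scene convention: the odometry position (metres) is mapped into the rogata
    # scene's pixel frame by flipping the y axis, scaling by 100 and shifting the origin
    # to (500, 500).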
pos = np.array([odom.pose.pose.position.x,
-odom.pose.pose.position.y])*100+np.array([500,500])
rogata.set_pos(agent,pos)
if __name__ == '__main__':
rospy.init_node("agent_tracker")
try:
rogata1 = rgt.rogata_helper()
rogata2 = rgt.rogata_helper()
rospy.Subscriber("guard/odom" , Odometry, odom_callback,("guard_obj",rogata1))
rospy.Subscriber("evader/odom", Odometry, odom_callback, ("evader_obj",rogata2))
rospy.spin()
except rospy.ROSInterruptException:
pass
|
from gym.envs.registration import register
register(
id='correlatedbandit-v0',
entry_point='bandit_env.envs:CorrelatedBanditEnv',
max_episode_steps=100,
nondeterministic=True,
kwargs={'prob': 0.1}
)
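# Hypothetical usage sketch (assumes gym is installed and this package is importable as
# bandit_env); gym.make resolves the id registered above and forwards prob=0.1 to
# CorrelatedBanditEnv:
#
#   import gym
#   import bandit_env  # noqa: F401 -- importing the package runs the register() call
#   env = gym.make('correlatedbandit-v0')
#   observation = env.reset()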
|
import logging
import kombu
from kombu import Connection, Producer, Exchange, Consumer
from mylogger import *
#from syslog_wrap import *
try:
import json
except:
import simplejson as json
import Queue
import threading
from statsd_operator import statsd_operator
# g_logger will be initialized in the main thread
g_logger = mylogger()
g_logger_info = mylogger()
'''
thunder.cas.submit_tasks : total num of query broker's submit tasks
thunder.cas.download_tasks : total num of download tasks
'''
g_statsd = None
SUBMIT_TASKS = 'thunder.cas.submit_tasks'
DOWNLOAD_TASKS = 'thunder.cas.download_tasks'
task_queue = Queue.Queue()
task_mutex = threading.Lock()
task_logs = 'task_log_list'
class safe_lock(object):
def __enter__(self):
task_mutex.acquire()
def __exit__(self, *args):
task_mutex.release()
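# Usage sketch for the helper above (illustrative): the with-statement serializes access
# to state shared between the worker threads, e.g.
#   with safe_lock():
#       task_queue.put(task)   # hypothetical critical section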
def get_external_id(task):
try:
if isinstance(task, dict):
dict_task = task
else:
dict_task = json.loads(task)
if dict_task['params'].has_key('external_id'):
return dict_task['params']['external_id']
return None
except Exception, msg:
g_logger.debug('get external id failed [%s]' %(msg))
return None
def connect_rabbitmq(mq_url, queue_name, routing_key):
try:
exchange = Exchange(queue_name)
queue = kombu.Queue(queue_name, exchange, routing_key)
#connection = Connection('amqp://guest:guest@localhost:5672//')
connection = Connection(mq_url)
return connection
except Exception, msg:
raise
def redis_url_parse(url):
    try:
        redis_conf = {}
        if not url.startswith('redis://'):
            raise Exception("bad redis format (%s)" % url)
        start_idx = len('redis://')
        end_idx = url.find(':', start_idx)
        if end_idx < 0:
            raise Exception("bad redis format (%s)" % url)
        redis_conf['host'] = url[start_idx:end_idx]
        start_idx = end_idx + 1
        end_idx = url.find('/', start_idx)
        if end_idx < 0:
            raise Exception("bad redis format (%s)" % url)
        redis_conf['port'] = int(url[start_idx:end_idx])
        start_idx = end_idx + 1
        redis_conf['db'] = url[start_idx:]
        return redis_conf
    except Exception, msg:
        raise
def create_error_msg(status, hashkey, task, redis_conn):
try:
s = {}
s['jsonrpc'] = '2.0'
s['id'] = 1
s['method'] = 'finish_task'
s['params'] = {}
s['params']['error_code'] = 121509
s['params']['message'] = status
if hashkey is not None:
task_key = 'task#' + hashkey
task_info = redis_conn.get(task_key)
if task_info is None:
return None
j_task_info = json.loads(task_info)
else:
if task is not None:
j_task_info = json.loads(task)
if j_task_info['params'].has_key('additional_info'):
s['params']['additional_info'] = j_task_info['params']['additional_info']
if j_task_info['params'].has_key('url'):
if j_task_info['params']['url'].has_key('hash'):
s['params']['url'] = {}
s['params']['url']['hash'] = j_task_info['params']['url']['hash']
s['params']['url']['location'] = j_task_info['params']['url']['location']
if j_task_info['params'].has_key('seed_file'):
if j_task_info['params']['seed_file'].has_key('hash'):
s['params']['seed_file'] = {}
s['params']['seed_file']['hash'] = j_task_info['params']['seed_file']['hash']
s['params']['seed_file']['path'] = j_task_info['params']['seed_file']['path']
if j_task_info['params'].has_key('thunder_hash'):
s['params']['thunder_hash'] = j_task_info['params']['thunder_hash']
js = json.dumps(s)
return js
except Exception, msg:
g_logger.error('create error msg failed [%s]' %(msg))
return None
|
import unittest
from eating_cookies import eating_cookies
class Test(unittest.TestCase):
def test_eating_cookies_small_n(self):
self.assertEqual(eating_cookies(0), 1)
self.assertEqual(eating_cookies(1), 1)
self.assertEqual(eating_cookies(2), 2)
self.assertEqual(eating_cookies(5), 13)
self.assertEqual(eating_cookies(10), 274)
def test_eating_cookies_large_n(self):
self.assertEqual(eating_cookies(50), 10562230626642)
self.assertEqual(eating_cookies(100), 180396380815100901214157639)
self.assertEqual(eating_cookies(500), 1306186569702186634983475450062372018715120191391192207156664343051610913971927959744519676992404852130396504615663042713312314219527)
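# A reference sketch consistent with the expected values above (an assumption, not the
# module under test): eating_cookies(n) counts the ways to eat n cookies taking 1, 2 or 3
# at a time, i.e. f(n) = f(n-1) + f(n-2) + f(n-3) with f(0) = f(1) = 1 and f(2) = 2.
def _reference_eating_cookies(n):
    if n < 2:
        return 1
    if n == 2:
        return 2
    a, b, c = 1, 1, 2  # f(0), f(1), f(2)
    for _ in range(3, n + 1):
        a, b, c = b, c, a + b + c
    return c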
if __name__ == '__main__':
unittest.main()
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import time
from neutron_lib import constants as nl_constants
import paramiko
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest import test
from vmware_nsx_tempest.common import constants
from vmware_nsx_tempest.services import fwaas_client as FWAASC
from vmware_nsx_tempest.services import nsxv_client
from vmware_nsx_tempest.tests.nsxv.scenario import (
manager_topo_deployment as dmgr)
from vmware_nsx_tempest.tests.nsxv.scenario import (
network_addon_methods as NAM)
CONF = config.CONF
class FWaaSTestBasicOps(dmgr.TopoDeployScenarioManager):
"""
Tests the following scenario cases for FWaaS:
Add ICMP FWAAS rule and check north south traffic
Add TCP FWAAS rule and check north south traffic
Update ICMP FWAAS rule and check north south traffic
Update TCP FWAAS rule and check north south traffic
    Check the above scenarios with exclusive and distributed routers
"""
@classmethod
def resource_setup(cls):
super(FWaaSTestBasicOps, cls).resource_setup()
cls.fwaasv1_client = FWAASC.get_client(cls.manager)
if not test.is_extension_enabled('fwaas', 'network'):
msg = "FWaaS Extension not enabled."
raise cls.skipException(msg)
manager_ip = re.search(r"(\d{1,3}\.){3}\d{1,3}",
CONF.nsxv.manager_uri).group(0)
cls.vsm = nsxv_client.VSMClient(
manager_ip, CONF.nsxv.user, CONF.nsxv.password)
cls.fw_rule = cls.fwaasv1_client.create_firewall_rule(action="allow",
protocol="tcp")
cls.fw_policy = cls.fwaasv1_client.create_firewall_policy()
def create_firewall_rule(self, **kwargs):
body = self.fwaasv1_client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
**kwargs)
fw_rule = body['firewall_rule']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.fwaasv1_client.delete_firewall_rule,
fw_rule['id'])
return fw_rule
def create_firewall_policy(self, **kwargs):
body = self.fwaasv1_client.create_firewall_policy(
name=data_utils.rand_name("fw-policy"),
**kwargs)
fw_policy = body['firewall_policy']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.fwaasv1_client.delete_firewall_policy,
fw_policy['id'])
return fw_policy
def delete_firewall_and_wait(self, firewall_id):
self.fwaasv1_client.delete_firewall(firewall_id)
self._wait_firewall_while(firewall_id, [nl_constants.PENDING_DELETE],
not_found_ok=True)
def create_firewall(self, **kwargs):
body = self.fwaasv1_client.create_firewall(
name=data_utils.rand_name("fw"),
**kwargs)
fw = body['firewall']
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.delete_firewall_and_wait,
fw['id'])
return fw
def check_server_connected(self, serv):
# Fetch tenant-network from where vm deployed
serv_net = list(serv['addresses'].keys())[0]
serv_addr = serv['addresses'][serv_net][0]
host_ip = serv_addr['addr']
self.waitfor_host_connected(host_ip)
def _wait_firewall_while(self, firewall_id, statuses, not_found_ok=False):
start = int(time.time())
if not_found_ok:
            expected_exceptions = (lib_exc.NotFound,)
else:
expected_exceptions = ()
while True:
try:
fw = self.fwaasv1_client.show_firewall(firewall_id)
except expected_exceptions:
break
status = fw['firewall']['status']
if status not in statuses:
break
if int(time.time()) - start >= self.fwaasv1_client.build_timeout:
msg = ("Firewall %(firewall)s failed to reach "
"non PENDING status (current %(status)s)") % {
"firewall": firewall_id,
"status": status,
}
raise lib_exc.TimeoutException(msg)
time.sleep(constants.NSX_BACKEND_VERY_SMALL_TIME_INTERVAL)
def _wait_firewall_ready(self, firewall_id):
self._wait_firewall_while(firewall_id,
[nl_constants.PENDING_CREATE,
nl_constants.PENDING_UPDATE])
def _delete_router_if_exists(self, router):
# delete router, if it exists
try:
routers_client = self.manager.routers_client
routers_client.delete_router(router['id'])
# if router is not found, this means it was deleted in the test
except lib_exc.NotFound:
pass
def _delete_policy_if_exists(self, policy_id):
# delete policy, if it exists
try:
self.fwaasv1_client.delete_firewall_policy(policy_id)
# if policy is not found, this means it was deleted in the test
except lib_exc.NotFound:
pass
def _delete_rule_if_exists(self, rule_id):
# delete rule, if it exists
try:
self.fwaasv1_client.delete_firewall_rule(rule_id)
# if rule is not found, this means it was deleted in the test
except lib_exc.NotFound:
pass
def _delete_firewall_if_exists(self, fw_id):
# delete firewall, if it exists
try:
self.fwaasv1_client.delete_firewall(fw_id)
# if firewall is not found, this means it was deleted in the test
except lib_exc.NotFound:
pass
self.fwaasv1_client.wait_for_resource_deletion(fw_id)
def _wait_until_ready(self, fw_id):
target_states = ('ACTIVE', 'CREATED')
def _wait():
firewall = self.fwaasv1_client.show_firewall(fw_id)
firewall = firewall['firewall']
return firewall['status'] in target_states
if not test_utils.call_until_true(_wait, CONF.network.build_timeout,
CONF.network.build_interval):
m = ("Timed out waiting for firewall %s to reach %s state(s)" %
(fw_id, target_states))
raise lib_exc.TimeoutException(m)
def _wait_until_deleted(self, fw_id):
def _wait():
try:
firewall = self.fwaasv1_client.show_firewall(fw_id)
except lib_exc.NotFound:
return True
fw_status = firewall['firewall']['status']
if fw_status == 'ERROR':
raise lib_exc.DeleteErrorException(resource_id=fw_id)
if not test_utils.call_until_true(_wait, CONF.network.build_timeout,
CONF.network.build_interval):
m = ("Timed out waiting for firewall %s deleted" % fw_id)
raise lib_exc.TimeoutException(m)
def _check_firewall_rule_exists_at_backend(self, rules,
firewall_rule_name):
for rule in rules:
if rule['name'] in firewall_rule_name:
self.assertIn(rule['name'], firewall_rule_name)
return True
return False
def _test_ping_from_external_network(self, fip_ip):
out = os.popen('ping -c 2 %s' % fip_ip).read().strip()
return out
def _test_ssh_connectivity_from_external_network(self, fip_ip):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(
paramiko.AutoAddPolicy())
try:
ssh.connect(fip_ip, self.username, self.password, timeout=10)
except Exception as e:
return str(e)
def _create_firewall_rule_name(self, body):
firewall_rule_name = body['firewall_rule']['name']
firewall_rule_name = "Fwaas-" + firewall_rule_name
return firewall_rule_name
def _create_firewall_advanced_topo(self, router_type):
fw_rule_id_list = []
router = self.create_router_by_type(router_type)
self.addCleanup(self._delete_router_if_exists, router)
edges = self.vsm.get_all_edges()
for key in edges:
if router['name'] in key['name']:
edge_id = key['id']
break
rules = self.vsm.get_edge_firewall_rules(edge_id)
rules_before = len(rules)
for rule_id in range(0, constants.NO_OF_ENTRIES):
if rule_id % 2 == 0:
action = "allow"
protocol = "tcp"
else:
action = "allow"
protocol = "udp"
firewall_rule = self.fwaasv1_client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
action=action,
protocol=protocol)
fw_rule_id = firewall_rule['firewall_rule']['id']
firewall_name = self._create_firewall_rule_name(firewall_rule)
self.addCleanup(self._delete_rule_if_exists, fw_rule_id)
fw_rule_id_list.append(fw_rule_id)
# Update firewall policy
body = self.fwaasv1_client.create_firewall_policy(
name=data_utils.rand_name("fw-policy"))
fw_policy_id = body['firewall_policy']['id']
self.addCleanup(self._delete_policy_if_exists, fw_policy_id)
# Insert rule to firewall policy
for fw_rule_id in fw_rule_id_list:
self.fwaasv1_client.insert_firewall_rule_in_policy(
fw_policy_id, fw_rule_id, '', '')
firewall_1 = self.fwaasv1_client.create_firewall(
name=data_utils.rand_name("firewall"),
firewall_policy_id=fw_policy_id,
router_ids=[router['id']])
created_firewall = firewall_1['firewall']
self.addCleanup(self._delete_firewall_if_exists,
created_firewall['id'])
# Wait for the firewall resource to become ready
self._wait_until_ready(created_firewall['id'])
firewall_topo = dict(router=router, firewall_name=firewall_name,
fw_policy_id=fw_policy_id,
firewall_id=created_firewall['id'],
rules_before=rules_before)
return firewall_topo
def _create_firewall_basic_topo(self, router_type, protocol_name,
policy=None):
self.keypairs = {}
router = self.create_router_by_type(router_type)
self.addCleanup(self._delete_router_if_exists, router)
body = self.fwaasv1_client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
action="allow",
protocol=protocol_name)
fw_rule_id1 = body['firewall_rule']['id']
firewall_name = self._create_firewall_rule_name(body)
self.addCleanup(self._delete_rule_if_exists, fw_rule_id1)
# Create firewall policy
if not policy:
body = self.fwaasv1_client.create_firewall_policy(
name=data_utils.rand_name("fw-policy"))
fw_policy_id = body['firewall_policy']['id']
self.addCleanup(self._delete_policy_if_exists, fw_policy_id)
# Insert rule to firewall policy
self.fwaasv1_client.insert_firewall_rule_in_policy(
fw_policy_id, fw_rule_id1, '', '')
else:
fw_policy_id = policy
# Create firewall
firewall_1 = self.fwaasv1_client.create_firewall(
name=data_utils.rand_name("firewall"),
firewall_policy_id=fw_policy_id,
router_ids=[router['id']])
created_firewall = firewall_1['firewall']
self.addCleanup(self._delete_firewall_if_exists,
created_firewall['id'])
# Wait for the firewall resource to become ready
self._wait_until_ready(created_firewall['id'])
sg_name = data_utils.rand_name('sg')
sg_desc = sg_name + " description"
t_security_group = \
self.compute_security_groups_client.create_security_group(
name=sg_name, description=sg_desc)['security_group']
self.addCleanup(
test_utils.call_and_ignore_notfound_exc,
self.compute_security_groups_client.delete_security_group,
t_security_group['id'])
rule = {'direction': 'ingress', 'protocol': 'tcp'}
self._create_security_group_rule(secgroup=t_security_group, **rule)
rule = {'direction': 'ingress'}
rule_id = self._create_security_group_rule(secgroup=t_security_group,
**rule)['id']
keypair = self.create_keypair()
self.keypairs[keypair['name']] = keypair
client_mgr = self.manager
tenant_id = t_security_group['tenant_id']
network, subnet = self.create_network_subnet(client_mgr=client_mgr,
tenant_id=tenant_id,
cidr_offset=0)
subnet_id = subnet['id']
router_id = router['id']
routers_client = client_mgr.routers_client
NAM.router_interface_add(self, router_id, subnet_id,
routers_client)
self.username, self.password = self.get_image_userpass()
security_groups = [{'name': t_security_group['id']}]
key_name = keypair['name']
t_serv1 = self.create_server_on_network(
network, security_groups, key_name=key_name,
image=self.get_server_image(),
flavor=self.get_server_flavor(),
name=network['name'])
self.check_server_connected(t_serv1)
t_floatingip = self.create_floatingip_for_server(
t_serv1, client_mgr=client_mgr)
msg = ("Associate t_floatingip[%s] to server[%s]"
% (t_floatingip, t_serv1['name']))
self._check_floatingip_connectivity(
t_floatingip, t_serv1, should_connect=True, msg=msg)
firewall_topo = dict(router=router, firewall_name=firewall_name,
fw_policy_id=fw_policy_id,
fw_rule_id1=fw_rule_id1,
firewall_id=created_firewall['id'],
security_group=t_security_group,
network=network, subnet=subnet,
client_mgr=client_mgr, serv1=t_serv1,
fip1=t_floatingip,
rule_id=rule_id)
return firewall_topo
def _perform_operations_on_firewall(self, firewall_topo, protocol_name):
self._check_floatingip_connectivity(
firewall_topo['fip1'], firewall_topo['serv1'],
should_connect=True)
firewall_rule_2 = self.fwaasv1_client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
action="deny",
protocol=protocol_name)
fw_rule_id2 = firewall_rule_2['firewall_rule']['id']
self.addCleanup(self._delete_rule_if_exists, fw_rule_id2)
self.addCleanup(self._delete_policy_if_exists,
firewall_topo['fw_policy_id'])
self.addCleanup(self._delete_firewall_if_exists,
firewall_topo['firewall_id'])
# Insert rule-2 to firewall policy
self.fwaasv1_client.insert_firewall_rule_in_policy(
firewall_topo['fw_policy_id'], fw_rule_id2, '',
firewall_topo['fw_rule_id1'])
self._wait_firewall_ready(firewall_topo['firewall_id'])
return fw_rule_id2
def _get_list_fw_rule_ids(self, fw_policy_id):
fw_policy = self.fwaasv1_client.show_firewall_policy(
fw_policy_id)
return [ruleid for ruleid in fw_policy['firewall_policy']
['firewall_rules']]
def create_router_by_type(self, router_type, name=None, **kwargs):
routers_client = self.manager.routers_client
router_name = name or data_utils.rand_name('fwaas-')
create_kwargs = dict(name=router_name, external_gateway_info={
"network_id": CONF.network.public_network_id})
if router_type in ('shared', 'exclusive'):
create_kwargs['router_type'] = router_type
        elif router_type in ('distributed',):
create_kwargs['distributed'] = True
kwargs.update(create_kwargs)
router = routers_client.create_router(**kwargs)
router = router['router'] if 'router' in router else router
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
routers_client.delete_router, router['id'])
self.assertEqual(router['name'], router_name)
return router
@decorators.attr(type='nsxv')
@decorators.idempotent_id('e2ab2d1a-4dc0-4efd-b03d-8c2322b427f0')
def test_firewall_icmp_rule_with_exclusive_router(self):
# Create router required for an ACTIVE firewall
firewall_topo = \
self._create_firewall_basic_topo(constants.EXCLUSIVE_ROUTER,
constants.ICMP_PROTOCOL)
fip_ip = firewall_topo['fip1']['floating_ip_address']
self._perform_operations_on_firewall(firewall_topo,
constants.ICMP_PROTOCOL)
out = self._test_ping_from_external_network(fip_ip)
self.assertIn("0 received", str(out))
@decorators.attr(type='nsxv')
@decorators.idempotent_id('fd39455a-232e-4f7f-b102-2853688335dc')
def test_firewall_tcp_rule_with_exclusive_router(self):
# Create router required for an ACTIVE firewall
firewall_topo = \
self._create_firewall_basic_topo(constants.EXCLUSIVE_ROUTER,
constants.TCP_PROTOCOL)
fip_ip = firewall_topo['fip1']['floating_ip_address']
self._perform_operations_on_firewall(firewall_topo,
constants.TCP_PROTOCOL)
out = self._test_ssh_connectivity_from_external_network(fip_ip)
self.assertIn("Servname not supported", out)
@decorators.attr(type='nsxv')
@decorators.idempotent_id('3628448a-5977-44e3-b34a-690e4e2ba847')
def test_firewall_icmp_rule_with_distributed_router(self):
# Create router required for an ACTIVE firewall
firewall_topo = \
self._create_firewall_basic_topo(constants.DISTRIBUTED_ROUTER,
constants.ICMP_PROTOCOL)
fip_ip = firewall_topo['fip1']['floating_ip_address']
self._perform_operations_on_firewall(firewall_topo,
constants.ICMP_PROTOCOL)
out = self._test_ping_from_external_network(fip_ip)
self.assertIn("0 received", str(out))
@decorators.attr(type='nsxv')
@decorators.idempotent_id('0aeb2acc-0b68-4cca-889d-078f61bbe5b2')
def test_firewall_tcp_rule_with_distributed_router(self):
# Create router required for an ACTIVE firewall
firewall_topo = \
self._create_firewall_basic_topo(constants.DISTRIBUTED_ROUTER,
constants.TCP_PROTOCOL)
fip_ip = firewall_topo['fip1']['floating_ip_address']
self._perform_operations_on_firewall(firewall_topo,
constants.TCP_PROTOCOL)
out = self._test_ssh_connectivity_from_external_network(fip_ip)
self.assertIn("Servname not supported", out)
@decorators.attr(type='nsxv')
@decorators.idempotent_id('4a0306e5-663c-4981-8177-e8a255a8859c')
def test_firewall_update_delete_ops_on_exclusive_router(self):
# Create router required for an ACTIVE firewall
firewall_topo = \
self._create_firewall_basic_topo(constants.EXCLUSIVE_ROUTER,
constants.ICMP_PROTOCOL)
firewall_rule_id = \
self._perform_operations_on_firewall(firewall_topo,
constants.ICMP_PROTOCOL)
fip_ip = firewall_topo['fip1']['floating_ip_address']
out = self._test_ping_from_external_network(fip_ip)
self.assertIn("0 received", str(out))
self.fwaasv1_client.update_firewall_rule(
firewall_rule_id,
action="allow")
time.sleep(constants.NSX_BACKEND_SMALL_TIME_INTERVAL)
out = self._test_ping_from_external_network(fip_ip)
self.assertIn("64 bytes from ", str(out))
self.fwaasv1_client.update_firewall_rule(
firewall_rule_id, protocol="tcp",
action="deny")
time.sleep(constants.NSX_BACKEND_SMALL_TIME_INTERVAL)
out = self._test_ssh_connectivity_from_external_network(fip_ip)
self.assertIn("Servname not supported", out)
out = self._test_ping_from_external_network(fip_ip)
self.assertIn("64 bytes from ", str(out))
self.fwaasv1_client.update_firewall_rule(
firewall_rule_id, action="allow")
time.sleep(constants.NSX_BACKEND_SMALL_TIME_INTERVAL)
out = self._test_ssh_connectivity_from_external_network(fip_ip)
self._wait_firewall_ready(firewall_topo['firewall_id'])
out = self._test_ping_from_external_network(fip_ip)
self.assertIn("64 bytes from ", str(out))
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
bl_info = {
"name": "Stanford PLY format (alternative)",
"author": "Paul Melis",
"blender": (2, 75, 0),
"location": "File > Import-Export",
"description": "Import-Export PLY mesh data, including vertex colors",
"warning": "",
"wiki_url": "",
"support": 'OFFICIAL',
"category": "Import-Export"}
import os, time
import bpy
from bpy.props import (
CollectionProperty,
StringProperty,
BoolProperty,
EnumProperty,
FloatProperty,
)
from bpy_extras.io_utils import (
ImportHelper,
orientation_helper_factory,
axis_conversion,
)
#IOPLYOrientationHelper = orientation_helper_factory("IOPLYOrientationHelper", axis_forward='Y', axis_up='Z')
from readply import readply
def load_ply_mesh(filepath, ply_name):
# XXX call needs update for new API!
num_vertices, num_faces, varray, farray, vnarray, vcolarray = readply(filepath)
# Create a mesh + object using the binary vertex and face data
mesh = bpy.data.meshes.new(name=ply_name)
mesh.vertices.add(num_vertices)
mesh.vertices.foreach_set('co', varray)
mesh.tessfaces.add(num_faces)
mesh.tessfaces.foreach_set('vertices_raw', farray)
mesh.validate()
mesh.update()
if vcolarray is not None:
"""
# For each face, set the vertex colors of the vertices making up that face
for fi in range(num_faces):
# Get vertex indices for this triangle/quad
i, j, k, l = farray[4*fi:4*fi+4]
face_col = vcol_data[fi]
face_col.color1 = vcolarray[3*i:3*i+3]
face_col.color2 = vcolarray[3*j:3*j+3]
face_col.color3 = vcolarray[3*k:3*k+3]
if l != 0:
face_col.color4 = vcolarray[3*l:3*l+3]
"""
vcol_layer = mesh.vertex_colors.new()
vcol_data = vcol_layer.data
vcol_data.foreach_set('color', vcolarray)
if vnarray is not None:
print('Warning: NOT applying vertex normals (yet)')
mesh.validate()
mesh.update()
return mesh
def load_ply(context, filepath):
t = time.time()
ply_name = bpy.path.display_name_from_filepath(filepath)
mesh = load_ply_mesh(filepath, ply_name)
scn = bpy.context.scene
obj = bpy.data.objects.new(ply_name, mesh)
scn.objects.link(obj)
scn.objects.active = obj
obj.select = True
print('\nSuccessfully imported %r in %.3f sec' % (filepath, time.time() - t))
class ImportPLY(bpy.types.Operator, ImportHelper):
"""Load a PLY geometry file"""
bl_idname = "import_mesh.ply2"
bl_label = "Import PLY (alternative)"
bl_description = 'Alternative importer for PLY files'
bl_options = {'UNDO'}
files = CollectionProperty(
name="File Path",
description="File path used for importing the PLY file",
type=bpy.types.OperatorFileListElement
)
directory = StringProperty()
filename_ext = ".ply"
filter_glob = StringProperty(default="*.ply", options={'HIDDEN'})
def execute(self, context):
paths = [os.path.join(self.directory, name.name)
for name in self.files]
if not paths:
paths.append(self.filepath)
for path in paths:
load_ply(context, path)
bpy.context.scene.update()
return {'FINISHED'}
def menu_func_import(self, context):
self.layout.operator(ImportPLY.bl_idname, text="Stanford PLY [ALTERNATIVE] (.ply)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_import.append(menu_func_import)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_import.remove(menu_func_import)
if __name__ == "__main__":
register()
|
#! /usr/bin/env python
import time
import pyslet.odata2.csdl as edm
from pyslet.wsgi import WSGIDataApp
class MyApp(WSGIDataApp):
settings_file = 'samples/wsgi_data/settings.json'
def init_dispatcher(self):
super(MyApp, self).init_dispatcher()
self.set_method("/*", self.home)
def home(self, context):
path = context.environ.get('PATH_INFO', '')
with self.container['Hits'].open() as collection:
ntries = 0
while ntries < 5:
try:
hit = collection[path]
collection.update_entity(hit)
break
except KeyError:
try:
hit = collection.new_entity()
hit.set_key(path)
collection.insert_entity(hit)
break
except edm.ConstraintError:
# possible race condition, concurrency failure
time.sleep(1)
ntries += 1
        data = ("<html><head><title>Hit Count</title></head>"
                "<body><p>You are hit number: %i</p></body></html>" %
                hit['Count'].value)
context.set_status(200)
context.add_header("Cache-Control", "no-cache")
return self.html_response(context, data)
if __name__ == "__main__":
MyApp.main()
|
import tensorflow as tf
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import initializers, regularizers, constraints
from tensorflow.python.keras.backend import _preprocess_padding
from tensorflow.python.keras.layers import Conv2D, Add
from tensorflow.python.keras.layers import Layer
from tensorflow.python.keras.utils import conv_utils
from utils import he_init, glorot_init
class ConditionalCenterScale(Layer):
def __init__(self,
number_of_classes,
axis=-1,
center=True,
scale=True,
beta_initializer='zeros',
gamma_initializer='ones',
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
**kwargs):
super(ConditionalCenterScale, self).__init__(**kwargs)
self.number_of_classes = number_of_classes
self.supports_masking = True
self.axis = axis
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
def build(self, input_shape):
ndim = len(input_shape[0])
cls = input_shape[1]
if len(cls) != 2:
raise ValueError("Classes should be one dimensional")
if self.axis == 0:
raise ValueError('Axis cannot be zero')
if (self.axis is not None) and (ndim == 2):
raise ValueError('Cannot specify axis for rank 1 tensor')
if self.axis is None:
shape = (self.number_of_classes, 1)
else:
shape = (self.number_of_classes, input_shape[0][self.axis])
if self.scale:
self.gamma = self.add_weight(shape=shape,
name='gamma',
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint)
else:
self.gamma = None
if self.center:
self.beta = self.add_weight(shape=shape,
name='beta',
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint)
else:
self.beta = None
super(ConditionalCenterScale, self).build(input_shape)
def call(self, inputs, training=None):
class_labels = K.squeeze(inputs[1], axis=1)
inputs = inputs[0]
input_shape = K.int_shape(inputs)
reduction_axes = list(range(0, len(input_shape)))
if self.axis is not None:
del reduction_axes[self.axis]
del reduction_axes[0]
normed = inputs
broadcast_shape = [1] * len(input_shape)
broadcast_shape[0] = K.shape(inputs)[0]
if self.axis is not None:
broadcast_shape[self.axis] = input_shape[self.axis]
if self.scale:
broadcast_gamma = K.reshape(K.gather(self.gamma, class_labels), broadcast_shape)
normed = normed * broadcast_gamma
if self.center:
broadcast_beta = K.reshape(K.gather(self.beta, class_labels), broadcast_shape)
normed = normed + broadcast_beta
return normed
def compute_output_shape(self, input_shape):
return input_shape[0]
def get_config(self):
config = {
'number_of_classes': self.number_of_classes,
'axis': self.axis,
'center': self.center,
'scale': self.scale,
'beta_initializer': initializers.serialize(self.beta_initializer),
'gamma_initializer': initializers.serialize(self.gamma_initializer),
'beta_regularizer': regularizers.serialize(self.beta_regularizer),
'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
'beta_constraint': constraints.serialize(self.beta_constraint),
'gamma_constraint': constraints.serialize(self.gamma_constraint)
}
base_config = super(ConditionalCenterScale, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
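# Sketch of the conditional affine transform above (illustrative; the Input calls below
# are assumptions, not part of this module): given feature maps and an integer class
# label per sample, the layer gathers a per-class gamma/beta pair and applies
# x * gamma[cls] + beta[cls], broadcast over the chosen axis.
#
#   x   = Input((8, 8, 64))            # feature map, channels_last
#   cls = Input((1,), dtype='int32')   # (bs, 1) integer class labels
#   y   = ConditionalCenterScale(number_of_classes=10)([x, cls])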
class CenterScale(Layer):
def __init__(self,
axis=-1,
center=True,
scale=True,
beta_initializer='zeros',
gamma_initializer='ones',
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
**kwargs):
super(CenterScale, self).__init__(**kwargs)
self.axis = axis
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
def build(self, input_shape):
        ndim = len(input_shape)
if self.axis == 0:
raise ValueError('Axis cannot be zero')
if (self.axis is not None) and (ndim == 2):
raise ValueError('Cannot specify axis for rank 1 tensor')
if self.axis is None:
shape = (1, )
else:
shape = (input_shape[self.axis], )
if self.scale:
self.gamma = self.add_weight(shape=shape,
name='gamma',
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint)
else:
self.gamma = None
if self.center:
self.beta = self.add_weight(shape=shape,
name='beta',
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint)
else:
self.beta = None
super(CenterScale, self).build(input_shape)
def call(self, inputs, training=None):
input_shape = K.int_shape(inputs)
reduction_axes = list(range(0, len(input_shape)))
if self.axis is not None:
del reduction_axes[self.axis]
del reduction_axes[0]
normed = inputs
broadcast_shape = [1] * len(input_shape)
if self.axis is not None:
broadcast_shape[self.axis] = input_shape[self.axis]
if self.scale:
broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
normed = normed * broadcast_gamma
if self.center:
broadcast_beta = K.reshape(self.beta, broadcast_shape)
normed = normed + broadcast_beta
return normed
def get_config(self):
config = {
'axis': self.axis,
'center': self.center,
'scale': self.scale,
'beta_initializer': initializers.serialize(self.beta_initializer),
'gamma_initializer': initializers.serialize(self.gamma_initializer),
'beta_regularizer': regularizers.serialize(self.beta_regularizer),
'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
'beta_constraint': constraints.serialize(self.beta_constraint),
'gamma_constraint': constraints.serialize(self.gamma_constraint)
}
base_config = super(CenterScale, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ConditionalConv11(Layer):
def __init__(self, filters,
number_of_classes,
strides=1,
group=1,
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
triangular=False,
**kwargs):
super(ConditionalConv11, self).__init__(**kwargs)
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple((1, 1), 2, 'kernel_size')
self.number_of_classes = number_of_classes
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding('same')
self.group = group
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(1, 2, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.triangular = triangular
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
        if input_shape[0][channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[0][channel_axis].value
assert (input_dim % self.group == 0), 'group incorrect!'
self.m_per_group = input_dim // self.group
self.input_dim = input_dim
kernel_shape = (self.number_of_classes,) + self.kernel_size + (input_dim, self.filters)
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.number_of_classes, self.filters),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
super(ConditionalConv11, self).build(input_shape)
def call(self, inputs):
cls = inputs[1]
x = inputs[0]
### Preprocess input
# (bs, w, h, c)
if self.data_format != 'channels_first':
x = tf.transpose(x, [0, 3, 1, 2])
_, in_c, w, h = K.int_shape(x)
else:
_, w, h, in_c = K.int_shape(x)
# (bs, c, w, h)
x = tf.reshape(x, (-1, in_c, w * h))
# (bs, c, w*h)
x = tf.transpose(x, [0, 2, 1])
# (bs, w*h, c)
### Preprocess filter
cls = tf.squeeze(cls, axis=1)
# (num_cls, 1, 1, in, out)
if self.triangular:
kernel = tf.matrix_band_part(self.kernel, 0, -1)
else:
kernel = self.kernel
kernel = tf.gather(kernel, cls)
# (bs, 1, 1, in, out)
kernel = tf.squeeze(kernel, axis=1)
kernel = tf.squeeze(kernel, axis=1)
# print (K.int_shape(kernel))
# (in, 1, bs, out)
# print (K.int_shape(kernel))
output = tf.matmul(x, kernel)
# (bs, w*h, out)
### Deprocess output
output = tf.transpose(output, [0, 2, 1])
# (bs, out, w * h)
output = tf.reshape(output, (-1, self.filters, w, h))
# (bs, out, w, h)
if self.bias is not None:
# (num_cls, out)
bias = tf.gather(self.bias, cls)
# (bs, bias)
bias = tf.expand_dims(bias, axis=-1)
bias = tf.expand_dims(bias, axis=-1)
# (bs, bias, 1, 1)
output += bias
if self.data_format != 'channels_first':
# (bs, out, w, h)
output = tf.transpose(output, [0, 2, 3, 1])
if self.activation is not None:
return self.activation(output)
return output
def compute_output_shape(self, input_shape):
input_shape = input_shape[0]
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0],) + tuple(new_space) + (self.filters,)
if self.data_format == 'channels_first':
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0], self.filters) + tuple(new_space)
def get_config(self):
config = {
'number_of_classes': self.number_of_classes,
'rank': 2,
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(ConditionalConv11, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
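# Shape sketch for ConditionalConv11 (illustrative; the Input calls below are
# assumptions): the layer takes [features, labels] and applies a 1x1 convolution whose
# kernel is gathered per sample from a (number_of_classes, 1, 1, in, out) weight bank,
# i.e. a per-sample matmul.
#
#   x   = Input((8, 8, 16))            # (bs, 8, 8, in_c=16), channels_last
#   cls = Input((1,), dtype='int32')   # (bs, 1) integer class labels
#   y   = ConditionalConv11(filters=32, number_of_classes=10)([x, cls])
#   # y: (bs, 8, 8, 32); the kernel gathered for each sample reduces to shape (16, 32)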
class FactorizedConv11(Layer):
def __init__(self, filters,
number_of_classes,
filters_emb,
strides=1,
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(FactorizedConv11, self).__init__(**kwargs)
self.filters = filters
self.filters_emb = filters_emb
self.kernel_size = conv_utils.normalize_tuple((1, 1), 2, 'kernel_size')
self.number_of_classes = number_of_classes
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding('same')
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(1, 2, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
        if input_shape[0][channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[0][channel_axis].value
self.input_dim = input_dim
class_matrix_shape = (self.number_of_classes, self.filters_emb)
kernel_shape = (self.filters_emb, ) + self.kernel_size + (input_dim, self.filters)
self.class_matrix = self.add_weight(shape=class_matrix_shape,
initializer=self.kernel_initializer,
name='class_matrix')
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.number_of_classes, self.filters),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
super(FactorizedConv11, self).build(input_shape)
def call(self, inputs):
cls = inputs[1]
x = inputs[0]
### Preprocess input
#(bs, w, h, c)
if self.data_format != 'channels_first':
x = tf.transpose(x, [0, 3, 1, 2])
_, in_c, w, h = K.int_shape(x)
else:
_, w, h, in_c = K.int_shape(x)
#(bs, c, w, h)
x = tf.reshape(x, (-1, in_c, w * h))
#(bs, c, w*h)
x = tf.transpose(x, [0, 2, 1])
#(bs, w*h, c)
### Preprocess filter
cls = tf.squeeze(cls, axis=1)
#(num_cls, 1, 1, in, out)
cls_emb = tf.gather(self.class_matrix, cls)
cls_emb = K.l2_normalize(cls_emb, axis=1)
#(bs, filters_emb)
kernel = tf.reshape(self.kernel, (self.filters_emb, -1))
#(filters_emb, 1 * 1 * in * out)
kernel = tf.matmul(cls_emb, kernel)
#(bs, 1 * 1 * in * out)
kernel = tf.reshape(kernel, (-1, 1, 1, in_c, self.filters))
#(bs, 1, 1, in, out)
kernel = tf.squeeze(kernel, axis=1)
kernel = tf.squeeze(kernel, axis=1)
#print (K.int_shape(kernel))
#(in, 1, bs, out)
#print (K.int_shape(kernel))
output = tf.matmul(x, kernel)
#(bs, w*h, out)
### Deprocess output
output = tf.transpose(output, [0, 2, 1])
# (bs, out, w * h)
output = tf.reshape(output, (-1, self.filters, w, h))
# (bs, out, w, h)
if self.bias is not None:
#(num_cls, out)
bias = tf.gather(self.bias, cls)
#(bs, bias)
bias = tf.expand_dims(bias, axis=-1)
bias = tf.expand_dims(bias, axis=-1)
#(bs, bias, 1, 1)
output += bias
if self.data_format != 'channels_first':
#(bs, out, w, h)
output = tf.transpose(output, [0, 2, 3, 1])
if self.activation is not None:
return self.activation(output)
return output
def compute_output_shape(self, input_shape):
input_shape = input_shape[0]
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0],) + tuple(new_space) + (self.filters,)
if self.data_format == 'channels_first':
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0], self.filters) + tuple(new_space)
def get_config(self):
config = {
'number_of_classes': self.number_of_classes,
'rank': 2,
'filters': self.filters,
'filters_emb': self.filters_emb,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(FactorizedConv11, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class NINConv11(Layer):
def __init__(self, filters, locnet,
strides=1,
data_format=None,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(NINConv11, self).__init__(**kwargs)
self.filters = int(filters)
self.locnet = locnet
self.kernel_size = conv_utils.normalize_tuple((1, 1), 2, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding('same')
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(1, 2, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
        if input_shape[0][channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[0][channel_axis]
self.input_dim = input_dim
# class_matrix_shape = (self.number_of_classes, self.filters_emb)
# kernel_shape = (self.filters_emb, ) + self.kernel_size + (input_dim, self.filters)
self.trainable_weights = self.locnet.trainable_weights
if self.use_bias:
self.bias = self.add_weight(shape=(self.filters, ),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
super(NINConv11, self).build(input_shape)
def call(self, inputs):
z = inputs[1]
x = inputs[0]
### Preprocess input
#(bs, w, h, c)
if self.data_format != 'channels_first':
x = tf.transpose(x, [0, 3, 1, 2])
_, in_c, w, h = K.int_shape(x)
else:
_, w, h, in_c = K.int_shape(x)
#(bs, c, w, h)
x = tf.reshape(x, (-1, in_c, w * h))
#(bs, c, w*h)
x = tf.transpose(x, [0, 2, 1])
#(bs, w*h, c)
### Preprocess filter
kernel = self.locnet(z)
#(bs, 1 * 1 * in * out)
kernel = tf.reshape(kernel, (-1, 1, 1, in_c, self.filters))
#(bs, 1, 1, in, out)
kernel = tf.squeeze(kernel, axis=1)
kernel = tf.squeeze(kernel, axis=1)
#print (K.int_shape(kernel))
#(in, 1, bs, out)
#print (K.int_shape(kernel))
output = tf.matmul(x, kernel)
#(bs, w*h, out)
### Deprocess output
output = tf.transpose(output, [0, 2, 1])
# (bs, out, w * h)
output = tf.reshape(output, (-1, self.filters, w, h))
# (bs, out, w, h)
if self.bias is not None:
#(out, )
bias = tf.expand_dims(self.bias, axis=0)
bias = tf.expand_dims(bias, axis=-1)
bias = tf.expand_dims(bias, axis=-1)
#(1, bias, 1, 1)
output += bias
if self.data_format != 'channels_first':
#(bs, out, w, h)
output = tf.transpose(output, [0, 2, 3, 1])
if self.activation is not None:
return self.activation(output)
return output
def compute_output_shape(self, input_shape):
input_shape = input_shape[0]
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0],) + tuple(new_space) + (self.filters,)
if self.data_format == 'channels_first':
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0], self.filters) + tuple(new_space)
def get_config(self):
        config = {
            'rank': 2,
            'filters': self.filters,
            'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(NINConv11, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
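# Usage sketch for NINConv11 (illustrative only; it assumes `filters` and `locnet`
# are accepted as keyword arguments and that Input/Dense/Model are the usual keras
# classes, none of which is shown in this excerpt):
#
#   z = Input(shape=(8,))
#   locnet = Model(z, Dense(16 * 32)(z))   # maps z to a flattened 1x1 kernel (in_c * filters)
#   x = Input(shape=(28, 28, 16))
#   y = NINConv11(filters=32, locnet=locnet)([x, z])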
class ConditionalConv2D(Layer):
def __init__(self, filters,
kernel_size,
number_of_classes,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(ConditionalConv2D, self).__init__(**kwargs)
self.filters = int(filters)
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.number_of_classes = number_of_classes
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
def build(self, input_shape):
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
        if input_shape[0][channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = input_shape[0][channel_axis]
kernel_shape = (self.number_of_classes, ) + self.kernel_size + (input_dim, self.filters)
self.kernel = self.add_weight(shape=kernel_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.number_of_classes, self.filters),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
super(ConditionalConv2D, self).build(input_shape)
def call(self, inputs):
def apply_separate_filter_for_each_batch(inputs):
kernel = inputs[1]
x = K.expand_dims(inputs[0], axis=0)
outputs = K.conv2d(
x,
kernel,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if self.bias is not None:
bias = inputs[2]
outputs = K.bias_add(outputs, bias, data_format=self.data_format)
return K.squeeze(outputs, axis=0)
x = inputs[0]
classes = K.squeeze(inputs[1], axis=1)
if self.bias is not None:
outputs = K.map_fn(apply_separate_filter_for_each_batch,
[x, K.gather(self.kernel, classes), K.gather(self.bias, classes)], dtype='float32')
else:
outputs = K.map_fn(apply_separate_filter_for_each_batch,
[x, K.gather(self.kernel, classes)], dtype='float32')
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = input_shape[0]
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0],) + tuple(new_space) + (self.filters,)
if self.data_format == 'channels_first':
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return (input_shape[0], self.filters) + tuple(new_space)
def get_config(self):
config = {
'number_of_classes': self.number_of_classes,
'rank': 2,
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(ConditionalConv2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
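def _conditional_conv2d_example():
    # Illustrative sketch only, not part of the original module: builds a tiny model
    # in which every sample is convolved with the kernel/bias slice selected by its
    # class index. Assumes Input/Model are importable from keras (Keras 2 API, as
    # used elsewhere in this file).
    from keras.layers import Input
    from keras.models import Model
    images = Input(shape=(32, 32, 3))
    labels = Input(shape=(1,), dtype='int32')  # one integer class index per sample
    features = ConditionalConv2D(filters=16, kernel_size=(3, 3), number_of_classes=10,
                                 padding='same')([images, labels])
    return Model([images, labels], features)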
class ConditionalDepthwiseConv2D(Layer):
def __init__(self, filters,
kernel_size,
number_of_classes,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(ConditionalDepthwiseConv2D, self).__init__(**kwargs)
self.filters = int(filters)
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.number_of_classes = number_of_classes
self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
def build(self, input_shape):
input_shape = input_shape[0]
        if len(input_shape) < 4:
            raise ValueError('Inputs to `ConditionalDepthwiseConv2D` should have rank 4. '
                             'Received input shape:', str(input_shape))
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = 3
assert input_shape[channel_axis] == self.filters
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs to '
                             '`ConditionalDepthwiseConv2D` '
                             'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
depthwise_kernel_shape = (self.number_of_classes,
self.kernel_size[0],
self.kernel_size[1],
input_dim)
self.kernel = self.add_weight(
shape=depthwise_kernel_shape,
initializer=self.kernel_initializer,
name='depthwise_kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.number_of_classes, self.filters),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
# Set input spec.
self.built = True
def call(self, inputs):
        data_format = self.data_format
        if data_format not in {'channels_first', 'channels_last'}:
            raise ValueError('Unknown data_format ' + str(data_format))
strides = (1,) + self.strides + (1,)
x = inputs[0]
cls = K.squeeze(inputs[1], axis=-1)
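        # Per-sample depthwise convolution trick: gather each sample's kernel by its
        # class index, fold the batch dimension into the channel dimension, run a
        # single tf.nn.depthwise_conv2d over the stacked channels, then unfold.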
#Kernel preprocess
kernel = K.gather(self.kernel, cls)
#(bs, w, h, c)
kernel = tf.transpose(kernel, [1, 2, 3, 0])
#(w, h, c, bs)
kernel = K.reshape(kernel, (self.kernel_size[0], self.kernel_size[1], -1))
#(w, h, c * bs)
kernel = K.expand_dims(kernel, axis=-1)
#(w, h, c * bs, 1)
        if self.data_format == 'channels_first':
x = tf.transpose(x, [0, 2, 3, 1])
bs, w, h, c = K.int_shape(x)
#(bs, w, h, c)
x = tf.transpose(x, [1, 2, 3, 0])
#(w, h, c, bs)
x = K.reshape(x, (w, h, -1))
#(w, h, c * bs)
x = K.expand_dims(x, axis=0)
#(1, w, h, c * bs)
padding = _preprocess_padding(self.padding)
outputs = tf.nn.depthwise_conv2d(x, kernel,
strides=strides,
padding=padding,
rate=self.dilation_rate)
#(1, w, h, c * bs)
_, w, h, _ = K.int_shape(outputs)
outputs = K.reshape(outputs, [w, h, self.filters, -1])
#(w, h, c, bs)
outputs = tf.transpose(outputs, [3, 0, 1, 2])
#(bs, w, h, c)
if self.bias is not None:
#(num_cls, out)
bias = tf.gather(self.bias, cls)
#(bs, bias)
bias = tf.expand_dims(bias, axis=1)
bias = tf.expand_dims(bias, axis=1)
#(bs, bias, 1, 1)
outputs += bias
        if self.data_format == 'channels_first':
outputs = tf.transpose(outputs, [0, 3, 1, 2])
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = input_shape[0]
if self.data_format == 'channels_first':
rows = input_shape[2]
cols = input_shape[3]
elif self.data_format == 'channels_last':
rows = input_shape[1]
cols = input_shape[2]
rows = conv_utils.conv_output_length(rows, self.kernel_size[0],
self.padding,
self.strides[0])
cols = conv_utils.conv_output_length(cols, self.kernel_size[1],
self.padding,
self.strides[1])
if self.data_format == 'channels_first':
return (input_shape[0], self.filters, rows, cols)
elif self.data_format == 'channels_last':
return (input_shape[0], rows, cols, self.filters)
    def get_config(self):
        config = super(ConditionalDepthwiseConv2D, self).get_config()
        config.pop('kernel_initializer', None)
        config.pop('kernel_regularizer', None)
        config.pop('kernel_constraint', None)
        config['depth_multiplier'] = 1
        config['depthwise_initializer'] = initializers.serialize(self.kernel_initializer)
        config['depthwise_regularizer'] = regularizers.serialize(self.kernel_regularizer)
        config['depthwise_constraint'] = constraints.serialize(self.kernel_constraint)
        return config
class ConditionalDense(Layer):
def __init__(self, units,
number_of_classes,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(ConditionalDense, self).__init__(**kwargs)
self.units = units
self.number_of_classes = number_of_classes
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.supports_masking = True
def build(self, input_shape):
input_shape = input_shape[0]
assert len(input_shape) >= 2
input_dim = input_shape[-1]
self.kernel = self.add_weight(shape=(self.number_of_classes, input_dim, self.units),
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
if self.use_bias:
self.bias = self.add_weight(shape=(self.number_of_classes, self.units),
initializer=self.bias_initializer,
name='bias',
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs):
classes = K.squeeze(inputs[1], axis=1)
kernel = K.gather(self.kernel, classes)
#(bs, in, out)
x = K.expand_dims(inputs[0], axis=1)
#(bs, 1, in)
output = tf.matmul(x, kernel)
#(bs, 1, out)
output = K.squeeze(output, axis=1)
#(bs, out)
if self.bias is not None:
b = K.gather(self.bias, classes)
output += b
if self.activation is not None:
return self.activation(output)
return output
def compute_output_shape(self, input_shape):
input_shape = input_shape[0]
assert input_shape and len(input_shape) >= 2
assert input_shape[-1]
output_shape = list(input_shape)
output_shape[-1] = self.units
return tuple(output_shape)
def get_config(self):
config = {
'number_of_classes': self.number_of_classes,
'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(ConditionalDense, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
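def _conditional_dense_example():
    # Illustrative sketch only, not part of the original module: a dense layer whose
    # kernel and bias are selected per sample by the class index in the second input.
    # Assumes Input/Model are importable from keras.
    from keras.layers import Input
    from keras.models import Model
    features = Input(shape=(128,))
    labels = Input(shape=(1,), dtype='int32')
    logits = ConditionalDense(units=10, number_of_classes=5)([features, labels])
    return Model([features, labels], logits)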
def get_separable_conv(cls, number_of_classes, conv11_layer=Conv2D,
conv_layer=ConditionalDepthwiseConv2D, conditional_conv11=False,
conditional_conv=False, **kwargs):
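    # Builds a separable-convolution block: an optional 1x1 channel-matching conv,
    # a (possibly class-conditional) depthwise conv, then a 1x1 pointwise conv.
    # `cls` is the class-index tensor fed to the conditional sub-layers.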
def layer(x):
ch_out = kwargs['filters']
ch_in = K.int_shape(x)[1 if K.image_data_format() == 'channels_first' else -1]
if ch_in != ch_out:
if conditional_conv11:
out = conv11_layer(filters=ch_out, kernel_initializer=glorot_init,
number_of_classes=number_of_classes, name=kwargs['name'] + '-preprocess_part')([x, cls])
else:
                out = conv11_layer(filters=ch_out, kernel_initializer=glorot_init, name=kwargs['name'] + '-preprocess_part')(x)
else:
out = x
if conditional_conv:
out = conv_layer(number_of_classes=number_of_classes, filters=ch_out,
kernel_size=kwargs['kernel_size'], padding='same',
name=kwargs['name'] + '-depthwise_part')([out, cls])
else:
out = conv_layer(filters=ch_out,
kernel_size=kwargs['kernel_size'], padding='same',
name=kwargs['name'] + '-depthwise_part')(out)
if conditional_conv11:
out = conv11_layer(number_of_classes=number_of_classes,
filters=ch_out, kernel_initializer=glorot_init,
name=kwargs['name'] + '-conv11_part')([out, cls])
else:
out = conv11_layer(filters=ch_out, kernel_initializer=glorot_init,
name=kwargs['name'] + '-conv11_part')(out)
return out
return layer
def get_separable_conditional_conv(cls, number_of_classes, conv_layer=Conv2D,
conditional_conv_layer=ConditionalConv11, **kwargs):
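    # Builds a block whose output is the sum of an unconditional path (a regular conv
    # plus an optional 1x1 projection) and a class-conditional 1x1 path.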
def layer(x):
ch_out = kwargs['filters']
ch_in = K.int_shape(x)[1 if K.image_data_format() == 'channels_first' else -1]
out = conv_layer(filters=ch_in, kernel_size=kwargs['kernel_size'], padding='same', kernel_initializer=he_init,
name=kwargs['name'] + '-u_part')(x)
if ch_in != ch_out:
out_u = conv_layer(filters=ch_out, kernel_size=(1, 1),
kernel_initializer=glorot_init, name=kwargs['name'] + '-pr_part')(out)
else:
out_u = out
out_c = conditional_conv_layer(number_of_classes=number_of_classes, filters=ch_out,
kernel_initializer=glorot_init, name=kwargs['name'] + '-c_part')([out, cls])
return Add()([out_u, out_c])
return layer
|
EXTERNAL_API_URL = 'https://quoters.apps.pcfone.io/api/random'
# LocalHost
# POSTGRES_CONNECTION_STRING = 'postgresql://postgres:postgres@localhost/postgres'
# Docker
POSTGRES_CONNECTION_STRING = 'postgresql://postgres:postgres@postgres_database/postgres'
SQL_SERVER_CONNECTION_STRING_TEMPLATE = f'''
DRIVER={{ODBC Driver 17 for SQL Server}};
SERVER=YOUR_SERVER_ADDRESS_HERE;
DATABASE=YOUR_DATABASE_NAME_HERE;
Uid=YOUR_USER_NAME_HERE;
Pwd=YOUR_PASSWORD_HERE;
Encrypt=no;
'''
SQL_SERVER_CONNECTION_STRING = f''
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import numpy as np
from brainstorm.handlers.base_handler import Handler
# ############################## Debug Array ################################ #
class DebugArray(object):
def __init__(self, arr):
assert arr is not None
self.shape = arr.shape
self.array = arr
self.size = self.array.size
def __getitem__(self, item):
if isinstance(item, (int, slice)):
item = tuple([item])
assert isinstance(item, tuple)
for i in item:
assert isinstance(i, (int, slice))
if isinstance(i, slice):
assert i.step is None
return DebugArray(arr=self.array.__getitem__(item))
def reshape(self, new_shape):
if isinstance(new_shape, (tuple, list)):
assert all([t >= 0 for t in tuple(new_shape)])
else:
assert isinstance(new_shape, int)
assert new_shape >= 0
return DebugArray(arr=self.array.reshape(new_shape))
def _check_for_inf(handler, arg, name):
if isinstance(arg, (int, float)) and not np.isfinite(arg):
raise ValueError('NaN or Inf encountered in "{}" argument'
.format(name))
if isinstance(arg, DebugArray) and not handler.is_fully_finite(arg):
raise ValueError('NaN or Inf encountered in "{}"'.format(name))
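# Decorator for Handler methods: after the wrapped call returns, every positional and
# keyword argument (scalars and DebugArrays alike) is checked for NaN/Inf. Since the
# handlers write their results into the `out` arguments in place, this also covers the
# outputs of the operation.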
def check_for_inf_or_nan(f):
def checked_f(*args, **kwargs):
result = f(*args, **kwargs)
handler = args[0]
for i, arg in enumerate(args, start=1):
_check_for_inf(handler, arg, '{}.'.format(i))
for n, v in kwargs.items():
_check_for_inf(handler, v, n)
return result
return checked_f
# ############################# Debug Handler ############################### #
class DebugHandler(Handler):
__undescribed__ = {'EMPTY', 'array_type'}
def __init__(self, handler):
super(DebugHandler, self).__init__()
self.handler = handler
self.EMPTY = DebugArray(arr=handler.EMPTY)
self.array_type = DebugArray
def __init_from_description__(self, description):
self.__init__(self.handler)
# ------------------------- Allocate new memory ------------------------- #
def allocate(self, shape):
assert_is_shape(shape)
return DebugArray(self.handler.allocate(shape))
def ones(self, shape):
assert_is_shape(shape)
return DebugArray(self.handler.ones(shape))
def zeros(self, shape):
assert_is_shape(shape)
return DebugArray(self.handler.zeros(shape))
# ---------------------------- Copy and Fill ---------------------------- #
@check_for_inf_or_nan
def copy_to(self, src, dest):
assert_debug_arrays(dest, src)
assert_shapes_equal(dest, src)
assert dest.size == src.size, "{} != {}".format(dest.size, src.size)
self.handler.copy_to(src.array, dest.array)
@check_for_inf_or_nan
def copy_to_if(self, src, dest, cond):
assert_debug_arrays(src, dest, cond)
assert_shapes_equal(src, dest, cond)
self.handler.copy_to_if(src.array, dest.array, cond.array)
@check_for_inf_or_nan
def create_from_numpy(self, arr):
assert isinstance(arr, np.ndarray)
return DebugArray(self.handler.create_from_numpy(arr))
@check_for_inf_or_nan
def fill(self, mem, val):
assert_debug_arrays(mem)
assert_is_scalar(val)
self.handler.fill(mem.array, val)
@check_for_inf_or_nan
def fill_if(self, mem, val, cond):
assert_is_scalar(val)
assert_debug_arrays(mem, cond)
assert_shapes_equal(mem, cond)
self.handler.fill_if(mem.array, val, cond.array)
@check_for_inf_or_nan
def get_numpy_copy(self, mem):
assert_debug_arrays(mem)
return self.handler.get_numpy_copy(mem.array)
@check_for_inf_or_nan
def set_from_numpy(self, mem, arr):
assert_debug_arrays(mem)
assert isinstance(arr, np.ndarray)
assert mem.shape == arr.shape, \
"{} != {}".format(mem.shape, arr.shape)
self.handler.set_from_numpy(mem.array, arr)
# ---------------------------- Debug helpers ---------------------------- #
def is_fully_finite(self, a):
return self.handler.is_fully_finite(a.array)
# ----------------------- Mathematical operations ----------------------- #
@check_for_inf_or_nan
def abs_t(self, a, out):
assert_debug_arrays(a, out)
assert_shapes_equal(a, out)
self.handler.abs_t(a.array, out.array)
@check_for_inf_or_nan
def add_into_if(self, a, out, cond):
assert_debug_arrays(a, out, cond)
assert_shapes_equal(a, out, cond)
self.handler.add_into_if(a.array, out.array, cond.array)
@check_for_inf_or_nan
def add_mv(self, m, v, out):
assert_debug_arrays(m, v, out)
assert_shapes_equal(m, out)
assert len(m.shape) == 2, "len({}) != 2".format(m.shape)
assert v.shape == (m.shape[0], 1) or v.shape == (1, m.shape[1]), \
"invalid shape {}".format(v.shape)
self.handler.add_mv(m.array, v.array, out.array)
@check_for_inf_or_nan
def add_st(self, s, t, out):
assert_debug_arrays(t, out)
assert_is_scalar(s)
assert_shapes_equal(t, out)
self.handler.add_st(s, t.array, out.array)
@check_for_inf_or_nan
def add_tt(self, a, b, out):
assert_debug_arrays(a, b, out)
assert_shapes_equal(a, b, out)
self.handler.add_tt(a.array, b.array, out.array)
@check_for_inf_or_nan
def avgpool2d_backward_batch(self, inputs, window, outputs, padding,
stride, in_deltas, out_deltas):
assert_debug_arrays(inputs, outputs, in_deltas, out_deltas)
assert_is_shape(window)
assert len(window) == 2, "len({}) != 2".format(window)
assert_is_shape(stride)
assert len(stride) == 2, "len({}) != 2".format(stride)
assert isinstance(padding, int) and 0 <= padding, \
"invalid padding {}".format(padding)
assert_shapes_equal(inputs, in_deltas)
assert_shapes_equal(outputs, out_deltas)
# TODO: check shapes of inputs, outputs
self.handler.avgpool2d_backward_batch(inputs.array, window,
outputs.array, padding, stride,
in_deltas.array,
out_deltas.array)
@check_for_inf_or_nan
def avgpool2d_forward_batch(self, inputs, window, outputs, padding,
stride):
assert_debug_arrays(inputs, outputs)
assert_is_shape(window)
assert len(window) == 2, "len({}) != 2".format(window)
assert_is_shape(stride)
assert len(stride) == 2, "len({}) != 2".format(stride)
assert isinstance(padding, int) and 0 <= padding, \
"invalid padding {}".format(padding)
# TODO: check shapes of inputs, outputs,
self.handler.avgpool2d_forward_batch(inputs.array, window,
outputs.array, padding, stride)
@check_for_inf_or_nan
def binarize_v(self, v, out):
assert_debug_arrays(v, out)
assert len(v.shape) == len(out.shape) == 2
assert v.shape == (out.shape[0], 1)
assert self.handler.get_numpy_copy(v.array).min() >= 0
assert int(self.handler.get_numpy_copy(v.array).max()) < out.shape[1]
self.handler.binarize_v(v.array, out.array)
@check_for_inf_or_nan
def broadcast_t(self, a, axis, out):
assert_debug_arrays(a, out)
assert (isinstance(axis, int) and 0 <= axis < len(out.shape)),\
"invalid axis {}".format(axis)
assert a.shape[axis] == 1
assert a.shape == out.shape[:axis] + (1,) + out.shape[axis+1:]
self.handler.broadcast_t(a.array, axis, out.array)
@check_for_inf_or_nan
def clip_t(self, a, a_min, a_max, out):
assert_debug_arrays(a, out)
assert_is_scalar(a_min)
assert_is_scalar(a_max)
assert_shapes_equal(a, out)
assert a_min <= a_max, "not {} <= {}".format(a_min, a_max)
self.handler.clip_t(a.array, a_min, a_max, out.array)
@check_for_inf_or_nan
def conv2d_backward_batch(self, inputs, weights, padding, stride,
in_deltas, out_deltas, weight_deltas,
bias_deltas):
assert_debug_arrays(inputs, weights, in_deltas, out_deltas,
weight_deltas, bias_deltas)
assert isinstance(padding, int) and 0 <= padding, \
"invalid padding {}".format(padding)
assert_is_shape(stride)
assert len(stride) == 2, "len({}) != 2".format(stride)
# TODO: check shapes of inputs, weights, in_deltas, out_deltas,
# TODO: weight_deltas, bias_deltas
self.handler.conv2d_backward_batch(inputs.array, weights.array,
padding, stride, in_deltas.array,
out_deltas.array,
weight_deltas.array,
bias_deltas.array)
@check_for_inf_or_nan
def conv2d_forward_batch(self, inputs, weights, bias, outputs,
padding, stride):
assert_debug_arrays(inputs, weights, bias, outputs)
assert isinstance(padding, int) and 0 <= padding, \
"invalid padding {}".format(padding)
assert_is_shape(stride)
assert len(stride) == 2, "len({}) != 2".format(stride)
# TODO check shapes of inputs, weights, bias, and outputs
self.handler.conv2d_forward_batch(inputs.array, weights.array,
bias.array, outputs.array,
padding, stride)
@check_for_inf_or_nan
def dot_add_mm(self, a, b, out, transa=False, transb=False):
assert_debug_arrays(a, b, out)
assert len(a.shape) == 2, "len({}) != 2".format(a.shape)
assert len(b.shape) == 2, "len({}) != 2".format(b.shape)
assert len(out.shape) == 2, "len({}) != 2".format(out.shape)
assert transa in [True, False]
assert transb in [True, False]
a1, a2 = a.shape
b1, b2 = b.shape
if transa:
a1, a2 = a2, a1
if transb:
b1, b2 = b2, b1
assert a2 == b1, "{} != {} ({}, {})".format(a2, b1, transa, transb)
assert out.shape == (a1, b2), "{} != {}".format(out.shape, (a1, b2))
self.handler.dot_add_mm(a.array, b.array, out.array, transa, transb)
@check_for_inf_or_nan
def dot_mm(self, a, b, out, transa=False, transb=False):
assert_debug_arrays(a, b, out)
assert len(a.shape) == 2, "len({}) != 2".format(a.shape)
assert len(b.shape) == 2, "len({}) != 2".format(b.shape)
assert len(out.shape) == 2, "len({}) != 2".format(out.shape)
assert transa in [True, False]
assert transb in [True, False]
a1, a2 = a.shape
b1, b2 = b.shape
if transa:
a1, a2 = a2, a1
if transb:
b1, b2 = b2, b1
assert a2 == b1, "{} != {} ({}, {})".format(a2, b1, transa, transb)
assert out.shape == (a1, b2), "{} != {}".format(out.shape, (a1, b2))
self.handler.dot_mm(a.array, b.array, out.array, transa, transb)
@check_for_inf_or_nan
def divide_mv(self, m, v, out):
assert_debug_arrays(m, v, out)
assert_shapes_equal(m, out)
assert len(m.shape) == 2, "len({}) != 2".format(m.shape)
assert v.shape == (m.shape[0], 1) or v.shape == (1, m.shape[1]), \
"invalid shape {}".format(v.shape)
self.handler.divide_mv(m.array, v.array, out.array)
@check_for_inf_or_nan
def divide_tt(self, a, b, out):
assert_debug_arrays(a, b, out)
assert_shapes_equal(a, b, out)
self.handler.divide_tt(a.array, b.array, out.array)
@check_for_inf_or_nan
def fill_gaussian(self, mean, std, out):
assert_debug_arrays(out)
assert std >= 0.0
self.handler.fill_gaussian(mean, std, out.array)
@check_for_inf_or_nan
def generate_probability_mask(self, mask, probability):
assert_debug_arrays(mask)
assert_is_scalar(probability)
assert 0.0 <= probability <= 1.0, "{}".format(probability)
self.handler.generate_probability_mask(mask.array, probability)
@check_for_inf_or_nan
def index_m_by_v(self, m, v, out):
assert_debug_arrays(m, v, out)
assert_shapes_equal(v, out)
assert len(m.shape) == len(v.shape) == 2
assert v.shape == (m.shape[0], 1)
self.handler.index_m_by_v(m.array, v.array, out.array)
@check_for_inf_or_nan
def log_t(self, a, out):
assert_debug_arrays(a, out)
assert_shapes_equal(a, out)
self.handler.log_t(a.array, out.array)
@check_for_inf_or_nan
def maxpool2d_backward_batch(self, inputs, window, outputs, padding,
stride, argmax, in_deltas, out_deltas):
assert_debug_arrays(inputs, outputs, argmax, in_deltas, out_deltas)
assert_is_shape(window)
assert len(window) == 2, "len({}) != 2".format(window)
assert_is_shape(stride)
assert len(stride) == 2, "len({}) != 2".format(stride)
assert isinstance(padding, int) and 0 <= padding, \
"invalid padding {}".format(padding)
assert_shapes_equal(inputs, in_deltas)
assert_shapes_equal(outputs, out_deltas)
# TODO: check shapes of inputs, outputs, argmax
self.handler.maxpool2d_backward_batch(inputs.array, window,
outputs.array,
padding, stride, argmax.array,
in_deltas.array,
out_deltas.array)
@check_for_inf_or_nan
def maxpool2d_forward_batch(self, inputs, window, outputs, padding,
stride, argmax):
assert_debug_arrays(inputs, outputs, argmax)
assert_is_shape(window)
assert len(window) == 2, "len({}) != 2".format(window)
assert_is_shape(stride)
assert len(stride) == 2, "len({}) != 2".format(stride)
assert isinstance(padding, int) and 0 <= padding, \
"invalid padding {}".format(padding)
# TODO: check shapes of inputs, outputs, argmax
self.handler.maxpool2d_forward_batch(inputs.array, window,
outputs.array, padding, stride,
argmax.array)
@check_for_inf_or_nan
def merge_tt(self, a, b, out):
assert(a.shape[-1] + b.shape[-1] == out.shape[-1])
assert_debug_arrays(a, b, out)
self.handler.merge_tt(a.array, b.array, out.array)
@check_for_inf_or_nan
def modulo_tt(self, a, b, out):
assert_debug_arrays(a, b, out)
assert_shapes_equal(a, b, out)
self.handler.modulo_tt(a.array, b.array, out.array)
@check_for_inf_or_nan
def mult_add_st(self, s, t, out):
assert_debug_arrays(t, out)
assert_is_scalar(s)
assert_shapes_equal(t, out)
self.handler.mult_add_st(s, t.array, out.array)
@check_for_inf_or_nan
def mult_add_tt(self, a, b, out):
assert_debug_arrays(a, b, out)
assert_shapes_equal(a, b, out)
self.handler.mult_add_tt(a.array, b.array, out.array)
@check_for_inf_or_nan
def mult_mv(self, m, v, out):
assert_debug_arrays(m, v, out)
assert_shapes_equal(m, out)
assert len(m.shape) == 2, "len({}) != 2".format(m.shape)
assert v.shape in [(m.shape[0], 1), (1, m.shape[1]), m.shape],\
"invalid shape {} (m.shape = {})".format(v.shape, m.shape)
self.handler.mult_mv(m.array, v.array, out.array)
@check_for_inf_or_nan
def mult_add_mv(self, m, v, out):
assert_debug_arrays(m, v, out)
assert_shapes_equal(m, out)
assert len(m.shape) == 2, "len({}) != 2".format(m.shape)
assert v.shape == (m.shape[0], 1) or v.shape == (1, m.shape[1]), \
"invalid shape {}".format(v.shape)
self.handler.mult_add_mv(m.array, v.array, out.array)
@check_for_inf_or_nan
def mult_st(self, s, t, out):
assert_debug_arrays(t, out)
assert_is_scalar(s)
assert_shapes_equal(t, out)
self.handler.mult_st(s, t.array, out.array)
@check_for_inf_or_nan
def mult_tt(self, a, b, out):
assert_debug_arrays(a, b, out)
assert_shapes_equal(a, b, out)
self.handler.mult_tt(a.array, b.array, out.array)
@check_for_inf_or_nan
def sign_t(self, a, out):
assert_debug_arrays(a, out)
assert_shapes_equal(a, out)
self.handler.sign_t(a.array, out.array)
@check_for_inf_or_nan
def split_add_tt(self, x, out_a, out_b):
assert(out_a.shape[-1] + out_b.shape[-1] == x.shape[-1])
assert_debug_arrays(out_a, out_b, x)
self.handler.split_add_tt(x.array, out_a.array, out_b.array)
@check_for_inf_or_nan
def sqrt_t(self, a, out):
assert_debug_arrays(a, out)
assert_shapes_equal(a, out)
self.handler.sqrt_t(a.array, out.array)
@check_for_inf_or_nan
def subtract_mv(self, m, v, out):
assert_debug_arrays(m, v, out)
assert_shapes_equal(m, out)
assert len(m.shape) == 2, "len({}) != 2".format(m.shape)
assert v.shape == (m.shape[0], 1) or v.shape == (1, m.shape[1]), \
"invalid shape {}".format(v.shape)
self.handler.subtract_mv(m.array, v.array, out.array)
@check_for_inf_or_nan
def subtract_tt(self, a, b, out):
assert_debug_arrays(a, b, out)
assert_shapes_equal(a, b, out)
self.handler.subtract_tt(a.array, b.array, out.array)
@check_for_inf_or_nan
def sum_t(self, a, axis, out):
assert_debug_arrays(a, out)
dims = len(a.shape)
assert axis is None or (isinstance(axis, int) and 0 <= axis < dims),\
"invalid axis {}".format(axis)
# TODO check shapes of a and out
self.handler.sum_t(a.array, axis, out.array)
# ------------------------ Activation functions ------------------------- #
@check_for_inf_or_nan
def sigmoid(self, x, y):
assert_debug_arrays(x, y)
assert_shapes_equal(x, y)
self.handler.sigmoid(x.array, y.array)
@check_for_inf_or_nan
def sigmoid_deriv(self, x, y, dy, dx):
assert_debug_arrays(y, dy, dx)
assert_shapes_equal(y, dy, dx)
if x is not None:
assert_debug_arrays(x)
assert_shapes_equal(x, y)
x = x.array
self.handler.sigmoid_deriv(x, y.array, dy.array, dx.array)
@check_for_inf_or_nan
def tanh(self, x, y):
assert_debug_arrays(x, y)
assert_shapes_equal(x, y)
self.handler.tanh(x.array, y.array)
@check_for_inf_or_nan
def tanh_deriv(self, x, y, dy, dx):
assert_debug_arrays(y, dy, dx)
assert_shapes_equal(y, dy, dx)
if x is not None:
assert_debug_arrays(x)
assert_shapes_equal(x, y)
x = x.array
self.handler.tanh_deriv(x, y.array, dy.array, dx.array)
@check_for_inf_or_nan
def rel(self, x, y):
assert_debug_arrays(x, y)
assert_shapes_equal(x, y)
self.handler.rel(x.array, y.array)
@check_for_inf_or_nan
def rel_deriv(self, x, y, dy, dx):
assert_debug_arrays(y, dy, dx)
assert_shapes_equal(y, dy, dx)
if x is not None:
assert_debug_arrays(x)
assert_shapes_equal(x, y)
x = x.array
self.handler.rel_deriv(x, y.array, dy.array, dx.array)
@check_for_inf_or_nan
def el(self, x, y):
assert_debug_arrays(x, y)
assert_shapes_equal(x, y)
self.handler.el(x.array, y.array)
@check_for_inf_or_nan
def el_deriv(self, x, y, dy, dx):
assert_debug_arrays(y, dy, dx)
assert_shapes_equal(y, dy, dx)
if x is not None:
assert_debug_arrays(x)
assert_shapes_equal(x, y)
x = x.array
self.handler.el_deriv(x, y.array, dy.array, dx.array)
@check_for_inf_or_nan
def softmax_m(self, m, out):
assert_debug_arrays(m, out)
assert_shapes_equal(m, out)
assert len(m.shape) == 2, "len({}) != 2".format(m.shape)
self.handler.softmax_m(m.array, out.array)
# ############################ Helper Methods ############################### #
def assert_is_shape(shape):
assert isinstance(shape, tuple), type(shape)
for i in shape:
assert isinstance(i, int), "{} was {}".format(i, type(i))
assert 0 <= i, "{} < 0".format(i)
def assert_debug_arrays(*arrays):
for i, arr in enumerate(arrays):
assert isinstance(arr, DebugArray), \
"{}. is no DebugArray but a {}".format(i, type(arr))
def assert_shapes_equal(ref_shape, *shapes):
if isinstance(ref_shape, DebugArray):
ref_shape = ref_shape.shape
assert_is_shape(ref_shape)
for i, shape in enumerate(shapes, start=1):
if isinstance(shape, DebugArray):
shape = shape.shape
assert_is_shape(shape)
assert shape == ref_shape, \
"Shape mismatch: {}[arg_nr={}] != {}[arg_nr=0]".format(shape, i,
ref_shape)
def assert_is_scalar(s):
assert isinstance(s, (int, float)), \
"{} is not a scalar but a {}".format(s, type(s))
|
import scrapy
class ImageItem(scrapy.Item):
image_urls = scrapy.Field()
images = scrapy.Field()
# The image_urls and images field names are fixed (they are what Scrapy's ImagesPipeline expects).
|
# Print the Fibonacci numbers below 1000.
x, y = 0, 1
while x < 1000:
    print(x)
    x, y = y, x + y
|
#!/usr/bin/env python
# coding: utf-8
# Designed by ARH.
# ticker = 'BBVA';
import os, sys
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
ticker = sys.argv[1]
print('****************************')
print('* Reading ticker: {0}.'.format(ticker))
# We define the global url from investing equities
URL_INIT = 'https://es.investing.com/equities/'
# We define a dictionary that contains the tickers as well as their corresponding dividend sites.
ticker_dict = {
'ANA': 'acciona-sa-dividends/',
'ACX': 'acerinox-dividends/',
'ACS': 'acs-cons-y-serv-dividends/',
'AENA': 'aena-aeropuertos-sa-dividends/',
'AMA': 'amadeus-dividends/',
'MTS': 'arcelormittal-reg-dividends?cid=32439/',
'SABE': 'bco-de-sabadell-dividends/',
'BKIA': 'bankia-dividends/',
'BKT': 'bankinter-dividends/',
'BBVA': 'bbva-dividends/',
'CABK': 'caixabank-sa-dividends/',
'CLNX': 'cellnex-telecom-dividends/',
'CIEA': 'cie-automotive-sa-dividends/',
'COL': 'inmob-colonial-dividends/',
'ENAG': 'enagas-dividends/',
'ENC': 'ence-energia-y-celulosa-sa-dividends/',
'ELE': 'endesa-dividends/',
'FER': 'grupo-ferrovial-dividends/',
'GRLS': 'grifols-dividends/',
'ICAG': 'intl.-cons.-air-grp-dividends?cid=13809/',
'IBE': 'iberdrola-dividends/',
'ITX': 'inditex-dividends/',
'IDR': 'indra-sistemas-dividends/',
'MAP': 'mapfre-dividends/',
'MASM': 'world-wide-web-ibercom-s.a.-dividends/',
'TL5': 'mediaset-esp-dividends/',
'MEL': 'melia-hotels-international-sa-dividends/',
'MRL': 'merlin-properties-sa-dividends/',
'NTGY': 'gas-natural-sdg-dividends/',
'REE': 'red-electrica-dividends/',
'REP': 'repsol-ypf-dividends/',
'SAN': 'banco-santander-dividends/',
'SGREN': 'gamesa-dividends/',
'TEF': 'telefonica-dividends/',
'VIS': 'viscofan-sa-dividends/',
}
# We define the url and the agent that will allow to download the html file.
url = os.path.join(URL_INIT, ticker_dict[ticker])
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'}
reg_url = url
print('* Downloading webpage.')
# We make the request petition to investing.
req = Request(url=reg_url, headers=headers)
string = urlopen(req).read()
print('* Parsing HTML.')
# We use BeautifulSoup to parse the html information delivered by the website.
soup = BeautifulSoup(string, 'html.parser')
string_decoded = string.decode('utf-8');
'''
We locate the dividends table directly in the html text. The identifier is
'<table id="dividendsHistoryDat', the prefix of the id given to the table that
contains the dividend data.
'''
# We essentially look for our string line by line in the html text.
for i in string_decoded.split('\n'):
if '<table id="dividendsHistoryDat' in i:
line = i;
# Once we find its real identifier, we use it to get the table with all its content.
table_id = line.split('"')[1];
table = soup.find(id=table_id);
# We get the different cells available at the table
data_list = table.findAll('tr');
'''
We read the table content and store it in a dictionary. We store both the quantity and
the pay day.
'''
pay_info = [];
for i in data_list[1:]:
pay_info.append({
'payday': i.findAll('td')[3].text,
'quantity': float(i.findAll('td')[1].text.replace(',','.')),
})
print('* Writing csv file.')
# We write the outcome in a text file.
import csv
csv_file = "{0}_dividends.csv".format(ticker)
csv_columns = ['quantity', 'payday']
try:
with open(csv_file, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=csv_columns)
writer.writeheader()
for data in pay_info:
writer.writerow(data)
except IOError:
print("I/O error")
print('****************************')
|
# -*- coding: utf-8 -*-
# Adds extra information to the .csv files produced by OpenTsiolkovsky,
# in particular IIP- and antenna-related values.
# Reads the csv files in the output folder and writes them out as *_extend.csv.
# **** Usage ****
# python extend_output.py (input_json_file)
#
# The radius of the IIP dispersion circle is computed from the displacement of the
# IIP obtained by assuming the rocket, at its current attitude, accelerates sideways
# by the velocity it would gain up to the engine cut-off time.
#
# pyproj is used as a library, so install pyproj first:
# conda install pyproj
# pip install pyproj
#
# Copyright (c) 2016 Interstellar Technologies Inc. Takahiro Inagawa
# Released under the MIT license
import sys
import platform
import numpy as np
# import matplotlib as mpl
import matplotlib.pyplot as plt
# import matplotlib.font_manager
# from matplotlib.font_manager import FontProperties
# from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
import json
from pyproj import Geod
from collections import namedtuple
from numpy import sin, cos, sqrt, arctan2, arcsin, pi
plt.ion()
# Constants
g = Geod(ellps='WGS84')
WGS84 = namedtuple('WGS84', ['re_a',  # [m] WGS84 semi-major axis
                             'eccen1',  # First Eccentricity
                             'eccen1sqr',  # First Eccentricity squared
                             'one_f',  # 1/f (reciprocal of the flattening f)
                             're_b',  # [m] WGS84 semi-minor axis
                             'e2',  # square of the first eccentricity e
                             'ed2'  # square of the second eccentricity e'
                             ])
wgs84 = WGS84(6378137.0, 8.1819190842622e-2, 6.69437999014e-3, 298.257223563,
6356752.314245, 6.6943799901414e-3, 6.739496742276486e-3)
Earth = namedtuple('Earth', ['omega'])  # Earth rotation angular velocity [rad/s]
earth = Earth(7.2921159e-5)
deg2rad = lambda deg: deg * np.pi / 180.0
rad2deg = lambda rad: rad * 180.0 / np.pi
def dcmECI2ECEF(second):
theta = earth.omega * second
dcm = np.array([[cos(theta), sin(theta), 0.0],
[-sin(theta), cos(theta), 0.0],
[0.0, 0.0, 1.0]])
return dcm
def n_posECEF2LLH(phi_n_deg):
return wgs84.re_a / sqrt(1.0 - wgs84.e2 * sin(deg2rad(phi_n_deg)) * sin(deg2rad(phi_n_deg)))
def posLLH(posECEF_):
    # returns [latitude(deg), longitude(deg), altitude(m)]
p = sqrt(posECEF_[0] **2 + posECEF_[1] **2)
theta = arctan2(posECEF_[2] * wgs84.re_a, p * wgs84.re_b) # rad
lat = rad2deg(arctan2(posECEF_[2] + wgs84.ed2 * wgs84.re_b * pow(sin(theta), 3), p - wgs84.e2 * wgs84.re_a * pow(cos(theta),3)))
lon = rad2deg(arctan2(posECEF_[1], posECEF_[0]))
alt = p / cos(deg2rad(lat)) - n_posECEF2LLH(lat)
return np.array([lat, lon, alt])
def dcmECEF2NED(posLLH_):
lat = deg2rad(posLLH_[0])
lon = deg2rad(posLLH_[1])
dcm = np.array([[-sin(lat)*cos(lon), -sin(lat)*sin(lon), cos(lat)],
[-sin(lon), cos(lon), 0],
[-cos(lat)*cos(lon), -cos(lat)*sin(lon), -sin(lat)]])
return dcm
def dcmECI2NED(dcmECEF2NED, dcmECI2ECEF):
return dcmECEF2NED.dot(dcmECI2ECEF)
def posECEF(dcmECI2ECEF, posECI_):
return dcmECI2ECEF.dot(posECI_)
def posECEF_from_LLH(posLLH_):
lat = deg2rad(posLLH_[0])
lon = deg2rad(posLLH_[1])
alt = posLLH_[2]
W = sqrt(1.0 - wgs84.e2 * sin(lat) * sin(lat))
N = wgs84.re_a / W
pos0 = (N + alt) * cos(lat) * cos(lon)
pos1 = (N + alt) * cos(lat) * sin(lon)
pos2 = (N * (1 - wgs84.e2) + alt) * sin(lat)
return np.array([pos0, pos1, pos2])
def posLLH_IIP(t, posECI_, vel_ECEF_NEDframe_):
g0 = 9.80665
dcmECI2ECEF_ = dcmECI2ECEF(t)
posLLH_ = posLLH(posECEF(dcmECI2ECEF_, posECI_))
dcmNED2ECI_ = dcmECI2NED(dcmECEF2NED(posLLH_), dcmECI2ECEF_).T
vel_north_ = vel_ECEF_NEDframe_[0]
vel_east_ = vel_ECEF_NEDframe_[1]
vel_up_ = - vel_ECEF_NEDframe_[2]
h = posLLH_[2]
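    # Ballistic time to ground: the positive root of h + vel_up_*tau - g0*tau**2/2 = 0.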
tau = 1.0/g0 * (vel_up_ + sqrt(vel_up_**2 + 2 * h * g0))
dist_IIP_from_now_NED = np.array([vel_north_ * tau, vel_east_ * tau, -h])
posECI_IIP_ = posECI_ + dcmNED2ECI_.dot(dist_IIP_from_now_NED)
posECEF_IIP_ = posECEF(dcmECI2ECEF(t), posECI_IIP_)
return posLLH(posECEF_IIP_)
def distance_surface(pos0_LLH_, pos1_LLH_):
    earth_radius = 6378137  # Earth radius [m]
pos0_ECEF_ = posECEF_from_LLH(pos0_LLH_)
pos1_ECEF_ = posECEF_from_LLH(pos1_LLH_)
    theta = np.arccos(np.dot(pos0_ECEF_, pos1_ECEF_) / np.linalg.norm(pos0_ECEF_) / np.linalg.norm(pos1_ECEF_))  # central angle [rad]
return earth_radius * theta
def radius_IIP(t, posECI_, vel_ECEF_NEDframe_, cutoff_time, thrust, weight):
delta_vel = thrust / weight * cutoff_time
point_IIP = posLLH_IIP(t, posECI_, vel_ECEF_NEDframe_)
delta_IIP = posLLH_IIP(t, posECI_, vel_ECEF_NEDframe_ + delta_vel * np.array([1,1,0]))
_a, _b, distance_2d = g.inv(point_IIP[1], point_IIP[0], delta_IIP[1], delta_IIP[0])
return distance_2d
def antenna_param(antenna_LLH_, rocket_LLH_):
# g = Geod(ellps='WGS84')
azimuth, back_azimuth, distance_2d = g.inv(antenna_LLH_[1], antenna_LLH_[0], rocket_LLH_[1], rocket_LLH_[0])
elevation = np.rad2deg(np.arctan2(rocket_LLH_[2], distance_2d))
distance_3d = np.hypot(distance_2d, rocket_LLH_[2])
return distance_2d, distance_3d, azimuth, elevation
def radius_visible(altitude, invalid_angle_deg = 3):
    # Compute the radius of the visible range from the rocket altitude and the
    # minimum usable (invalid) elevation angle.
    # Returns the visible radius in metres.
    re = 6378137  # Earth radius [m]
epsilon = np.deg2rad(invalid_angle_deg)
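    # Central angle from the sub-rocket point to the circle where the rocket is seen
    # at elevation epsilon: rearranged from cos(phi + epsilon) = re/(re + altitude) * cos(epsilon).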
phi = np.arccos(re/(re+altitude) * np.cos(epsilon)) - epsilon
return phi * re
if __name__ == '__main__':
# ==== USER INPUT ====
    # Reference: MOMO ground-station antenna (UHF)
    antenna_lat = 42.5039248  # ground-station latitude (deg)
    antenna_lon = 143.44954216  # ground-station longitude (deg)
    antenna_alt = 25.0  # ground-station altitude (m)
    cutoff_time = 2.0  # engine cut-off time used for the IIP dispersion calculation (s)
    invalid_angle_deg = 3.0  # lower limit of the visible elevation angle for the visible-range calculation (deg)
# ==== USER INPUT ====
    argvs = sys.argv  # list of command-line arguments
    argc = len(argvs)  # number of arguments
if (argc != 1):
file_name = argvs[1]
else:
file_name = "param_sample_01.json"
    # Echo the input parameters
print("==== INPUT PARAMETER ===")
print("input JSON file : " + file_name)
print("viewer latitude (deg) : %.6f" % antenna_lat)
print("viewer longitude (deg) : %.6f" % antenna_lon)
print("viewer altitude (m) : %.1f" % antenna_alt)
print("IIP cut-off time (sec) : %.1f" % cutoff_time)
print("visible range invalid angle (deg) : %.1f" % invalid_angle_deg)
print("==== PROCESSING ====")
    # Read the input JSON file
f = open(file_name)
data = json.load(f)
following_stage_exist = []
rocket_name = data["name(str)"]
following_stage_exist.append(data["stage1"]["stage"]["following stage exist?(bool)"])
if ("stage2" in data):
following_stage_exist.append(data["stage2"]["stage"]["following stage exist?(bool)"])
if ("stage3" in data):
following_stage_exist.append(data["stage3"]["stage"]["following stage exist?(bool)"])
    # Build the extended data for each stage
stage_index = 1
for stage_exist in following_stage_exist:
print("Now processing " + str(stage_index) + " stage csv file ...")
file_name = "output/" + rocket_name + "_dynamics_" + str(stage_index) + ".csv"
df = pd.read_csv(file_name, index_col=False)
posLLH_antenna = np.array([antenna_lat, antenna_lon, antenna_alt])
# posLLH_antenna = np.array([df[" lat(deg)"][0], df[" lon(deg)"][0], df[" altitude(m)"][0]])
dis2_a = []
dis3_a = []
az_a = []
el_a = []
radius_IIP_a = []
radius_visible_a = []
for key, row in df.iterrows():
time = row[0]
mass = row[1]
thrust = row[2]
posLLH_ = np.array([row[3], row[4], row[5]])
posECI_ = np.array([row[6], row[7], row[8]])
vel_ECEF_NEDframe = np.array([row[12], row[13], row[14]])
dis2, dis3, az, el = antenna_param(posLLH_antenna, posLLH_)
radius_IIP_ = radius_IIP(time, posECI_, vel_ECEF_NEDframe, cutoff_time, thrust, mass)
radius_visible_ = radius_visible(posLLH_[2], invalid_angle_deg)
dis2_a.append(dis2)
dis3_a.append(dis3)
az_a.append(az)
el_a.append(el)
radius_IIP_a.append(radius_IIP_)
radius_visible_a.append(radius_visible_)
df["distance 2d(m)"] = dis2_a
df["distance 3d(m)"] = dis3_a
df["antenna lat(deg)"] = antenna_lat
df["antenna lon(deg)"] = antenna_lon
df["antenna azimuth(deg)"] = az_a
df["antenna elevation(deg)"] = el_a
df["antenna body difference(deg)"] = df["attitude_elevation(deg)"] - df["antenna elevation(deg)"]
df["IIP radius(m)"] = radius_IIP_a
        # Write the extended csv file
df.to_csv("output/" + rocket_name + "_dynamics_" + str(stage_index) + "_extend.csv", index=False)
stage_index += 1
# PLOT
plt.figure()
plt.plot(df["time(s)"], dis2_a, label="distance 2d")
plt.plot(df["time(s)"], dis3_a, label="distance 3d")
plt.title(rocket_name + " " + str(stage_index) + " stage distance")
plt.xlabel("time (s)")
plt.ylabel("distance (m)")
plt.legend(loc="best")
plt.grid()
plt.figure()
plt.plot(df["time(s)"], az_a, label="azimuth")
plt.plot(df["time(s)"], el_a, label="elevation")
plt.title(rocket_name + " " + str(stage_index) + " stage antenna angle")
plt.xlabel("time (s)")
plt.ylabel("angle (deg)")
plt.legend(loc="best")
plt.grid()
plt.figure()
plt.plot(df["time(s)"], radius_IIP_a, label="IIP radius\ncut-off time = %.1f sec" % (cutoff_time))
plt.title(rocket_name + " " + str(stage_index) + " stage IIP radius")
plt.xlabel("time (s)")
plt.ylabel("radius (m)")
plt.legend(loc="best")
plt.grid()
# plt.show()
if (stage_exist == False): break
|
""" Searches using MCMC-based methods """
import sys
import os
import copy
import logging
from collections import OrderedDict
import subprocess
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from ptemcee import Sampler as PTSampler
import corner
import dill as pickle
import pyfstat.core as core
from pyfstat.core import tqdm, args, read_par
import pyfstat.optimal_setup_functions as optimal_setup_functions
import pyfstat.helper_functions as helper_functions
class MCMCSearch(core.BaseSearchClass):
"""MCMC search using ComputeFstat
Parameters
----------
theta_prior: dict
Dictionary of priors and fixed values for the search parameters.
        For each parameter (key of the dict), if it is to be held fixed
        the value should be a constant float; if it is to be searched, the
value should be a dictionary of the prior.
tref, minStartTime, maxStartTime: int
GPS seconds of the reference time, start time and end time. While tref
        is required, minStartTime and maxStartTime default to None, in which
case all available data is used.
label, outdir: str
A label and output directory (optional, defaults is `'data'`) to
name files
sftfilepattern: str, optional
Pattern to match SFTs using wildcards (*?) and ranges [0-9];
        multiple patterns can be given separated by colons.
detectors: str, optional
Two character reference to the detectors to use, specify None for no
        constraint, and comma-separate for multiple references.
nsteps: list (2,), optional
Number of burn-in and production steps to take, [nburn, nprod]. See
`pyfstat.MCMCSearch.setup_initialisation()` for details on adding
initialisation steps.
nwalkers, ntemps: int, optional
        The number of walkers and temperatures to use in the parallel
tempered PTSampler.
    log10beta_min: float < 0, optional
        The log_10(beta) value; if given, the set of betas passed to PTSampler
are generated from `np.logspace(0, log10beta_min, ntemps)` (given
in descending order to ptemcee).
theta_initial: dict, array, optional
        A dictionary of distributions from which the initial walker
        positions are drawn
rhohatmax: float, optional
Upper bound for the SNR scale parameter (required to normalise the
Bayes factor) - this needs to be carefully set when using the
evidence.
binary: bool, optional
If true, search over binary parameters
BSGL: bool, optional
If true, use the BSGL statistic
    SSBprec: int, optional
        SSBprec (SSB precision) to use when calling ComputeFstat
minCoverFreq, maxCoverFreq: float, optional
Minimum and maximum instantaneous frequency which will be covered
over the SFT time span as passed to CreateFstatInput
injectSources: dict, optional
If given, inject these properties into the SFT files before running
the search
assumeSqrtSX: float, optional
Don't estimate noise-floors, but assume (stationary) per-IFO sqrt{SX}
transientWindowType: str
If 'rect' or 'exp',
compute atoms so that a transient (t0,tau) map can later be computed.
('none' instead of None explicitly calls the transient-window function,
but with the full range, for debugging)
Currently only supported for nsegs=1.
tCWFstatMapVersion: str
Choose between standard 'lal' implementation,
'pycuda' for gpu, and some others for devel/debug.
Attributes
----------
symbol_dictionary: dict
Key, val pairs of the parameters (i.e. `F0`, `F1`), to Latex math
symbols for plots
unit_dictionary: dict
Key, val pairs of the parameters (i.e. `F0`, `F1`), and the
units (i.e. `Hz`)
transform_dictionary: dict
Key, val pairs of the parameters (i.e. `F0`, `F1`), where the key is
itself a dictionary which can item `multiplier`, `subtractor`, or
`unit` by which to transform by and update the units.
"""
symbol_dictionary = dict(
F0="$f$",
F1="$\dot{f}$",
F2="$\ddot{f}$",
Alpha=r"$\alpha$",
Delta="$\delta$",
asini="asini",
period="P",
ecc="ecc",
tp="tp",
argp="argp",
)
unit_dictionary = dict(
F0="Hz",
F1="Hz/s",
F2="Hz/s$^2$",
Alpha=r"rad",
Delta="rad",
asini="",
period="s",
ecc="",
tp="",
argp="",
)
transform_dictionary = {}
def __init__(
self,
theta_prior,
tref,
label,
outdir="data",
minStartTime=None,
maxStartTime=None,
sftfilepattern=None,
detectors=None,
nsteps=[100, 100],
nwalkers=100,
ntemps=1,
log10beta_min=-5,
theta_initial=None,
rhohatmax=1000,
binary=False,
BSGL=False,
SSBprec=None,
minCoverFreq=None,
maxCoverFreq=None,
injectSources=None,
assumeSqrtSX=None,
transientWindowType=None,
tCWFstatMapVersion="lal",
):
self.theta_prior = theta_prior
self.tref = tref
self.label = label
self.outdir = outdir
self.minStartTime = minStartTime
self.maxStartTime = maxStartTime
self.sftfilepattern = sftfilepattern
self.detectors = detectors
self.nsteps = nsteps
self.nwalkers = nwalkers
self.ntemps = ntemps
self.log10beta_min = log10beta_min
self.theta_initial = theta_initial
self.rhohatmax = rhohatmax
self.binary = binary
self.BSGL = BSGL
self.SSBprec = SSBprec
self.minCoverFreq = minCoverFreq
self.maxCoverFreq = maxCoverFreq
self.injectSources = injectSources
self.assumeSqrtSX = assumeSqrtSX
self.transientWindowType = transientWindowType
self.tCWFstatMapVersion = tCWFstatMapVersion
if os.path.isdir(outdir) is False:
os.mkdir(outdir)
self._add_log_file()
logging.info("Set-up MCMC search for model {}".format(self.label))
if sftfilepattern:
logging.info("Using data {}".format(self.sftfilepattern))
else:
logging.info("No sftfilepattern given")
if injectSources:
logging.info("Inject sources: {}".format(injectSources))
self.pickle_path = "{}/{}_saved_data.p".format(self.outdir, self.label)
self._unpack_input_theta()
self.ndim = len(self.theta_keys)
if self.log10beta_min:
self.betas = np.logspace(0, self.log10beta_min, self.ntemps)
else:
self.betas = None
if args.clean and os.path.isfile(self.pickle_path):
os.rename(self.pickle_path, self.pickle_path + ".old")
self._set_likelihoodcoef()
self._log_input()
def _set_likelihoodcoef(self):
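        # Constant term log(70 / rhohatmax**4): rhohatmax bounds the SNR scale
        # parameter used to normalise the Bayes factor (see the class docstring);
        # the coefficient is added to the log-likelihood in logl().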
self.likelihoodcoef = np.log(70.0 / self.rhohatmax ** 4)
def _log_input(self):
logging.info("theta_prior = {}".format(self.theta_prior))
logging.info("nwalkers={}".format(self.nwalkers))
logging.info("nsteps = {}".format(self.nsteps))
logging.info("ntemps = {}".format(self.ntemps))
logging.info("log10beta_min = {}".format(self.log10beta_min))
def _initiate_search_object(self):
logging.info("Setting up search object")
self.search = core.ComputeFstat(
tref=self.tref,
sftfilepattern=self.sftfilepattern,
minCoverFreq=self.minCoverFreq,
maxCoverFreq=self.maxCoverFreq,
detectors=self.detectors,
BSGL=self.BSGL,
transientWindowType=self.transientWindowType,
minStartTime=self.minStartTime,
maxStartTime=self.maxStartTime,
binary=self.binary,
injectSources=self.injectSources,
assumeSqrtSX=self.assumeSqrtSX,
SSBprec=self.SSBprec,
tCWFstatMapVersion=self.tCWFstatMapVersion,
)
if self.minStartTime is None:
self.minStartTime = self.search.minStartTime
if self.maxStartTime is None:
self.maxStartTime = self.search.maxStartTime
def logp(self, theta_vals, theta_prior, theta_keys, search):
H = [
self._generic_lnprior(**theta_prior[key])(p)
for p, key in zip(theta_vals, theta_keys)
]
return np.sum(H)
def logl(self, theta, search):
for j, theta_i in enumerate(self.theta_idxs):
self.fixed_theta[theta_i] = theta[j]
twoF = search.get_fullycoherent_twoF(
self.minStartTime, self.maxStartTime, *self.fixed_theta
)
return twoF / 2.0 + self.likelihoodcoef
def _unpack_input_theta(self):
full_theta_keys = ["F0", "F1", "F2", "Alpha", "Delta"]
if self.binary:
full_theta_keys += ["asini", "period", "ecc", "tp", "argp"]
full_theta_keys_copy = copy.copy(full_theta_keys)
full_theta_symbols = [
"$f$",
"$\dot{f}$",
"$\ddot{f}$",
r"$\alpha$",
r"$\delta$",
]
if self.binary:
full_theta_symbols += ["asini", "period", "ecc", "tp", "argp"]
self.theta_keys = []
fixed_theta_dict = {}
for key, val in self.theta_prior.items():
if type(val) is dict:
fixed_theta_dict[key] = 0
self.theta_keys.append(key)
elif type(val) in [float, int, np.float64]:
fixed_theta_dict[key] = val
else:
raise ValueError(
"Type {} of {} in theta not recognised".format(type(val), key)
)
full_theta_keys_copy.pop(full_theta_keys_copy.index(key))
if len(full_theta_keys_copy) > 0:
raise ValueError(
("Input dictionary `theta` is missing the" "following keys: {}").format(
full_theta_keys_copy
)
)
self.fixed_theta = [fixed_theta_dict[key] for key in full_theta_keys]
self.theta_idxs = [full_theta_keys.index(k) for k in self.theta_keys]
self.theta_symbols = [full_theta_symbols[i] for i in self.theta_idxs]
idxs = np.argsort(self.theta_idxs)
self.theta_idxs = [self.theta_idxs[i] for i in idxs]
self.theta_symbols = [self.theta_symbols[i] for i in idxs]
self.theta_keys = [self.theta_keys[i] for i in idxs]
def _evaluate_logpost(self, p0vec):
init_logp = np.array(
[
self.logp(p, self.theta_prior, self.theta_keys, self.search)
for p in p0vec
]
)
init_logl = np.array([self.logl(p, self.search) for p in p0vec])
return init_logl + init_logp
def _check_initial_points(self, p0):
for nt in range(self.ntemps):
logging.info("Checking temperature {} chains".format(nt))
num = sum(self._evaluate_logpost(p0[nt]) == -np.inf)
if num > 0:
logging.warning(
"Of {} initial values, {} are -np.inf due to the prior".format(
len(p0[0]), num
)
)
p0 = self._generate_new_p0_to_fix_initial_points(p0, nt)
def _generate_new_p0_to_fix_initial_points(self, p0, nt):
logging.info("Attempting to correct intial values")
init_logpost = self._evaluate_logpost(p0[nt])
idxs = np.arange(self.nwalkers)[init_logpost == -np.inf]
count = 0
while sum(init_logpost == -np.inf) > 0 and count < 100:
for j in idxs:
p0[nt][j] = p0[nt][np.random.randint(0, self.nwalkers)] * (
1 + np.random.normal(0, 1e-10, self.ndim)
)
init_logpost = self._evaluate_logpost(p0[nt])
count += 1
if sum(init_logpost == -np.inf) > 0:
logging.info("Failed to fix initial priors")
else:
logging.info("Suceeded to fix initial priors")
return p0
def setup_initialisation(self, nburn0, scatter_val=1e-10):
""" Add an initialisation step to the MCMC run
        If called prior to `run()`, adds an initial step in which the MCMC
simulation is run for `nburn0` steps. After this, the MCMC simulation
continues in the usual manner (i.e. for nburn and nprod steps), but the
walkers are reset scattered around the maximum likelihood position
of the initialisation step.
Parameters
----------
nburn0: int
Number of initialisation steps to take
scatter_val: float
Relative number to scatter walkers around the maximum likelihood
position after the initialisation step
"""
logging.info(
"Setting up initialisation with nburn0={}, scatter_val={}".format(
nburn0, scatter_val
)
)
self.nsteps = [nburn0] + self.nsteps
self.scatter_val = scatter_val
# def setup_burnin_convergence_testing(
# self, n=10, test_type='autocorr', windowed=False, **kwargs):
# """ Set up convergence testing during the MCMC simulation
#
# Parameters
# ----------
# n: int
# Number of steps after which to test convergence
# test_type: str ['autocorr', 'GR']
# If 'autocorr' use the exponential autocorrelation time (kwargs
# passed to `get_autocorr_convergence`). If 'GR' use the Gelman-Rubin
# statistic (kwargs passed to `get_GR_convergence`)
# windowed: bool
# If True, only calculate the convergence test in a window of length
# `n`
# **kwargs:
# Passed to either `_test_autocorr_convergence()` or
# `_test_GR_convergence()` depending on `test_type`.
#
# """
# logging.info('Setting up convergence testing')
# self.convergence_n = n
# self.convergence_windowed = windowed
# self.convergence_test_type = test_type
# self.convergence_kwargs = kwargs
# self.convergence_diagnostic = []
# self.convergence_diagnosticx = []
# if test_type in ['autocorr']:
# self._get_convergence_test = self._test_autocorr_convergence
# elif test_type in ['GR']:
# self._get_convergence_test = self._test_GR_convergence
# else:
# raise ValueError('test_type {} not understood'.format(test_type))
#
#
# def _test_autocorr_convergence(self, i, sampler, test=True, n_cut=5):
# try:
# acors = np.zeros((self.ntemps, self.ndim))
# for temp in range(self.ntemps):
# if self.convergence_windowed:
# j = i-self.convergence_n
# else:
# j = 0
# x = np.mean(sampler.chain[temp, :, j:i, :], axis=0)
# acors[temp, :] = emcee.autocorr.exponential_time(x)
# c = np.max(acors, axis=0)
# except emcee.autocorr.AutocorrError:
# logging.info('Failed to calculate exponential autocorrelation')
# c = np.zeros(self.ndim) + np.nan
# except AttributeError:
# logging.info('Unable to calculate exponential autocorrelation')
# c = np.zeros(self.ndim) + np.nan
#
# self.convergence_diagnosticx.append(i - self.convergence_n/2.)
# self.convergence_diagnostic.append(list(c))
#
# if test:
# return i > n_cut * np.max(c)
#
# def _test_GR_convergence(self, i, sampler, test=True, R=1.1):
# if self.convergence_windowed:
# s = sampler.chain[0, :, i-self.convergence_n+1:i+1, :]
# else:
# s = sampler.chain[0, :, :i+1, :]
# N = float(self.convergence_n)
# M = float(self.nwalkers)
# W = np.mean(np.var(s, axis=1), axis=0)
# per_walker_mean = np.mean(s, axis=1)
# mean = np.mean(per_walker_mean, axis=0)
# B = N / (M-1.) * np.sum((per_walker_mean-mean)**2, axis=0)
# Vhat = (N-1)/N * W + (M+1)/(M*N) * B
# c = np.sqrt(Vhat/W)
# self.convergence_diagnostic.append(c)
# self.convergence_diagnosticx.append(i - self.convergence_n/2.)
#
# if test and np.max(c) < R:
# return True
# else:
# return False
#
# def _test_convergence(self, i, sampler, **kwargs):
# if np.mod(i+1, self.convergence_n) == 0:
# return self._get_convergence_test(i, sampler, **kwargs)
# else:
# return False
#
# def _run_sampler_with_conv_test(self, sampler, p0, nprod=0, nburn=0):
# logging.info('Running {} burn-in steps with convergence testing'
# .format(nburn))
# iterator = tqdm(sampler.sample(p0, iterations=nburn), total=nburn)
# for i, output in enumerate(iterator):
# if self._test_convergence(i, sampler, test=True,
# **self.convergence_kwargs):
# logging.info(
# 'Converged at {} before max number {} of steps reached'
# .format(i, nburn))
# self.convergence_idx = i
# break
# iterator.close()
# logging.info('Running {} production steps'.format(nprod))
# j = nburn
# iterator = tqdm(sampler.sample(output[0], iterations=nprod),
# total=nprod)
# for result in iterator:
# self._test_convergence(j, sampler, test=False,
# **self.convergence_kwargs)
# j += 1
# return sampler
def _run_sampler(self, sampler, p0, nprod=0, nburn=0, window=50):
for result in tqdm(
sampler.sample(p0, iterations=nburn + nprod), total=nburn + nprod
):
pass
self.mean_acceptance_fraction = np.mean(sampler.acceptance_fraction, axis=1)
logging.info(
"Mean acceptance fraction: {}".format(self.mean_acceptance_fraction)
)
if self.ntemps > 1:
self.tswap_acceptance_fraction = sampler.tswap_acceptance_fraction
logging.info(
"Tswap acceptance fraction: {}".format(
sampler.tswap_acceptance_fraction
)
)
self.autocorr_time = sampler.get_autocorr_time(window=window)
logging.info("Autocorrelation length: {}".format(self.autocorr_time))
return sampler
def _estimate_run_time(self):
""" Print the estimated run time
Uses timing coefficients based on a Lenovo T460p Intel(R)
Core(TM) i5-6300HQ CPU @ 2.30GHz.
"""
# Todo: add option to time on a machine, and move coefficients to
# ~/.pyfstat.conf
if (
type(self.theta_prior["Alpha"]) == dict
or type(self.theta_prior["Delta"]) == dict
):
tau0LD = 5.2e-7
tau0T = 1.5e-8
tau0S = 1.2e-4
tau0C = 5.8e-6
else:
tau0LD = 1.3e-7
tau0T = 1.5e-8
tau0S = 9.1e-5
tau0C = 5.5e-6
Nsfts = (self.maxStartTime - self.minStartTime) / 1800.0
if hasattr(self, "run_setup"):
ts = []
for row in self.run_setup:
nsteps = row[0]
nsegs = row[1]
numb_evals = np.sum(nsteps) * self.nwalkers * self.ntemps
t = (tau0S + tau0LD * Nsfts) * numb_evals
if nsegs > 1:
t += (tau0C + tau0T * Nsfts) * nsegs * numb_evals
ts.append(t)
time = np.sum(ts)
else:
numb_evals = np.sum(self.nsteps) * self.nwalkers * self.ntemps
time = (tau0S + tau0LD * Nsfts) * numb_evals
if getattr(self, "nsegs", 1) > 1:
time += (tau0C + tau0T * Nsfts) * self.nsegs * numb_evals
logging.info(
"Estimated run-time = {} s = {:1.0f}:{:1.0f} m".format(
time, *divmod(time, 60)
)
)
def run(self, proposal_scale_factor=2, create_plots=True, window=50, **kwargs):
""" Run the MCMC simulatation
Parameters
----------
proposal_scale_factor: float
The proposal scale factor used by the sampler, see Goodman & Weare
(2010). If the acceptance fraction is too low, you can raise it by
decreasing the a parameter; and if it is too high, you can reduce
            it by increasing the a parameter [Foreman-Mackey et al. (2013)].
create_plots: bool
If true, save trace plots of the walkers
window: int
The minimum number of autocorrelation times needed to trust the
result when estimating the autocorrelation time (see
            ptemcee.Sampler.get_autocorr_time for further details).
**kwargs:
Passed to _plot_walkers to control the figures
Returns
-------
sampler: ptemcee.Sampler
The ptemcee ptsampler object
"""
self.old_data_is_okay_to_use = self._check_old_data_is_okay_to_use()
if self.old_data_is_okay_to_use is True:
logging.warning("Using saved data from {}".format(self.pickle_path))
d = self.get_saved_data_dictionary()
self.samples = d["samples"]
self.lnprobs = d["lnprobs"]
self.lnlikes = d["lnlikes"]
self.all_lnlikelihood = d["all_lnlikelihood"]
self.chain = d["chain"]
return
self._initiate_search_object()
self._estimate_run_time()
sampler = PTSampler(
ntemps=self.ntemps,
nwalkers=self.nwalkers,
dim=self.ndim,
logl=self.logl,
logp=self.logp,
logpargs=(self.theta_prior, self.theta_keys, self.search),
loglargs=(self.search,),
betas=self.betas,
a=proposal_scale_factor,
)
p0 = self._generate_initial_p0()
p0 = self._apply_corrections_to_p0(p0)
self._check_initial_points(p0)
# Run initialisation steps if required
ninit_steps = len(self.nsteps) - 2
for j, n in enumerate(self.nsteps[:-2]):
logging.info(
"Running {}/{} initialisation with {} steps".format(j, ninit_steps, n)
)
sampler = self._run_sampler(sampler, p0, nburn=n, window=window)
if create_plots:
fig, axes = self._plot_walkers(sampler, **kwargs)
fig.tight_layout()
fig.savefig(
"{}/{}_init_{}_walkers.png".format(self.outdir, self.label, j)
)
p0 = self._get_new_p0(sampler)
p0 = self._apply_corrections_to_p0(p0)
self._check_initial_points(p0)
sampler.reset()
if len(self.nsteps) > 1:
nburn = self.nsteps[-2]
else:
nburn = 0
nprod = self.nsteps[-1]
logging.info("Running final burn and prod with {} steps".format(nburn + nprod))
sampler = self._run_sampler(sampler, p0, nburn=nburn, nprod=nprod)
if create_plots:
try:
fig, axes = self._plot_walkers(sampler, nprod=nprod, **kwargs)
fig.tight_layout()
fig.savefig("{}/{}_walkers.png".format(self.outdir, self.label))
except RuntimeError as e:
logging.warning("Failed to save walker plots due to Erro {}".format(e))
samples = sampler.chain[0, :, nburn:, :].reshape((-1, self.ndim))
lnprobs = sampler.logprobability[0, :, nburn:].reshape((-1))
lnlikes = sampler.loglikelihood[0, :, nburn:].reshape((-1))
all_lnlikelihood = sampler.loglikelihood[:, :, nburn:]
self.samples = samples
self.chain = sampler.chain
self.lnprobs = lnprobs
self.lnlikes = lnlikes
self.all_lnlikelihood = all_lnlikelihood
self._save_data(
sampler, samples, lnprobs, lnlikes, all_lnlikelihood, sampler.chain
)
return sampler
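    # Minimal usage sketch for run() (keyword values are illustrative): after
    # the call, production samples and log-likelihoods are stored as attributes
    # and pickled to `outdir`.
    #
    #     sampler = mcmc.run(proposal_scale_factor=2, create_plots=True)
    #     print(mcmc.samples.shape)   # (nwalkers * nprod, ndim)
    #     print(mcmc.lnlikes.shape)   # (nwalkers * nprod,)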
def _get_rescale_multiplier_for_key(self, key):
""" Get the rescale multiplier from the transform_dictionary
        Can either be a float, a string (in which case it is interpreted as
        an attribute of the MCMCSearch class, e.g. minStartTime), or
        non-existent, in which case 1 is returned
"""
if key not in self.transform_dictionary:
return 1
if "multiplier" in self.transform_dictionary[key]:
val = self.transform_dictionary[key]["multiplier"]
if type(val) == str:
if hasattr(self, val):
multiplier = getattr(
self, self.transform_dictionary[key]["multiplier"]
)
else:
raise ValueError("multiplier {} not a class attribute".format(val))
else:
multiplier = val
else:
multiplier = 1
return multiplier
def _get_rescale_subtractor_for_key(self, key):
""" Get the rescale subtractor from the transform_dictionary
        Can either be a float, a string (in which case it is interpreted as
        an attribute of the MCMCSearch class, e.g. minStartTime), or
        non-existent, in which case 0 is returned
"""
if key not in self.transform_dictionary:
return 0
if "subtractor" in self.transform_dictionary[key]:
val = self.transform_dictionary[key]["subtractor"]
if type(val) == str:
if hasattr(self, val):
subtractor = getattr(
self, self.transform_dictionary[key]["subtractor"]
)
else:
raise ValueError("subtractor {} not a class attribute".format(val))
else:
subtractor = val
else:
subtractor = 0
return subtractor
def _scale_samples(self, samples, theta_keys):
""" Scale the samples using the transform_dictionary """
for key in theta_keys:
if key in self.transform_dictionary:
idx = theta_keys.index(key)
s = samples[:, idx]
subtractor = self._get_rescale_subtractor_for_key(key)
s = s - subtractor
multiplier = self._get_rescale_multiplier_for_key(key)
s *= multiplier
samples[:, idx] = s
return samples
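    # Sketch of how a transform_dictionary entry drives the rescaling above
    # (this mirrors the MCMCGlitchSearch class attribute defined later in this
    # module): each sample s is mapped to (s - subtractor) * multiplier, with
    # string values resolved to class attributes such as minStartTime.
    #
    #     transform_dictionary = dict(
    #         tglitch={"multiplier": 1 / 86400.0,      # seconds -> days
    #                  "subtractor": "minStartTime",
    #                  "unit": "day"}
    #     )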
def _get_labels(self, newline_units=False):
""" Combine the units, symbols and rescaling to give labels """
labels = []
for key in self.theta_keys:
label = None
s = self.symbol_dictionary[key]
            s = s.replace("_{glitch}", r"_\textrm{glitch}")
u = self.unit_dictionary[key]
if key in self.transform_dictionary:
if "symbol" in self.transform_dictionary[key]:
s = self.transform_dictionary[key]["symbol"]
if "label" in self.transform_dictionary[key]:
label = self.transform_dictionary[key]["label"]
if "unit" in self.transform_dictionary[key]:
u = self.transform_dictionary[key]["unit"]
if label is None:
if newline_units:
label = "{} \n [{}]".format(s, u)
else:
label = "{} [{}]".format(s, u)
labels.append(label)
return labels
def plot_corner(
self,
figsize=(7, 7),
add_prior=False,
nstds=None,
label_offset=0.4,
dpi=300,
rc_context={},
tglitch_ratio=False,
fig_and_axes=None,
save_fig=True,
**kwargs
):
""" Generate a corner plot of the posterior
Using the `corner` package (https://pypi.python.org/pypi/corner/),
generate estimates of the posterior from the production samples.
Parameters
----------
figsize: tuple (7, 7)
Figure size in inches (passed to plt.subplots)
add_prior: bool, str
If true, plot the prior as a red line. If 'full' then for uniform
priors plot the full extent of the prior.
nstds: float
The number of standard deviations to plot centered on the mean
label_offset: float
            Offset the labels from the plot: useful to prevent overlapping the
tick labels with the axis labels
dpi: int
Passed to plt.savefig
rc_context: dict
Dictionary of rc values to set while generating the figure (see
matplotlib rc for more details)
tglitch_ratio: bool
If true, and tglitch is a parameter, plot posteriors as the
fractional time at which the glitch occurs instead of the actual
time
fig_and_axes: tuple
fig and axes to plot on, the axes must be of the right shape,
namely (ndim, ndim)
save_fig: bool
If true, save the figure, else return the fig, axes
**kwargs:
Passed to corner.corner
Returns
-------
fig, axes:
The matplotlib figure and axes, only returned if save_fig = False
"""
if "truths" in kwargs and len(kwargs["truths"]) != self.ndim:
logging.warning("len(Truths) != ndim, Truths will be ignored")
kwargs["truths"] = None
if self.ndim < 2:
with plt.rc_context(rc_context):
if fig_and_axes is None:
fig, ax = plt.subplots(figsize=figsize)
else:
fig, ax = fig_and_axes
ax.hist(self.samples, bins=50, histtype="stepfilled")
ax.set_xlabel(self.theta_symbols[0])
fig.savefig("{}/{}_corner.png".format(self.outdir, self.label), dpi=dpi)
return
with plt.rc_context(rc_context):
if fig_and_axes is None:
fig, axes = plt.subplots(self.ndim, self.ndim, figsize=figsize)
else:
fig, axes = fig_and_axes
samples_plt = copy.copy(self.samples)
labels = self._get_labels(newline_units=True)
samples_plt = self._scale_samples(samples_plt, self.theta_keys)
if tglitch_ratio:
for j, k in enumerate(self.theta_keys):
if k == "tglitch":
s = samples_plt[:, j]
samples_plt[:, j] = (s - self.minStartTime) / (
self.maxStartTime - self.minStartTime
)
labels[j] = r"$R_{\textrm{glitch}}$"
            if isinstance(nstds, (int, float)) and "range" not in kwargs:
_range = []
for j, s in enumerate(samples_plt.T):
median = np.median(s)
std = np.std(s)
_range.append((median - nstds * std, median + nstds * std))
elif "range" in kwargs:
_range = kwargs.pop("range")
else:
_range = None
hist_kwargs = kwargs.pop("hist_kwargs", dict())
if "normed" not in hist_kwargs:
hist_kwargs["normed"] = True
fig_triangle = corner.corner(
samples_plt,
labels=labels,
fig=fig,
bins=50,
max_n_ticks=4,
plot_contours=True,
plot_datapoints=True,
# label_kwargs={'fontsize': 12},
data_kwargs={"alpha": 0.1, "ms": 0.5},
range=_range,
hist_kwargs=hist_kwargs,
**kwargs
)
axes_list = fig_triangle.get_axes()
axes = np.array(axes_list).reshape(self.ndim, self.ndim)
plt.draw()
for ax in axes[:, 0]:
ax.yaxis.set_label_coords(-label_offset, 0.5)
for ax in axes[-1, :]:
ax.xaxis.set_label_coords(0.5, -label_offset)
for ax in axes_list:
ax.set_rasterized(True)
ax.set_rasterization_zorder(-10)
for tick in ax.xaxis.get_major_ticks():
# tick.label.set_fontsize(8)
tick.label.set_rotation("horizontal")
for tick in ax.yaxis.get_major_ticks():
# tick.label.set_fontsize(8)
tick.label.set_rotation("vertical")
plt.tight_layout(h_pad=0.0, w_pad=0.0)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
if add_prior:
self._add_prior_to_corner(axes, self.samples, add_prior)
if save_fig:
fig_triangle.savefig(
"{}/{}_corner.png".format(self.outdir, self.label), dpi=dpi
)
else:
return fig, axes
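    # Usage sketch for plot_corner (argument values are illustrative):
    #
    #     mcmc.plot_corner(add_prior=True, nstds=3)      # saves <label>_corner.png
    #     fig, axes = mcmc.plot_corner(save_fig=False)   # or return the figure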
def plot_chainconsumer(self, save_fig=True, label_offset=0.25, dpi=300, **kwargs):
""" Generate a corner plot of the posterior using chainconsumer
Parameters
----------
dpi: int
Passed to plt.savefig
**kwargs:
Passed to chainconsumer.plotter.plot
"""
if "truths" in kwargs and len(kwargs["truths"]) != self.ndim:
logging.warning("len(Truths) != ndim, Truths will be ignored")
kwargs["truths"] = None
samples_plt = copy.copy(self.samples)
labels = self._get_labels(newline_units=True)
samples_plt = self._scale_samples(samples_plt, self.theta_keys)
import chainconsumer
c = chainconsumer.ChainConsumer()
c.add_chain(samples_plt, parameters=labels)
c.configure(smooth=0, summary=False, sigma2d=True)
fig = c.plotter.plot(**kwargs)
axes_list = fig.get_axes()
axes = np.array(axes_list).reshape(self.ndim, self.ndim)
plt.draw()
for ax in axes[:, 0]:
ax.yaxis.set_label_coords(-label_offset, 0.5)
for ax in axes[-1, :]:
ax.xaxis.set_label_coords(0.5, -label_offset)
for ax in axes_list:
ax.set_rasterized(True)
ax.set_rasterization_zorder(-10)
# for tick in ax.xaxis.get_major_ticks():
# #tick.label.set_fontsize(8)
# tick.label.set_rotation('horizontal')
# for tick in ax.yaxis.get_major_ticks():
# #tick.label.set_fontsize(8)
# tick.label.set_rotation('vertical')
plt.tight_layout(h_pad=0.0, w_pad=0.0)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
if save_fig:
fig.savefig("{}/{}_corner.png".format(self.outdir, self.label), dpi=dpi)
else:
return fig
def _add_prior_to_corner(self, axes, samples, add_prior):
for i, key in enumerate(self.theta_keys):
ax = axes[i][i]
s = samples[:, i]
lnprior = self._generic_lnprior(**self.theta_prior[key])
if add_prior == "full" and self.theta_prior[key]["type"] == "unif":
lower = self.theta_prior[key]["lower"]
upper = self.theta_prior[key]["upper"]
r = upper - lower
xlim = [lower - 0.05 * r, upper + 0.05 * r]
x = np.linspace(xlim[0], xlim[1], 1000)
else:
xlim = ax.get_xlim()
x = np.linspace(s.min(), s.max(), 1000)
multiplier = self._get_rescale_multiplier_for_key(key)
subtractor = self._get_rescale_subtractor_for_key(key)
ax.plot(
(x - subtractor) * multiplier,
[np.exp(lnprior(xi)) for xi in x],
"-C3",
label="prior",
)
for j in range(i, self.ndim):
axes[j][i].set_xlim(xlim[0], xlim[1])
for k in range(0, i):
axes[i][k].set_ylim(xlim[0], xlim[1])
def plot_prior_posterior(self, normal_stds=2):
""" Plot the posterior in the context of the prior """
fig, axes = plt.subplots(nrows=self.ndim, figsize=(8, 4 * self.ndim))
N = 1000
from scipy.stats import gaussian_kde
for i, (ax, key) in enumerate(zip(axes, self.theta_keys)):
prior_dict = self.theta_prior[key]
prior_func = self._generic_lnprior(**prior_dict)
if prior_dict["type"] == "unif":
x = np.linspace(prior_dict["lower"], prior_dict["upper"], N)
prior = prior_func(x)
prior[0] = 0
prior[-1] = 0
elif prior_dict["type"] == "log10unif":
upper = prior_dict["log10upper"]
lower = prior_dict["log10lower"]
x = np.linspace(lower, upper, N)
prior = [prior_func(xi) for xi in x]
elif prior_dict["type"] == "norm":
lower = prior_dict["loc"] - normal_stds * prior_dict["scale"]
upper = prior_dict["loc"] + normal_stds * prior_dict["scale"]
x = np.linspace(lower, upper, N)
prior = prior_func(x)
elif prior_dict["type"] == "halfnorm":
lower = prior_dict["loc"]
upper = prior_dict["loc"] + normal_stds * prior_dict["scale"]
x = np.linspace(lower, upper, N)
prior = [prior_func(xi) for xi in x]
elif prior_dict["type"] == "neghalfnorm":
upper = prior_dict["loc"]
lower = prior_dict["loc"] - normal_stds * prior_dict["scale"]
x = np.linspace(lower, upper, N)
prior = [prior_func(xi) for xi in x]
else:
raise ValueError(
"Not implemented for prior type {}".format(prior_dict["type"])
)
priorln = ax.plot(x, prior, "C3", label="prior")
ax.set_xlabel(self.theta_symbols[i])
s = self.samples[:, i]
while len(s) > 10 ** 4:
# random downsample to avoid slow calculation of kde
s = np.random.choice(s, size=int(len(s) / 2.0))
kde = gaussian_kde(s)
ax2 = ax.twinx()
postln = ax2.plot(x, kde.pdf(x), "k", label="posterior")
ax2.set_yticklabels([])
ax.set_yticklabels([])
lns = priorln + postln
labs = [l.get_label() for l in lns]
axes[0].legend(lns, labs, loc=1, framealpha=0.8)
fig.savefig("{}/{}_prior_posterior.png".format(self.outdir, self.label))
def plot_cumulative_max(self, **kwargs):
""" Plot the cumulative twoF for the maximum posterior estimate
See the pyfstat.core.plot_twoF_cumulative function for further details
"""
d, maxtwoF = self.get_max_twoF()
for key, val in self.theta_prior.items():
if key not in d:
d[key] = val
if "add_pfs" in kwargs:
self.generate_loudest()
if hasattr(self, "search") is False:
self._initiate_search_object()
if self.binary is False:
self.search.plot_twoF_cumulative(
self.label,
self.outdir,
F0=d["F0"],
F1=d["F1"],
F2=d["F2"],
Alpha=d["Alpha"],
Delta=d["Delta"],
tstart=self.minStartTime,
tend=self.maxStartTime,
**kwargs
)
else:
self.search.plot_twoF_cumulative(
self.label,
self.outdir,
F0=d["F0"],
F1=d["F1"],
F2=d["F2"],
Alpha=d["Alpha"],
Delta=d["Delta"],
asini=d["asini"],
period=d["period"],
ecc=d["ecc"],
argp=d["argp"],
tp=d["argp"],
tstart=self.minStartTime,
tend=self.maxStartTime,
**kwargs
)
def _generic_lnprior(self, **kwargs):
""" Return a lambda function of the pdf
Parameters
----------
**kwargs:
A dictionary containing 'type' of pdf and shape parameters
"""
def log_of_unif(x, a, b):
above = x < b
below = x > a
if type(above) is not np.ndarray:
if above and below:
return -np.log(b - a)
else:
return -np.inf
else:
idxs = np.array([all(tup) for tup in zip(above, below)])
p = np.zeros(len(x)) - np.inf
p[idxs] = -np.log(b - a)
return p
def log_of_log10unif(x, log10lower, log10upper):
log10x = np.log10(x)
above = log10x < log10upper
below = log10x > log10lower
if type(above) is not np.ndarray:
if above and below:
return -np.log(x * np.log(10) * (log10upper - log10lower))
else:
return -np.inf
else:
idxs = np.array([all(tup) for tup in zip(above, below)])
p = np.zeros(len(x)) - np.inf
p[idxs] = -np.log(x * np.log(10) * (log10upper - log10lower))
return p
def log_of_halfnorm(x, loc, scale):
if x < loc:
return -np.inf
else:
return -0.5 * (
(x - loc) ** 2 / scale ** 2 + np.log(0.5 * np.pi * scale ** 2)
)
def cauchy(x, x0, gamma):
return 1.0 / (np.pi * gamma * (1 + ((x - x0) / gamma) ** 2))
def exp(x, x0, gamma):
if x > x0:
return np.log(gamma) - gamma * (x - x0)
else:
return -np.inf
if kwargs["type"] == "unif":
return lambda x: log_of_unif(x, kwargs["lower"], kwargs["upper"])
if kwargs["type"] == "log10unif":
return lambda x: log_of_log10unif(
x, kwargs["log10lower"], kwargs["log10upper"]
)
elif kwargs["type"] == "halfnorm":
return lambda x: log_of_halfnorm(x, kwargs["loc"], kwargs["scale"])
elif kwargs["type"] == "neghalfnorm":
return lambda x: log_of_halfnorm(-x, kwargs["loc"], kwargs["scale"])
elif kwargs["type"] == "norm":
return lambda x: -0.5 * (
(x - kwargs["loc"]) ** 2 / kwargs["scale"] ** 2
+ np.log(2 * np.pi * kwargs["scale"] ** 2)
)
else:
logging.info("kwargs:", kwargs)
raise ValueError("Print unrecognise distribution")
def _generate_rv(self, **kwargs):
dist_type = kwargs.pop("type")
if dist_type == "unif":
return np.random.uniform(low=kwargs["lower"], high=kwargs["upper"])
if dist_type == "log10unif":
return 10 ** (
np.random.uniform(low=kwargs["log10lower"], high=kwargs["log10upper"])
)
if dist_type == "norm":
return np.random.normal(loc=kwargs["loc"], scale=kwargs["scale"])
if dist_type == "halfnorm":
return np.abs(np.random.normal(loc=kwargs["loc"], scale=kwargs["scale"]))
if dist_type == "neghalfnorm":
return -1 * np.abs(
np.random.normal(loc=kwargs["loc"], scale=kwargs["scale"])
)
if dist_type == "lognorm":
return np.random.lognormal(mean=kwargs["loc"], sigma=kwargs["scale"])
else:
raise ValueError("dist_type {} unknown".format(dist_type))
def _plot_walkers(
self,
sampler,
symbols=None,
alpha=0.8,
color="k",
temp=0,
lw=0.1,
nprod=0,
add_det_stat_burnin=False,
fig=None,
axes=None,
xoffset=0,
plot_det_stat=False,
context="ggplot",
labelpad=5,
):
""" Plot all the chains from a sampler """
if symbols is None:
symbols = self._get_labels()
if context not in plt.style.available:
raise ValueError(
(
"The requested context {} is not available; please select a"
" context from `plt.style.available`"
).format(context)
)
if np.ndim(axes) > 1:
axes = axes.flatten()
shape = sampler.chain.shape
if len(shape) == 3:
nwalkers, nsteps, ndim = shape
chain = sampler.chain[:, :, :].copy()
if len(shape) == 4:
ntemps, nwalkers, nsteps, ndim = shape
if temp < ntemps:
logging.info("Plotting temperature {} chains".format(temp))
else:
raise ValueError(
("Requested temperature {} outside of" "available range").format(
temp
)
)
chain = sampler.chain[temp, :, :, :].copy()
samples = chain.reshape((nwalkers * nsteps, ndim))
samples = self._scale_samples(samples, self.theta_keys)
chain = chain.reshape((nwalkers, nsteps, ndim))
if plot_det_stat:
extra_subplots = 1
else:
extra_subplots = 0
with plt.style.context((context)):
plt.rcParams["text.usetex"] = True
if fig is None and axes is None:
fig = plt.figure(figsize=(4, 3.0 * ndim))
ax = fig.add_subplot(ndim + extra_subplots, 1, 1)
axes = [ax] + [
fig.add_subplot(ndim + extra_subplots, 1, i)
for i in range(2, ndim + 1)
]
idxs = np.arange(chain.shape[1])
burnin_idx = chain.shape[1] - nprod
# if hasattr(self, 'convergence_idx'):
# last_idx = self.convergence_idx
# else:
last_idx = burnin_idx
if ndim > 1:
for i in range(ndim):
axes[i].ticklabel_format(useOffset=False, axis="y")
cs = chain[:, :, i].T
if burnin_idx > 0:
axes[i].plot(
xoffset + idxs[: last_idx + 1],
cs[: last_idx + 1],
color="C3",
alpha=alpha,
lw=lw,
)
axes[i].axvline(xoffset + last_idx, color="k", ls="--", lw=0.5)
axes[i].plot(
xoffset + idxs[burnin_idx:],
cs[burnin_idx:],
color="k",
alpha=alpha,
lw=lw,
)
axes[i].set_xlim(0, xoffset + idxs[-1])
if symbols:
axes[i].set_ylabel(symbols[i], labelpad=labelpad)
# if subtractions[i] == 0:
# axes[i].set_ylabel(symbols[i], labelpad=labelpad)
# else:
# axes[i].set_ylabel(
# symbols[i]+'$-$'+symbols[i]+'$^\mathrm{s}$',
# labelpad=labelpad)
# if hasattr(self, 'convergence_diagnostic'):
# ax = axes[i].twinx()
# axes[i].set_zorder(ax.get_zorder()+1)
# axes[i].patch.set_visible(False)
# c_x = np.array(self.convergence_diagnosticx)
# c_y = np.array(self.convergence_diagnostic)
# break_idx = np.argmin(np.abs(c_x - burnin_idx))
# ax.plot(c_x[:break_idx], c_y[:break_idx, i], '-C0',
# zorder=-10)
# ax.plot(c_x[break_idx:], c_y[break_idx:, i], '-C0',
# zorder=-10)
# if self.convergence_test_type == 'autocorr':
# ax.set_ylabel(r'$\tau_\mathrm{exp}$')
# elif self.convergence_test_type == 'GR':
# ax.set_ylabel('PSRF')
# ax.ticklabel_format(useOffset=False)
else:
axes[0].ticklabel_format(useOffset=False, axis="y")
                cs = chain[:, :, 0].T
if burnin_idx:
axes[0].plot(
idxs[:burnin_idx],
cs[:burnin_idx],
color="C3",
alpha=alpha,
lw=lw,
)
axes[0].plot(
idxs[burnin_idx:], cs[burnin_idx:], color="k", alpha=alpha, lw=lw
)
if symbols:
axes[0].set_ylabel(symbols[0], labelpad=labelpad)
axes[-1].set_xlabel(r"$\textrm{Number of steps}$", labelpad=0.2)
if plot_det_stat:
if len(axes) == ndim:
axes.append(fig.add_subplot(ndim + 1, 1, ndim + 1))
lnl = sampler.loglikelihood[temp, :, :]
if burnin_idx and add_det_stat_burnin:
burn_in_vals = lnl[:, :burnin_idx].flatten()
try:
twoF_burnin = (
burn_in_vals[~np.isnan(burn_in_vals)] - self.likelihoodcoef
)
axes[-1].hist(twoF_burnin, bins=50, histtype="step", color="C3")
except ValueError:
logging.info(
"Det. Stat. hist failed, most likely all "
"values where the same"
)
pass
else:
twoF_burnin = []
prod_vals = lnl[:, burnin_idx:].flatten()
try:
twoF = prod_vals[~np.isnan(prod_vals)] - self.likelihoodcoef
axes[-1].hist(twoF, bins=50, histtype="step", color="k")
except ValueError:
logging.info(
"Det. Stat. hist failed, most likely all "
"values where the same"
)
pass
if self.BSGL:
axes[-1].set_xlabel(r"$\mathcal{B}_\mathrm{S/GL}$")
else:
axes[-1].set_xlabel(r"$\widetilde{2\mathcal{F}}$")
axes[-1].set_ylabel(r"$\textrm{Counts}$")
combined_vals = np.append(twoF_burnin, twoF)
if len(combined_vals) > 0:
minv = np.min(combined_vals)
maxv = np.max(combined_vals)
Range = abs(maxv - minv)
axes[-1].set_xlim(minv - 0.1 * Range, maxv + 0.1 * Range)
xfmt = matplotlib.ticker.ScalarFormatter()
xfmt.set_powerlimits((-4, 4))
axes[-1].xaxis.set_major_formatter(xfmt)
return fig, axes
def _apply_corrections_to_p0(self, p0):
""" Apply any correction to the initial p0 values """
return p0
def _generate_scattered_p0(self, p):
""" Generate a set of p0s scattered about p """
p0 = [
[
p + self.scatter_val * p * np.random.randn(self.ndim)
for i in range(self.nwalkers)
]
for j in range(self.ntemps)
]
return p0
def _generate_initial_p0(self):
""" Generate a set of init vals for the walkers """
if type(self.theta_initial) == dict:
logging.info("Generate initial values from initial dictionary")
if hasattr(self, "nglitch") and self.nglitch > 1:
raise ValueError("Initial dict not implemented for nglitch>1")
p0 = [
[
[
self._generate_rv(**self.theta_initial[key])
for key in self.theta_keys
]
for i in range(self.nwalkers)
]
for j in range(self.ntemps)
]
elif self.theta_initial is None:
logging.info("Generate initial values from prior dictionary")
p0 = [
[
[
self._generate_rv(**self.theta_prior[key])
for key in self.theta_keys
]
for i in range(self.nwalkers)
]
for j in range(self.ntemps)
]
else:
raise ValueError("theta_initial not understood")
return p0
def _get_new_p0(self, sampler):
""" Returns new initial positions for walkers are burn0 stage
This returns new positions for all walkers by scattering points about
the maximum posterior with scale `scatter_val`.
"""
temp_idx = 0
pF = sampler.chain[temp_idx, :, :, :]
lnl = sampler.loglikelihood[temp_idx, :, :]
lnp = sampler.logprobability[temp_idx, :, :]
# General warnings about the state of lnp
if np.any(np.isnan(lnp)):
logging.warning(
"Of {} lnprobs {} are nan".format(np.shape(lnp), np.sum(np.isnan(lnp)))
)
if np.any(np.isposinf(lnp)):
logging.warning(
"Of {} lnprobs {} are +np.inf".format(
np.shape(lnp), np.sum(np.isposinf(lnp))
)
)
if np.any(np.isneginf(lnp)):
logging.warning(
"Of {} lnprobs {} are -np.inf".format(
np.shape(lnp), np.sum(np.isneginf(lnp))
)
)
lnp_finite = copy.copy(lnp)
lnp_finite[np.isinf(lnp)] = np.nan
idx = np.unravel_index(np.nanargmax(lnp_finite), lnp_finite.shape)
p = pF[idx]
p0 = self._generate_scattered_p0(p)
self.search.BSGL = False
twoF = self.logl(p, self.search)
self.search.BSGL = self.BSGL
logging.info(
(
"Gen. new p0 from pos {} which had det. stat.={:2.1f},"
" twoF={:2.1f} and lnp={:2.1f}"
).format(idx[1], lnl[idx], twoF, lnp_finite[idx])
)
return p0
def _get_data_dictionary_to_save(self):
d = dict(
nsteps=self.nsteps,
nwalkers=self.nwalkers,
ntemps=self.ntemps,
theta_keys=self.theta_keys,
theta_prior=self.theta_prior,
log10beta_min=self.log10beta_min,
BSGL=self.BSGL,
minStartTime=self.minStartTime,
maxStartTime=self.maxStartTime,
)
return d
def _save_data(self, sampler, samples, lnprobs, lnlikes, all_lnlikelihood, chain):
d = self._get_data_dictionary_to_save()
d["samples"] = samples
d["lnprobs"] = lnprobs
d["lnlikes"] = lnlikes
d["chain"] = chain
d["all_lnlikelihood"] = all_lnlikelihood
if os.path.isfile(self.pickle_path):
logging.info(
"Saving backup of {} as {}.old".format(
self.pickle_path, self.pickle_path
)
)
os.rename(self.pickle_path, self.pickle_path + ".old")
with open(self.pickle_path, "wb") as File:
pickle.dump(d, File)
def get_saved_data_dictionary(self):
""" Returns dictionary of the data saved in the pickle """
with open(self.pickle_path, "r") as File:
d = pickle.load(File)
return d
def _check_old_data_is_okay_to_use(self):
if os.path.isfile(self.pickle_path) is False:
logging.info("No pickled data found")
return False
if self.sftfilepattern is not None:
oldest_sft = min(
[os.path.getmtime(f) for f in self._get_list_of_matching_sfts()]
)
if os.path.getmtime(self.pickle_path) < oldest_sft:
logging.info("Pickled data outdates sft files")
return False
old_d = self.get_saved_data_dictionary().copy()
new_d = self._get_data_dictionary_to_save().copy()
old_d.pop("samples")
old_d.pop("lnprobs")
old_d.pop("lnlikes")
old_d.pop("all_lnlikelihood")
old_d.pop("chain")
for key in "minStartTime", "maxStartTime":
if new_d[key] is None:
new_d[key] = old_d[key]
setattr(self, key, new_d[key])
mod_keys = []
for key in list(new_d.keys()):
if key in old_d:
if new_d[key] != old_d[key]:
mod_keys.append((key, old_d[key], new_d[key]))
else:
raise ValueError("Keys {} not in old dictionary".format(key))
if len(mod_keys) == 0:
return True
else:
logging.warning("Saved data differs from requested")
logging.info("Differences found in following keys:")
for key in mod_keys:
if len(key) == 3:
if np.isscalar(key[1]) or key[0] == "nsteps":
logging.info(" {} : {} -> {}".format(*key))
else:
logging.info(" " + key[0])
else:
logging.info(key)
return False
def get_max_twoF(self, threshold=0.05):
""" Returns the max likelihood sample and the corresponding 2F value
Note: the sample is returned as a dictionary along with an estimate of
the standard deviation calculated from the std of all samples with a
twoF within `threshold` (relative) to the max twoF
"""
if any(np.isposinf(self.lnlikes)):
logging.info("lnlike values contain positive infinite values")
if any(np.isneginf(self.lnlikes)):
logging.info("lnlike values contain negative infinite values")
if any(np.isnan(self.lnlikes)):
logging.info("lnlike values contain nan")
idxs = np.isfinite(self.lnlikes)
jmax = np.nanargmax(self.lnlikes[idxs])
maxlogl = self.lnlikes[jmax]
d = OrderedDict()
if self.BSGL:
if hasattr(self, "search") is False:
self._initiate_search_object()
p = self.samples[jmax]
self.search.BSGL = False
maxtwoF = self.logl(p, self.search)
self.search.BSGL = self.BSGL
else:
maxtwoF = (maxlogl - self.likelihoodcoef) * 2
repeats = []
for i, k in enumerate(self.theta_keys):
if k in d and k not in repeats:
d[k + "_0"] = d[k] # relabel the old key
d.pop(k)
repeats.append(k)
if k in repeats:
k = k + "_0"
count = 1
while k in d:
k = k.replace("_{}".format(count - 1), "_{}".format(count))
count += 1
d[k] = self.samples[jmax][i]
return d, maxtwoF
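    # Usage sketch for get_max_twoF (the returned keys depend on which
    # parameters were searched; "F0" below is only an example):
    #
    #     d, maxtwoF = mcmc.get_max_twoF()
    #     print(maxtwoF)    # maximum detection statistic found by the chains
    #     print(d["F0"])    # parameter value of the max-likelihood sample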
def get_median_stds(self):
""" Returns a dict of the median and std of all production samples """
d = OrderedDict()
repeats = []
for s, k in zip(self.samples.T, self.theta_keys):
if k in d and k not in repeats:
d[k + "_0"] = d[k] # relabel the old key
d[k + "_0_std"] = d[k + "_std"]
d.pop(k)
d.pop(k + "_std")
repeats.append(k)
if k in repeats:
k = k + "_0"
count = 1
while k in d:
k = k.replace("_{}".format(count - 1), "_{}".format(count))
count += 1
d[k] = np.median(s)
d[k + "_std"] = np.std(s)
return d
def check_if_samples_are_railing(self, threshold=0.01):
""" Returns a boolean estimate of if the samples are railing
Parameters
----------
threshold: float [0, 1]
Fraction of the uniform prior to test (at upper and lower bound)
Returns
-------
return_flag: bool
            If true, the samples are railing
"""
return_flag = False
for s, k in zip(self.samples.T, self.theta_keys):
prior = self.theta_prior[k]
if prior["type"] == "unif":
prior_range = prior["upper"] - prior["lower"]
edges = []
fracs = []
for l in ["lower", "upper"]:
bools = np.abs(s - prior[l]) / prior_range < threshold
if np.any(bools):
edges.append(l)
fracs.append(str(100 * float(np.sum(bools)) / len(bools)))
if len(edges) > 0:
logging.warning(
"{}% of the {} posterior is railing on the {} edges".format(
"% & ".join(fracs), k, " & ".join(edges)
)
)
return_flag = True
return return_flag
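    # Usage sketch: check_if_samples_are_railing flags posteriors piled up
    # against a uniform prior boundary, which usually means the prior range
    # should be widened (the threshold value below is illustrative).
    #
    #     if mcmc.check_if_samples_are_railing(threshold=0.01):
    #         logging.warning("Samples are railing; consider widening the priors")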
def write_par(self, method="med"):
""" Writes a .par of the best-fit params with an estimated std """
logging.info(
"Writing {}/{}.par using the {} method".format(
self.outdir, self.label, method
)
)
median_std_d = self.get_median_stds()
max_twoF_d, max_twoF = self.get_max_twoF()
logging.info("Writing par file with max twoF = {}".format(max_twoF))
filename = "{}/{}.par".format(self.outdir, self.label)
with open(filename, "w+") as f:
f.write("MaxtwoF = {}\n".format(max_twoF))
f.write("tref = {}\n".format(self.tref))
if hasattr(self, "theta0_index"):
f.write("theta0_index = {}\n".format(self.theta0_idx))
if method == "med":
for key, val in median_std_d.items():
f.write("{} = {:1.16e}\n".format(key, val))
if method == "twoFmax":
for key, val in max_twoF_d.items():
f.write("{} = {:1.16e}\n".format(key, val))
def generate_loudest(self):
""" Use lalapps_ComputeFstatistic_v2 to produce a .loudest file """
self.write_par()
params = read_par(label=self.label, outdir=self.outdir)
for key in ["Alpha", "Delta", "F0", "F1"]:
if key not in params:
params[key] = self.theta_prior[key]
cmd = (
'lalapps_ComputeFstatistic_v2 -a {} -d {} -f {} -s {} -D "{}"'
' --refTime={} --outputLoudest="{}/{}.loudest" '
"--minStartTime={} --maxStartTime={}"
).format(
params["Alpha"],
params["Delta"],
params["F0"],
params["F1"],
self.sftfilepattern,
params["tref"],
self.outdir,
self.label,
self.minStartTime,
self.maxStartTime,
)
subprocess.call([cmd], shell=True)
def write_prior_table(self):
""" Generate a .tex file of the prior """
with open("{}/{}_prior.tex".format(self.outdir, self.label), "w") as f:
f.write(
r"\begin{tabular}{c l c} \hline" + "\n"
r"Parameter & & & \\ \hhline{====}"
)
for key, prior in self.theta_prior.items():
if type(prior) is dict:
Type = prior["type"]
if Type == "unif":
a = prior["lower"]
b = prior["upper"]
line = r"{} & $\mathrm{{Unif}}$({}, {}) & {}\\"
elif Type == "norm":
a = prior["loc"]
b = prior["scale"]
line = r"{} & $\mathcal{{N}}$({}, {}) & {}\\"
elif Type == "halfnorm":
a = prior["loc"]
b = prior["scale"]
line = r"{} & $|\mathcal{{N}}$({}, {})| & {}\\"
u = self.unit_dictionary[key]
s = self.symbol_dictionary[key]
f.write("\n")
a = helper_functions.texify_float(a)
b = helper_functions.texify_float(b)
f.write(" " + line.format(s, a, b, u) + r" \\")
f.write("\n\end{tabular}\n")
def print_summary(self):
""" Prints a summary of the max twoF found to the terminal """
max_twoFd, max_twoF = self.get_max_twoF()
median_std_d = self.get_median_stds()
logging.info("Summary:")
if hasattr(self, "theta0_idx"):
logging.info("theta0 index: {}".format(self.theta0_idx))
logging.info("Max twoF: {} with parameters:".format(max_twoF))
for k in np.sort(list(max_twoFd.keys())):
print(" {:10s} = {:1.9e}".format(k, max_twoFd[k]))
logging.info("Median +/- std for production values")
for k in np.sort(list(median_std_d.keys())):
if "std" not in k:
logging.info(
" {:10s} = {:1.9e} +/- {:1.9e}".format(
k, median_std_d[k], median_std_d[k + "_std"]
)
)
logging.info("\n")
def _CF_twoFmax(self, theta, twoFmax, ntrials):
Fmax = twoFmax / 2.0
return (
np.exp(1j * theta * twoFmax)
* ntrials
/ 2.0
* Fmax
* np.exp(-Fmax)
* (1 - (1 + Fmax) * np.exp(-Fmax)) ** (ntrials - 1)
)
def _pdf_twoFhat(self, twoFhat, nglitch, ntrials, twoFmax=100, dtwoF=0.1):
if np.ndim(ntrials) == 0:
ntrials = np.zeros(nglitch + 1) + ntrials
twoFmax_int = np.arange(0, twoFmax, dtwoF)
theta_int = np.arange(-1 / dtwoF, 1.0 / dtwoF, 1.0 / twoFmax)
CF_twoFmax_theta = np.array(
[
[
np.trapz(self._CF_twoFmax(t, twoFmax_int, ntrial), twoFmax_int)
for t in theta_int
]
for ntrial in ntrials
]
)
CF_twoFhat_theta = np.prod(CF_twoFmax_theta, axis=0)
pdf = (1 / (2 * np.pi)) * np.array(
[
np.trapz(
np.exp(-1j * theta_int * twoFhat_val) * CF_twoFhat_theta, theta_int
)
for twoFhat_val in twoFhat
]
)
return pdf.real
def _p_val_twoFhat(self, twoFhat, ntrials, twoFhatmax=500, Npoints=1000):
""" Caluculate the p-value for the given twoFhat in Gaussian noise
Parameters
----------
twoFhat: float
The observed twoFhat value
        ntrials: int, or array of length nglitch+1
            The number of trials for each of the nglitch+1 segments
"""
twoFhats = np.linspace(twoFhat, twoFhatmax, Npoints)
pdf = self._pdf_twoFhat(twoFhats, self.nglitch, ntrials)
return np.trapz(pdf, twoFhats)
def get_p_value(self, delta_F0, time_trials=0):
""" Get's the p-value for the maximum twoFhat value """
d, max_twoF = self.get_max_twoF()
if self.nglitch == 1:
tglitches = [d["tglitch"]]
else:
tglitches = [d["tglitch_{}".format(i)] for i in range(self.nglitch)]
tboundaries = [self.minStartTime] + tglitches + [self.maxStartTime]
deltaTs = np.diff(tboundaries)
ntrials = [time_trials + delta_F0 * dT for dT in deltaTs]
p_val = self._p_val_twoFhat(max_twoF, ntrials)
print("p-value = {}".format(p_val))
return p_val
def compute_evidence(self, make_plots=False, write_to_file=None):
""" Computes the evidence/marginal likelihood for the model """
betas = self.betas
mean_lnlikes = np.mean(np.mean(self.all_lnlikelihood, axis=1), axis=1)
mean_lnlikes = mean_lnlikes[::-1]
betas = betas[::-1]
if any(np.isinf(mean_lnlikes)):
print(
"WARNING mean_lnlikes contains inf: recalculating without"
" the {} infs".format(len(betas[np.isinf(mean_lnlikes)]))
)
idxs = np.isinf(mean_lnlikes)
mean_lnlikes = mean_lnlikes[~idxs]
betas = betas[~idxs]
log10evidence = np.trapz(mean_lnlikes, betas) / np.log(10)
z1 = np.trapz(mean_lnlikes, betas)
z2 = np.trapz(mean_lnlikes[::-1][::2][::-1], betas[::-1][::2][::-1])
log10evidence_err = np.abs(z1 - z2) / np.log(10)
logging.info(
"log10 evidence for {} = {} +/- {}".format(
self.label, log10evidence, log10evidence_err
)
)
if write_to_file:
EvidenceDict = self.read_evidence_file_to_dict(write_to_file)
EvidenceDict[self.label] = [log10evidence, log10evidence_err]
self.write_evidence_file_from_dict(EvidenceDict, write_to_file)
if make_plots:
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(6, 8))
ax1.semilogx(betas, mean_lnlikes, "-o")
ax1.set_xlabel(r"$\beta$")
ax1.set_ylabel(r"$\langle \log(\mathcal{L}) \rangle$")
min_betas = []
evidence = []
for i in range(int(len(betas) / 2.0)):
min_betas.append(betas[i])
lnZ = np.trapz(mean_lnlikes[i:], betas[i:])
evidence.append(lnZ / np.log(10))
ax2.semilogx(min_betas, evidence, "-o")
ax2.set_ylabel(
r"$\int_{\beta_{\textrm{Min}}}^{\beta=1}"
+ r"\langle \log(\mathcal{L})\rangle d\beta$",
size=16,
)
ax2.set_xlabel(r"$\beta_{\textrm{min}}$")
plt.tight_layout()
fig.savefig("{}/{}_beta_lnl.png".format(self.outdir, self.label))
return log10evidence, log10evidence_err
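    # Sketch of the thermodynamic-integration estimate used above: the log
    # evidence is the integral over inverse temperature beta of the mean
    # log-likelihood, approximated with the trapezoid rule; the quoted error
    # comes from repeating the integral on an every-other-beta grid.
    #
    #     log10_Z, log10_Z_err = mcmc.compute_evidence(make_plots=True)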
@staticmethod
def read_evidence_file_to_dict(evidence_file_name="Evidences.txt"):
EvidenceDict = OrderedDict()
if os.path.isfile(evidence_file_name):
with open(evidence_file_name, "r") as f:
for line in f:
key, log10evidence, log10evidence_err = line.split(" ")
EvidenceDict[key] = [float(log10evidence), float(log10evidence_err)]
return EvidenceDict
def write_evidence_file_from_dict(self, EvidenceDict, evidence_file_name):
with open(evidence_file_name, "w+") as f:
for key, val in EvidenceDict.items():
f.write("{} {} {}\n".format(key, val[0], val[1]))
class MCMCGlitchSearch(MCMCSearch):
"""MCMC search using the SemiCoherentGlitchSearch
See parent MCMCSearch for a list of all additional parameters, here we list
only the additional init parameters of this class.
Parameters
----------
nglitch: int
The number of glitches to allow
dtglitchmin: int
The minimum duration (in seconds) of a segment between two glitches
or a glitch and the start/end of the data
    theta0_idx: int
        Index (zero-based) of the segment that theta refers to - useful
        when providing a tight prior on theta to allow the signal to jump
        to theta (and not just away from it)
"""
symbol_dictionary = dict(
F0="$f$",
F1="$\dot{f}$",
F2="$\ddot{f}$",
Alpha=r"$\alpha$",
Delta="$\delta$",
delta_F0="$\delta f$",
delta_F1="$\delta \dot{f}$",
tglitch="$t_\mathrm{glitch}$",
)
unit_dictionary = dict(
F0="Hz",
F1="Hz/s",
F2="Hz/s$^2$",
Alpha=r"rad",
Delta="rad",
delta_F0="Hz",
delta_F1="Hz/s",
tglitch="s",
)
transform_dictionary = dict(
tglitch={
"multiplier": 1 / 86400.0,
"subtractor": "minStartTime",
"unit": "day",
"label": "$t^{g}_0$ \n [d]",
}
)
@helper_functions.initializer
def __init__(
self,
theta_prior,
tref,
label,
outdir="data",
minStartTime=None,
maxStartTime=None,
sftfilepattern=None,
detectors=None,
nsteps=[100, 100],
nwalkers=100,
ntemps=1,
log10beta_min=-5,
theta_initial=None,
rhohatmax=1000,
binary=False,
BSGL=False,
SSBprec=None,
minCoverFreq=None,
maxCoverFreq=None,
injectSources=None,
assumeSqrtSX=None,
dtglitchmin=1 * 86400,
theta0_idx=0,
nglitch=1,
):
if os.path.isdir(outdir) is False:
os.mkdir(outdir)
self._add_log_file()
logging.info(
(
"Set-up MCMC glitch search with {} glitches for model {}" " on data {}"
).format(self.nglitch, self.label, self.sftfilepattern)
)
self.pickle_path = "{}/{}_saved_data.p".format(self.outdir, self.label)
self._unpack_input_theta()
self.ndim = len(self.theta_keys)
if self.log10beta_min:
self.betas = np.logspace(0, self.log10beta_min, self.ntemps)
else:
self.betas = None
if args.clean and os.path.isfile(self.pickle_path):
os.rename(self.pickle_path, self.pickle_path + ".old")
self.old_data_is_okay_to_use = self._check_old_data_is_okay_to_use()
self._log_input()
self._set_likelihoodcoef()
def _set_likelihoodcoef(self):
self.likelihoodcoef = (self.nglitch + 1) * np.log(70.0 / self.rhohatmax ** 4)
def _initiate_search_object(self):
logging.info("Setting up search object")
self.search = core.SemiCoherentGlitchSearch(
label=self.label,
outdir=self.outdir,
sftfilepattern=self.sftfilepattern,
tref=self.tref,
minStartTime=self.minStartTime,
maxStartTime=self.maxStartTime,
minCoverFreq=self.minCoverFreq,
maxCoverFreq=self.maxCoverFreq,
detectors=self.detectors,
BSGL=self.BSGL,
nglitch=self.nglitch,
theta0_idx=self.theta0_idx,
injectSources=self.injectSources,
)
if self.minStartTime is None:
self.minStartTime = self.search.minStartTime
if self.maxStartTime is None:
self.maxStartTime = self.search.maxStartTime
def logp(self, theta_vals, theta_prior, theta_keys, search):
if self.nglitch > 1:
ts = (
[self.minStartTime]
+ list(theta_vals[-self.nglitch :])
+ [self.maxStartTime]
)
if np.array_equal(ts, np.sort(ts)) is False:
return -np.inf
if any(np.diff(ts) < self.dtglitchmin):
return -np.inf
H = [
self._generic_lnprior(**theta_prior[key])(p)
for p, key in zip(theta_vals, theta_keys)
]
return np.sum(H)
def logl(self, theta, search):
if self.nglitch > 1:
ts = (
[self.minStartTime] + list(theta[-self.nglitch :]) + [self.maxStartTime]
)
if np.array_equal(ts, np.sort(ts)) is False:
return -np.inf
for j, theta_i in enumerate(self.theta_idxs):
self.fixed_theta[theta_i] = theta[j]
twoF = search.get_semicoherent_nglitch_twoF(*self.fixed_theta)
return twoF / 2.0 + self.likelihoodcoef
def _unpack_input_theta(self):
glitch_keys = ["delta_F0", "delta_F1", "tglitch"]
full_glitch_keys = list(
np.array([[gk] * self.nglitch for gk in glitch_keys]).flatten()
)
if "tglitch_0" in self.theta_prior:
full_glitch_keys[-self.nglitch :] = [
"tglitch_{}".format(i) for i in range(self.nglitch)
]
full_glitch_keys[-2 * self.nglitch : -1 * self.nglitch] = [
"delta_F1_{}".format(i) for i in range(self.nglitch)
]
full_glitch_keys[-4 * self.nglitch : -2 * self.nglitch] = [
"delta_F0_{}".format(i) for i in range(self.nglitch)
]
full_theta_keys = ["F0", "F1", "F2", "Alpha", "Delta"] + full_glitch_keys
full_theta_keys_copy = copy.copy(full_theta_keys)
glitch_symbols = ["$\delta f$", "$\delta \dot{f}$", r"$t_{glitch}$"]
full_glitch_symbols = list(
np.array([[gs] * self.nglitch for gs in glitch_symbols]).flatten()
)
full_theta_symbols = [
"$f$",
"$\dot{f}$",
"$\ddot{f}$",
r"$\alpha$",
r"$\delta$",
] + full_glitch_symbols
self.theta_keys = []
fixed_theta_dict = {}
for key, val in self.theta_prior.items():
if type(val) is dict:
fixed_theta_dict[key] = 0
if key in glitch_keys:
for i in range(self.nglitch):
self.theta_keys.append(key)
else:
self.theta_keys.append(key)
elif type(val) in [float, int, np.float64]:
fixed_theta_dict[key] = val
else:
raise ValueError(
"Type {} of {} in theta not recognised".format(type(val), key)
)
if key in glitch_keys:
for i in range(self.nglitch):
full_theta_keys_copy.pop(full_theta_keys_copy.index(key))
else:
full_theta_keys_copy.pop(full_theta_keys_copy.index(key))
if len(full_theta_keys_copy) > 0:
raise ValueError(
("Input dictionary `theta` is missing the" "following keys: {}").format(
full_theta_keys_copy
)
)
self.fixed_theta = [fixed_theta_dict[key] for key in full_theta_keys]
self.theta_idxs = [full_theta_keys.index(k) for k in self.theta_keys]
self.theta_symbols = [full_theta_symbols[i] for i in self.theta_idxs]
idxs = np.argsort(self.theta_idxs)
self.theta_idxs = [self.theta_idxs[i] for i in idxs]
self.theta_symbols = [self.theta_symbols[i] for i in idxs]
self.theta_keys = [self.theta_keys[i] for i in idxs]
# Correct for number of glitches in the idxs
self.theta_idxs = np.array(self.theta_idxs)
while np.sum(self.theta_idxs[:-1] == self.theta_idxs[1:]) > 0:
for i, idx in enumerate(self.theta_idxs):
if idx in self.theta_idxs[:i]:
self.theta_idxs[i] += 1
def _get_data_dictionary_to_save(self):
d = dict(
nsteps=self.nsteps,
nwalkers=self.nwalkers,
ntemps=self.ntemps,
theta_keys=self.theta_keys,
theta_prior=self.theta_prior,
log10beta_min=self.log10beta_min,
theta0_idx=self.theta0_idx,
BSGL=self.BSGL,
minStartTime=self.minStartTime,
maxStartTime=self.maxStartTime,
)
return d
def _apply_corrections_to_p0(self, p0):
p0 = np.array(p0)
if self.nglitch > 1:
p0[:, :, -self.nglitch :] = np.sort(p0[:, :, -self.nglitch :], axis=2)
return p0
def plot_cumulative_max(self):
fig, ax = plt.subplots()
d, maxtwoF = self.get_max_twoF()
for key, val in self.theta_prior.items():
if key not in d:
d[key] = val
if self.nglitch > 1:
delta_F0s = [d["delta_F0_{}".format(i)] for i in range(self.nglitch)]
delta_F0s.insert(self.theta0_idx, 0)
delta_F0s = np.array(delta_F0s)
delta_F0s[: self.theta0_idx] *= -1
tglitches = [d["tglitch_{}".format(i)] for i in range(self.nglitch)]
elif self.nglitch == 1:
delta_F0s = [d["delta_F0"]]
delta_F0s.insert(self.theta0_idx, 0)
delta_F0s = np.array(delta_F0s)
delta_F0s[: self.theta0_idx] *= -1
tglitches = [d["tglitch"]]
tboundaries = [self.minStartTime] + tglitches + [self.maxStartTime]
for j in range(self.nglitch + 1):
ts = tboundaries[j]
te = tboundaries[j + 1]
if (te - ts) / 86400 < 5:
logging.info("Period too short to perform cumulative search")
continue
if j < self.theta0_idx:
summed_deltaF0 = np.sum(delta_F0s[j : self.theta0_idx])
F0_j = d["F0"] - summed_deltaF0
taus, twoFs = self.search.calculate_twoF_cumulative(
F0_j,
F1=d["F1"],
F2=d["F2"],
Alpha=d["Alpha"],
Delta=d["Delta"],
tstart=ts,
tend=te,
)
elif j >= self.theta0_idx:
summed_deltaF0 = np.sum(delta_F0s[self.theta0_idx : j + 1])
F0_j = d["F0"] + summed_deltaF0
taus, twoFs = self.search.calculate_twoF_cumulative(
F0_j,
F1=d["F1"],
F2=d["F2"],
Alpha=d["Alpha"],
Delta=d["Delta"],
tstart=ts,
tend=te,
)
ax.plot(ts + taus, twoFs)
ax.set_xlabel("GPS time")
fig.savefig("{}/{}_twoFcumulative.png".format(self.outdir, self.label))
class MCMCSemiCoherentSearch(MCMCSearch):
""" MCMC search for a signal using the semi-coherent ComputeFstat
Parameters
----------
theta_prior: dict
Dictionary of priors and fixed values for the search parameters.
        For each parameter (key of the dict), if it is to be held fixed
        the value should be a constant float; if it is to be searched, the
        value should be a dictionary defining the prior.
tref, minStartTime, maxStartTime: int
GPS seconds of the reference time, start time and end time. While tref
        is required, minStartTime and maxStartTime default to None in which
case all available data is used.
label, outdir: str
        A label and output directory (optional, default is `'data'`) to
name files
sftfilepattern: str, optional
Pattern to match SFTs using wildcards (*?) and ranges [0-9];
        multiple patterns can be given separated by colons.
detectors: str, optional
Two character reference to the detectors to use, specify None for no
        constraint and comma-separate for multiple references.
nsteps: list (2,), optional
Number of burn-in and production steps to take, [nburn, nprod]. See
`pyfstat.MCMCSearch.setup_initialisation()` for details on adding
initialisation steps.
nwalkers, ntemps: int, optional
        The number of walkers and temperatures to use in the parallel
tempered PTSampler.
    log10beta_min: float < 0, optional
The log_10(beta) value, if given the set of betas passed to PTSampler
are generated from `np.logspace(0, log10beta_min, ntemps)` (given
in descending order to ptemcee).
theta_initial: dict, array, optional
        A dictionary of distributions from which to draw the initial
        walker positions
rhohatmax: float, optional
Upper bound for the SNR scale parameter (required to normalise the
Bayes factor) - this needs to be carefully set when using the
evidence.
binary: bool, optional
If true, search over binary parameters
BSGL: bool, optional
If true, use the BSGL statistic
    SSBprec: int, optional
        SSB (solar-system barycentre) precision to use when calling ComputeFstat
minCoverFreq, maxCoverFreq: float, optional
Minimum and maximum instantaneous frequency which will be covered
over the SFT time span as passed to CreateFstatInput
injectSources: dict, optional
If given, inject these properties into the SFT files before running
the search
assumeSqrtSX: float, optional
Don't estimate noise-floors, but assume (stationary) per-IFO sqrt{SX}
nsegs: int
The number of segments
"""
def __init__(
self,
theta_prior,
tref,
label,
outdir="data",
minStartTime=None,
maxStartTime=None,
sftfilepattern=None,
detectors=None,
nsteps=[100, 100],
nwalkers=100,
ntemps=1,
log10beta_min=-5,
theta_initial=None,
rhohatmax=1000,
binary=False,
BSGL=False,
SSBprec=None,
minCoverFreq=None,
maxCoverFreq=None,
injectSources=None,
assumeSqrtSX=None,
nsegs=None,
):
self.theta_prior = theta_prior
self.tref = tref
self.label = label
self.outdir = outdir
self.minStartTime = minStartTime
self.maxStartTime = maxStartTime
self.sftfilepattern = sftfilepattern
self.detectors = detectors
self.nsteps = nsteps
self.nwalkers = nwalkers
self.ntemps = ntemps
self.log10beta_min = log10beta_min
self.theta_initial = theta_initial
self.rhohatmax = rhohatmax
self.binary = binary
self.BSGL = BSGL
self.SSBprec = SSBprec
self.minCoverFreq = minCoverFreq
self.maxCoverFreq = maxCoverFreq
self.injectSources = injectSources
self.assumeSqrtSX = assumeSqrtSX
self.nsegs = nsegs
if os.path.isdir(outdir) is False:
os.mkdir(outdir)
self._add_log_file()
logging.info(
("Set-up MCMC semi-coherent search for model {} on data" "{}").format(
self.label, self.sftfilepattern
)
)
self.pickle_path = "{}/{}_saved_data.p".format(self.outdir, self.label)
self._unpack_input_theta()
self.ndim = len(self.theta_keys)
if self.log10beta_min:
self.betas = np.logspace(0, self.log10beta_min, self.ntemps)
else:
self.betas = None
if args.clean and os.path.isfile(self.pickle_path):
os.rename(self.pickle_path, self.pickle_path + ".old")
self._log_input()
if self.nsegs:
self._set_likelihoodcoef()
else:
logging.info("Value `nsegs` not yet provided")
def _set_likelihoodcoef(self):
self.likelihoodcoef = self.nsegs * np.log(70.0 / self.rhohatmax ** 4)
def _get_data_dictionary_to_save(self):
d = dict(
nsteps=self.nsteps,
nwalkers=self.nwalkers,
ntemps=self.ntemps,
theta_keys=self.theta_keys,
theta_prior=self.theta_prior,
log10beta_min=self.log10beta_min,
BSGL=self.BSGL,
nsegs=self.nsegs,
minStartTime=self.minStartTime,
maxStartTime=self.maxStartTime,
)
return d
def _initiate_search_object(self):
logging.info("Setting up search object")
self.search = core.SemiCoherentSearch(
label=self.label,
outdir=self.outdir,
tref=self.tref,
nsegs=self.nsegs,
sftfilepattern=self.sftfilepattern,
binary=self.binary,
BSGL=self.BSGL,
minStartTime=self.minStartTime,
maxStartTime=self.maxStartTime,
minCoverFreq=self.minCoverFreq,
maxCoverFreq=self.maxCoverFreq,
detectors=self.detectors,
injectSources=self.injectSources,
assumeSqrtSX=self.assumeSqrtSX,
)
if self.minStartTime is None:
self.minStartTime = self.search.minStartTime
if self.maxStartTime is None:
self.maxStartTime = self.search.maxStartTime
def logp(self, theta_vals, theta_prior, theta_keys, search):
H = [
self._generic_lnprior(**theta_prior[key])(p)
for p, key in zip(theta_vals, theta_keys)
]
return np.sum(H)
def logl(self, theta, search):
for j, theta_i in enumerate(self.theta_idxs):
self.fixed_theta[theta_i] = theta[j]
twoF = search.get_semicoherent_twoF(*self.fixed_theta)
return twoF / 2.0 + self.likelihoodcoef
class MCMCFollowUpSearch(MCMCSemiCoherentSearch):
""" A follow up procudure increasing the coherence time in a zoom
Parameters
----------
theta_prior: dict
Dictionary of priors and fixed values for the search parameters.
        For each parameter (key of the dict), if it is to be held fixed
        the value should be a constant float; if it is to be searched, the
        value should be a dictionary defining the prior.
tref, minStartTime, maxStartTime: int
GPS seconds of the reference time, start time and end time. While tref
        is required, minStartTime and maxStartTime default to None in which
case all available data is used.
label, outdir: str
        A label and output directory (optional, default is `'data'`) to
name files
sftfilepattern: str, optional
Pattern to match SFTs using wildcards (*?) and ranges [0-9];
        multiple patterns can be given separated by colons.
detectors: str, optional
Two character reference to the detectors to use, specify None for no
        constraint and comma-separate for multiple references.
nsteps: list (2,), optional
Number of burn-in and production steps to take, [nburn, nprod]. See
`pyfstat.MCMCSearch.setup_initialisation()` for details on adding
initialisation steps.
nwalkers, ntemps: int, optional
        The number of walkers and temperatures to use in the parallel
tempered PTSampler.
    log10beta_min: float < 0, optional
The log_10(beta) value, if given the set of betas passed to PTSampler
are generated from `np.logspace(0, log10beta_min, ntemps)` (given
in descending order to ptemcee).
theta_initial: dict, array, optional
        A dictionary of distributions from which to draw the initial
        walker positions
rhohatmax: float, optional
Upper bound for the SNR scale parameter (required to normalise the
Bayes factor) - this needs to be carefully set when using the
evidence.
binary: bool, optional
If true, search over binary parameters
BSGL: bool, optional
If true, use the BSGL statistic
    SSBprec: int, optional
        SSB (solar-system barycentre) precision to use when calling ComputeFstat
minCoverFreq, maxCoverFreq: float, optional
Minimum and maximum instantaneous frequency which will be covered
over the SFT time span as passed to CreateFstatInput
injectSources: dict, optional
If given, inject these properties into the SFT files before running
the search
assumeSqrtSX: float, optional
Don't estimate noise-floors, but assume (stationary) per-IFO sqrt{SX}
Attributes
----------
symbol_dictionary: dict
Key, val pairs of the parameters (i.e. `F0`, `F1`), to Latex math
symbols for plots
unit_dictionary: dict
Key, val pairs of the parameters (i.e. `F0`, `F1`), and the
units (i.e. `Hz`)
transform_dictionary: dict
        Key, val pairs of the parameters (i.e. `F0`, `F1`), where the value is
        itself a dictionary which can contain the items `multiplier`,
        `subtractor`, or `unit` by which to transform and update the units.
"""
def __init__(
self,
theta_prior,
tref,
label,
outdir="data",
minStartTime=None,
maxStartTime=None,
sftfilepattern=None,
detectors=None,
nsteps=[100, 100],
nwalkers=100,
ntemps=1,
log10beta_min=-5,
theta_initial=None,
rhohatmax=1000,
binary=False,
BSGL=False,
SSBprec=None,
minCoverFreq=None,
maxCoverFreq=None,
injectSources=None,
assumeSqrtSX=None,
):
self.theta_prior = theta_prior
self.tref = tref
self.label = label
self.outdir = outdir
self.minStartTime = minStartTime
self.maxStartTime = maxStartTime
self.sftfilepattern = sftfilepattern
self.detectors = detectors
self.nsteps = nsteps
self.nwalkers = nwalkers
self.ntemps = ntemps
self.log10beta_min = log10beta_min
self.theta_initial = theta_initial
self.rhohatmax = rhohatmax
self.binary = binary
self.BSGL = BSGL
self.SSBprec = SSBprec
self.minCoverFreq = minCoverFreq
self.maxCoverFreq = maxCoverFreq
self.injectSources = injectSources
self.assumeSqrtSX = assumeSqrtSX
self.nsegs = None
if os.path.isdir(outdir) is False:
os.mkdir(outdir)
self._add_log_file()
logging.info(
("Set-up MCMC semi-coherent search for model {} on data" "{}").format(
self.label, self.sftfilepattern
)
)
self.pickle_path = "{}/{}_saved_data.p".format(self.outdir, self.label)
self._unpack_input_theta()
self.ndim = len(self.theta_keys)
if self.log10beta_min:
self.betas = np.logspace(0, self.log10beta_min, self.ntemps)
else:
self.betas = None
if args.clean and os.path.isfile(self.pickle_path):
os.rename(self.pickle_path, self.pickle_path + ".old")
self._log_input()
if self.nsegs:
self._set_likelihoodcoef()
else:
logging.info("Value `nsegs` not yet provided")
def _get_data_dictionary_to_save(self):
d = dict(
nwalkers=self.nwalkers,
ntemps=self.ntemps,
theta_keys=self.theta_keys,
theta_prior=self.theta_prior,
log10beta_min=self.log10beta_min,
BSGL=self.BSGL,
minStartTime=self.minStartTime,
maxStartTime=self.maxStartTime,
run_setup=self.run_setup,
)
return d
def update_search_object(self):
logging.info("Update search object")
self.search.init_computefstatistic_single_point()
def get_width_from_prior(self, prior, key):
if prior[key]["type"] == "unif":
return prior[key]["upper"] - prior[key]["lower"]
def get_mid_from_prior(self, prior, key):
if prior[key]["type"] == "unif":
return 0.5 * (prior[key]["upper"] + prior[key]["lower"])
def read_setup_input_file(self, run_setup_input_file):
with open(run_setup_input_file, "r+") as f:
d = pickle.load(f)
return d
def write_setup_input_file(
self,
run_setup_input_file,
NstarMax,
Nsegs0,
nsegs_vals,
Nstar_vals,
theta_prior,
):
d = dict(
NstarMax=NstarMax,
Nsegs0=Nsegs0,
nsegs_vals=nsegs_vals,
theta_prior=theta_prior,
Nstar_vals=Nstar_vals,
)
with open(run_setup_input_file, "w+") as f:
pickle.dump(d, f)
def check_old_run_setup(self, old_setup, **kwargs):
try:
truths = [val == old_setup[key] for key, val in kwargs.items()]
if all(truths):
return True
else:
logging.info("Old setup doesn't match one of NstarMax, Nsegs0 or prior")
except KeyError as e:
logging.info("Error found when comparing with old setup: {}".format(e))
return False
def init_run_setup(
self,
run_setup=None,
NstarMax=1000,
Nsegs0=None,
log_table=True,
gen_tex_table=True,
):
if run_setup is None and Nsegs0 is None:
raise ValueError(
"You must either specify the run_setup, or Nsegs0 and NStarMax"
" from which the optimal run_setup can be estimated"
)
if run_setup is None:
logging.info("No run_setup provided")
run_setup_input_file = "{}/{}_run_setup.p".format(self.outdir, self.label)
if os.path.isfile(run_setup_input_file):
logging.info(
"Checking old setup input file {}".format(run_setup_input_file)
)
old_setup = self.read_setup_input_file(run_setup_input_file)
if self.check_old_run_setup(
old_setup,
NstarMax=NstarMax,
Nsegs0=Nsegs0,
theta_prior=self.theta_prior,
):
logging.info(
"Using old setup with NstarMax={}, Nsegs0={}".format(
NstarMax, Nsegs0
)
)
nsegs_vals = old_setup["nsegs_vals"]
Nstar_vals = old_setup["Nstar_vals"]
generate_setup = False
else:
generate_setup = True
else:
generate_setup = True
if generate_setup:
nsegs_vals, Nstar_vals = optimal_setup_functions.get_optimal_setup(
NstarMax,
Nsegs0,
self.tref,
self.minStartTime,
self.maxStartTime,
self.theta_prior,
self.search.detector_names,
)
self.write_setup_input_file(
run_setup_input_file,
NstarMax,
Nsegs0,
nsegs_vals,
Nstar_vals,
self.theta_prior,
)
run_setup = [
((self.nsteps[0], 0), nsegs, False) for nsegs in nsegs_vals[:-1]
]
run_setup.append(((self.nsteps[0], self.nsteps[1]), nsegs_vals[-1], False))
else:
logging.info("Calculating the number of templates for this setup")
Nstar_vals = []
for i, rs in enumerate(run_setup):
rs = list(rs)
if len(rs) == 2:
rs.append(False)
if np.shape(rs[0]) == ():
rs[0] = (rs[0], 0)
run_setup[i] = rs
if args.no_template_counting:
Nstar_vals.append([1, 1, 1])
else:
Nstar = optimal_setup_functions.get_Nstar_estimate(
rs[1],
self.tref,
self.minStartTime,
self.maxStartTime,
self.theta_prior,
self.search.detector_names,
)
Nstar_vals.append(Nstar)
if log_table:
logging.info("Using run-setup as follows:")
logging.info("Stage | nburn | nprod | nsegs | Tcoh d | resetp0 | Nstar")
for i, rs in enumerate(run_setup):
Tcoh = (self.maxStartTime - self.minStartTime) / rs[1] / 86400
if Nstar_vals[i] is None:
vtext = "N/A"
else:
vtext = "{:0.3e}".format(int(Nstar_vals[i]))
logging.info(
"{} | {} | {} | {} | {} | {} | {}".format(
str(i).ljust(5),
str(rs[0][0]).ljust(5),
str(rs[0][1]).ljust(5),
str(rs[1]).ljust(5),
"{:6.1f}".format(Tcoh),
str(rs[2]).ljust(7),
vtext,
)
)
if gen_tex_table:
filename = "{}/{}_run_setup.tex".format(self.outdir, self.label)
with open(filename, "w+") as f:
f.write(r"\begin{tabular}{c|ccc}" + "\n")
f.write(
r"Stage & $N_\mathrm{seg}$ &"
r"$T_\mathrm{coh}^{\rm days}$ &"
r"$\mathcal{N}^*(\Nseg^{(\ell)}, \Delta\mathbf{\lambda}^{(0)})$ \\ \hline"
"\n"
)
for i, rs in enumerate(run_setup):
Tcoh = float(self.maxStartTime - self.minStartTime) / rs[1] / 86400
line = r"{} & {} & {} & {} \\" + "\n"
if Nstar_vals[i] is None:
Nstar = "N/A"
else:
Nstar = Nstar_vals[i]
line = line.format(
i,
rs[1],
"{:1.1f}".format(Tcoh),
helper_functions.texify_float(Nstar),
)
f.write(line)
f.write(r"\end{tabular}" + "\n")
if args.setup_only:
logging.info("Exit as requested by setup_only flag")
sys.exit()
else:
return run_setup
def run(
self,
run_setup=None,
proposal_scale_factor=2,
NstarMax=10,
Nsegs0=None,
create_plots=True,
log_table=True,
gen_tex_table=True,
fig=None,
axes=None,
return_fig=False,
window=50,
**kwargs
):
""" Run the follow-up with the given run_setup
Parameters
----------
        run_setup: list of tuples, optional
            List of `((nburn, nprod), nsegs, resetp0)` stages to run; if None,
            a setup is estimated from `NstarMax` and `Nsegs0`.
        proposal_scale_factor: float
            The proposal scale factor used by the sampler, see Goodman & Weare
            (2010). If the acceptance fraction is too low, you can raise it by
            decreasing the a parameter; and if it is too high, you can reduce
            it by increasing the a parameter [Foreman-Mackey (2013)].
create_plots: bool
If true, save trace plots of the walkers
window: int
The minimum number of autocorrelation times needed to trust the
result when estimating the autocorrelation time (see
            ptemcee.Sampler.get_autocorr_time for further details).
**kwargs:
Passed to _plot_walkers to control the figures
"""
self.nsegs = 1
self._set_likelihoodcoef()
self._initiate_search_object()
run_setup = self.init_run_setup(
run_setup,
NstarMax=NstarMax,
Nsegs0=Nsegs0,
log_table=log_table,
gen_tex_table=gen_tex_table,
)
self.run_setup = run_setup
self._estimate_run_time()
self.old_data_is_okay_to_use = self._check_old_data_is_okay_to_use()
if self.old_data_is_okay_to_use is True:
logging.warning("Using saved data from {}".format(self.pickle_path))
d = self.get_saved_data_dictionary()
self.samples = d["samples"]
self.lnprobs = d["lnprobs"]
self.lnlikes = d["lnlikes"]
self.all_lnlikelihood = d["all_lnlikelihood"]
self.chain = d["chain"]
self.nsegs = run_setup[-1][1]
return
nsteps_total = 0
for j, ((nburn, nprod), nseg, reset_p0) in enumerate(run_setup):
if j == 0:
p0 = self._generate_initial_p0()
p0 = self._apply_corrections_to_p0(p0)
elif reset_p0:
p0 = self._get_new_p0(sampler)
p0 = self._apply_corrections_to_p0(p0)
# self._check_initial_points(p0)
else:
p0 = sampler.chain[:, :, -1, :]
self.nsegs = nseg
self._set_likelihoodcoef()
self.search.nsegs = nseg
self.update_search_object()
self.search.init_semicoherent_parameters()
sampler = PTSampler(
ntemps=self.ntemps,
nwalkers=self.nwalkers,
dim=self.ndim,
logl=self.logl,
logp=self.logp,
logpargs=(self.theta_prior, self.theta_keys, self.search),
loglargs=(self.search,),
betas=self.betas,
a=proposal_scale_factor,
)
Tcoh = (self.maxStartTime - self.minStartTime) / nseg / 86400.0
logging.info(
(
"Running {}/{} with {} steps and {} nsegs " "(Tcoh={:1.2f} days)"
).format(j + 1, len(run_setup), (nburn, nprod), nseg, Tcoh)
)
sampler = self._run_sampler(
sampler, p0, nburn=nburn, nprod=nprod, window=window
)
logging.info(
"Max detection statistic of run was {}".format(
np.max(sampler.loglikelihood)
)
)
if create_plots:
fig, axes = self._plot_walkers(
sampler,
fig=fig,
axes=axes,
nprod=nprod,
xoffset=nsteps_total,
**kwargs
)
for ax in axes[: self.ndim]:
ax.axvline(nsteps_total, color="k", ls="--", lw=0.25)
nsteps_total += nburn + nprod
if create_plots:
nstep_list = np.array(
[el[0][0] for el in run_setup] + [run_setup[-1][0][1]]
)
mids = np.cumsum(nstep_list) - nstep_list / 2
mid_labels = ["{:1.0f}".format(i) for i in np.arange(0, len(mids) - 1)]
mid_labels += ["Production"]
for ax in axes[: self.ndim]:
axy = ax.twiny()
axy.tick_params(pad=-10, direction="in", axis="x", which="major")
axy.minorticks_off()
axy.set_xlim(ax.get_xlim())
axy.set_xticks(mids)
axy.set_xticklabels(mid_labels)
samples = sampler.chain[0, :, nburn:, :].reshape((-1, self.ndim))
lnprobs = sampler.logprobability[0, :, nburn:].reshape((-1))
lnlikes = sampler.loglikelihood[0, :, nburn:].reshape((-1))
all_lnlikelihood = sampler.loglikelihood
self.samples = samples
self.lnprobs = lnprobs
self.lnlikes = lnlikes
self.all_lnlikelihood = all_lnlikelihood
self._save_data(
sampler, samples, lnprobs, lnlikes, all_lnlikelihood, sampler.chain
)
if create_plots:
try:
fig.tight_layout()
except (ValueError, RuntimeError) as e:
logging.warning("Tight layout encountered {}".format(e))
if return_fig:
return fig, axes
else:
fig.savefig("{}/{}_walkers.png".format(self.outdir, self.label))
class MCMCTransientSearch(MCMCSearch):
""" MCMC search for a transient signal using ComputeFstat
See parent MCMCSearch for a list of all additional parameters, here we list
only the additional init parameters of this class.
"""
    symbol_dictionary = dict(
        F0=r"$f$",
        F1=r"$\dot{f}$",
        F2=r"$\ddot{f}$",
        Alpha=r"$\alpha$",
        Delta=r"$\delta$",
        transient_tstart=r"$t_\mathrm{start}$",
        transient_duration=r"$\Delta T$",
    )
unit_dictionary = dict(
F0="Hz",
F1="Hz/s",
F2="Hz/s$^2$",
Alpha=r"rad",
Delta="rad",
transient_tstart="s",
transient_duration="s",
)
transform_dictionary = dict(
transient_duration={
"multiplier": 1 / 86400.0,
"unit": "day",
"symbol": "Transient duration",
},
transient_tstart={
"multiplier": 1 / 86400.0,
"subtractor": "minStartTime",
"unit": "day",
"label": "Transient start-time \n days after minStartTime",
},
)
def _initiate_search_object(self):
logging.info("Setting up search object")
if not self.transientWindowType:
self.transientWindowType = "rect"
self.search = core.ComputeFstat(
tref=self.tref,
sftfilepattern=self.sftfilepattern,
minCoverFreq=self.minCoverFreq,
maxCoverFreq=self.maxCoverFreq,
detectors=self.detectors,
transientWindowType=self.transientWindowType,
minStartTime=self.minStartTime,
maxStartTime=self.maxStartTime,
BSGL=self.BSGL,
binary=self.binary,
injectSources=self.injectSources,
tCWFstatMapVersion=self.tCWFstatMapVersion,
)
if self.minStartTime is None:
self.minStartTime = self.search.minStartTime
if self.maxStartTime is None:
self.maxStartTime = self.search.maxStartTime
def logl(self, theta, search):
for j, theta_i in enumerate(self.theta_idxs):
self.fixed_theta[theta_i] = theta[j]
in_theta = copy.copy(self.fixed_theta)
in_theta[1] = in_theta[0] + in_theta[1]
if in_theta[1] > self.maxStartTime:
return -np.inf
twoF = search.get_fullycoherent_twoF(*in_theta)
return twoF / 2.0 + self.likelihoodcoef
def _unpack_input_theta(self):
full_theta_keys = [
"transient_tstart",
"transient_duration",
"F0",
"F1",
"F2",
"Alpha",
"Delta",
]
if self.binary:
full_theta_keys += ["asini", "period", "ecc", "tp", "argp"]
full_theta_keys_copy = copy.copy(full_theta_keys)
full_theta_symbols = [
r"$t_{\rm start}$",
r"$\Delta T$",
"$f$",
"$\dot{f}$",
"$\ddot{f}$",
r"$\alpha$",
r"$\delta$",
]
if self.binary:
            full_theta_symbols += ["asini", "period", "ecc", "tp", "argp"]
self.theta_keys = []
fixed_theta_dict = {}
for key, val in self.theta_prior.items():
if type(val) is dict:
fixed_theta_dict[key] = 0
self.theta_keys.append(key)
elif type(val) in [float, int, np.float64]:
fixed_theta_dict[key] = val
else:
raise ValueError(
"Type {} of {} in theta not recognised".format(type(val), key)
)
full_theta_keys_copy.pop(full_theta_keys_copy.index(key))
if len(full_theta_keys_copy) > 0:
raise ValueError(
("Input dictionary `theta` is missing the" "following keys: {}").format(
full_theta_keys_copy
)
)
self.fixed_theta = [fixed_theta_dict[key] for key in full_theta_keys]
self.theta_idxs = [full_theta_keys.index(k) for k in self.theta_keys]
self.theta_symbols = [full_theta_symbols[i] for i in self.theta_idxs]
idxs = np.argsort(self.theta_idxs)
self.theta_idxs = [self.theta_idxs[i] for i in idxs]
self.theta_symbols = [self.theta_symbols[i] for i in idxs]
self.theta_keys = [self.theta_keys[i] for i in idxs]
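# Illustrative usage sketch (added for context, not part of the original module): how a
# follow-up search might be configured with the parameters documented above. The SFT
# pattern, GPS times and prior bounds are hypothetical placeholders; a real search
# needs matching SFT data on disk.
if __name__ == "__main__":
    example_prior = {
        "F0": {"type": "unif", "lower": 29.9, "upper": 30.1},
        "F1": -1e-10,
        "F2": 0,
        "Alpha": 1.0,
        "Delta": 0.5,
    }
    follow_up = MCMCFollowUpSearch(
        theta_prior=example_prior,
        tref=1000000000,
        label="follow_up_example",
        outdir="data",
        sftfilepattern="data/*.sft",
        minStartTime=1000000000,
        maxStartTime=1000864000,
    )
    # Either pass an explicit run_setup of ((nburn, nprod), nsegs, resetp0) stages,
    # or let one be estimated from NstarMax and Nsegs0 as done here.
    follow_up.run(NstarMax=1000, Nsegs0=100)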
|
# Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
OpenMetadata REST Sink implementation for the ORM Profiler results
"""
from metadata.config.common import ConfigModel
from metadata.ingestion.api.common import Entity, WorkflowContext
from metadata.ingestion.api.sink import Sink, SinkStatus
from metadata.ingestion.ometa.ometa_api import OpenMetadata
from metadata.ingestion.ometa.openmetadata_rest import MetadataServerConfig
from metadata.orm_profiler.utils import logger
logger = logger()
class MetadataRestSinkConfig(ConfigModel):
api_endpoint: str = None
class MetadataRestSink(Sink[Entity]):
config: MetadataRestSinkConfig
status: SinkStatus
def __init__(
self,
ctx: WorkflowContext,
config: MetadataRestSinkConfig,
metadata_config: MetadataServerConfig,
):
super().__init__(ctx)
self.config = config
self.metadata_config = metadata_config
self.status = SinkStatus()
self.wrote_something = False
self.metadata = OpenMetadata(self.metadata_config)
@classmethod
def create(
cls, config_dict: dict, metadata_config_dict: dict, ctx: WorkflowContext
):
config = MetadataRestSinkConfig.parse_obj(config_dict)
metadata_config = MetadataServerConfig.parse_obj(metadata_config_dict)
return cls(ctx, config, metadata_config)
def get_status(self) -> SinkStatus:
return self.status
def close(self) -> None:
pass
def write_record(self, record: Entity) -> None:
pass
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 19 18:39:22 2018
@author: Paulo Andrade
"""
class LinearRegression:
    def __init__(self, x, t):
        """
        Class constructor
        """
        self.x = x  # values of X
        self.t = t  # values of t
        self.n = len(x)  # value of n
    def pending(self):
        """
        Return the slope of the fitted line
        """
        sumxt = 0.0  # sum of Xi*ti
        sumx = 0.0  # sum of Xi
        sumt = 0.0  # sum of ti
        sumt2 = 0.0  # sum of ti squared
        # Accumulate the sums
        for i in range(0, self.n):
            sumxt += self.x[i] * self.t[i]
            sumx += self.x[i]
            sumt += self.t[i]
            sumt2 += pow(self.t[i], 2)
        # Compute the slope
        b = ((self.n*sumxt) - (sumx*sumt)) / ((self.n*sumt2) - pow(sumt, 2))
        return b
    def a(self):
        """
        Return the intercept of the fitted line
        """
        sumx = 0.0  # sum of Xi
        sumt = 0.0  # sum of ti
        b = self.pending()
        # Accumulate the sums
        for i in range(0, self.n):
            sumx += self.x[i]
            sumt += self.t[i]
        # Compute the intercept
        a = (sumx/self.n) - (b*(sumt/self.n))
        return a
    def prediction(self, t):
        """
        Return the prediction of x for a given t
        """
        a = self.a()
        b = self.pending()
        xi = a + (b*t)
        return xi
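# Illustrative usage sketch (not part of the original file): fitting x as a function
# of t with the formulas above. With x = [2, 4, 6] and t = [1, 2, 3] the slope is 2,
# the intercept is 0, and the prediction at t = 4 is 8.
if __name__ == "__main__":
    lr = LinearRegression([2.0, 4.0, 6.0], [1.0, 2.0, 3.0])
    print("slope = %s" % lr.pending())              # expected 2.0
    print("intercept = %s" % lr.a())                # expected 0.0
    print("prediction(4) = %s" % lr.prediction(4))  # expected 8.0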
|
from app.models.db.account import Account
from app.models.db.role import Role
from app.models.db.policy import Policy
from app.models.db.user import User
from datetime import datetime
from unittest import TestCase
from unittest.mock import patch
test_account_object = {
"uuid" : "1234567890",
"foreign": True,
"name": "cloudsec",
"created_at": datetime.now(),
}
class TestAccount(TestCase):
def setUp(self):
self.account = Account(
uuid = test_account_object['uuid'],
foreign = test_account_object['foreign'],
name = test_account_object['name'],
created_at = test_account_object['created_at']
)
    def test_account_constructor(self):
        assert self.account.uuid == test_account_object['uuid']
        assert self.account.foreign == test_account_object['foreign']
        assert self.account.name == test_account_object['name']
        assert self.account.created_at == test_account_object['created_at']
        assert self.account.updated_at is None
def test_add_role_account(self):
role_1 = Role(
name="role_1",
arn="arn_role_1"
)
role_2 = Role(
name="role_2",
arn="arn_role_2"
)
self.account.roles.append(role_1)
assert len(self.account.roles) == 1
assert self.account.roles[0] == role_1
self.account.roles.append(role_2)
assert len(self.account.roles) == 2
def test_add_user_account(self):
        user_1 = User(
name="1",
arn="arn_1"
)
user_2 = User(
name="2",
arn="arn_2"
)
self.account.users.append(user_1)
assert self.account.users[0] == user_1
assert len(self.account.users) == 1
self.account.users.append(user_2)
assert len(self.account.users) == 2
def test_add_policy_to_account(self):
policy_1 = Policy(
name="1",
arn="arn_1"
)
policy_2 = Policy(
name="2",
arn="arn_2"
)
self.account.policies.append(policy_1)
assert self.account.policies[0] == policy_1
assert len(self.account.policies) == 1
self.account.policies.append(policy_2)
assert len(self.account.policies) == 2
@patch("app.models.db.policy.Policy")
@patch("app.models.db.policy.db")
def test_find_or_create(self, mock_db,mock_pol):
mock_pol.query.filter_by.return_value.first.return_value = "string"
pol = Policy.find_or_create("arn", "123123123")
assert pol == "string"
@patch("app.models.db.policy.Policy.query")
@patch("app.models.db.policy.db")
def test_find_or_create(self, mock_db,mock_pol):
mock_pol.filter_by.return_value.first.return_value = None
pol_new = Policy(arn="arn")
pol = Policy.find_or_create("arn", "123123123")
assert pol.arn == pol_new.arn
|
import os
top_dir = './caffe'
f = open('files.txt','a+')
d = open('directories.txt','a+')
EXCLUDED_SUFFIXES = ('mat', '.so', 'rc3', 'jpg', 'xml', 'del', 'png',
                     '.txt', '.pyc', '.o', '.d', 'pkl')
def os_list_dir(top_dir, d, f):
    # Recursively walk top_dir, writing kept file paths to f and directory paths to d.
    # Files whose names end with any of the excluded suffixes are skipped.
    for file in os.listdir(top_dir):
        file_path = os.path.abspath(os.path.join(top_dir, file))
        if os.path.isfile(file_path):
            if not file_path.endswith(EXCLUDED_SUFFIXES):
                print >>f, '%s' % file_path
        elif os.path.isdir(file_path):
            print >>d, '%s' % file_path
            os_list_dir(file_path, d, f)
os_list_dir(top_dir,d,f)
f.close()
|
import numpy as np
import torch
import torch.nn as nn
import torch_scatter
from .. import backbones_2d
from ..model_utils.misc import bilinear_interpolate_torch
from ..model_utils.weight_init import *
class HH3DVFE(nn.Module):
def __init__(self, model_cfg, num_point_features, voxel_size=None, point_cloud_range=None, **kwargs):
super().__init__()
self.point_cloud_range = torch.from_numpy(point_cloud_range).float().cuda()
self.model_cfg = model_cfg
self.feature_dim = model_cfg.FEATURE_DIM
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
self.bev_voxel_size = torch.from_numpy(np.array(model_cfg.VOXEL_SIZE_BEV)).float().cuda()
self.cyl_grid_shape = torch.from_numpy(np.array(model_cfg.GRID_SHAPE_CYL)).int().cuda()
self.cyl_range = torch.from_numpy(np.array(model_cfg.RV_RANGE)).float().cuda()
        # calculate cylindrical view voxel size
crop_range = torch.tensor(self.cyl_range[[2,3]] - self.cyl_range[[0,1]]).float().cuda()
self.cyl_voxel_size = crop_range / self.cyl_grid_shape
self.cyl_scale_xy = self.cyl_grid_shape[0] * self.cyl_grid_shape[1]
self.cyl_scale_y = self.cyl_grid_shape[1]
# calculate bird-eye view grid shape
self.bev_range = torch.from_numpy(np.array(point_cloud_range[[0,1,3,4]])).float().cuda()
crop_range = self.bev_range[[2,3]] - self.bev_range[[0,1]]
self.bev_grid_shape = (crop_range / self.bev_voxel_size).round().int()
self.bev_scale_xy = self.bev_grid_shape[0] * self.bev_grid_shape[1]
self.bev_scale_y = self.bev_grid_shape[1]
self.input_transform = nn.Sequential(
nn.Conv1d(model_cfg.INPUT_DIM, self.feature_dim, 1, stride=1, padding=0, bias=False),
nn.BatchNorm1d(self.feature_dim, eps=1e-3, momentum=0.01),
nn.ReLU(inplace=True),
nn.Conv1d(self.feature_dim, self.feature_dim*2, 1, stride=1, padding=0, bias=False),
nn.BatchNorm1d(self.feature_dim*2, eps=1e-3, momentum=0.01),
nn.ReLU(inplace=True)
)
self.mvf_pointnet = nn.Sequential(
nn.Conv1d(self.feature_dim*2, self.feature_dim, 1, stride=1, padding=0, bias=False),
nn.BatchNorm1d(self.feature_dim, eps=1e-3, momentum=0.01),
nn.ReLU(inplace=True)
)
cyl_net_init_type = model_cfg.get('CYL_NET_INIT_TYPE', 'kaiming_uniform')
self.cyl_net = backbones_2d.PV_NET[model_cfg.CYL_NET_NAME](self.feature_dim, cyl_net_init_type)
pointnet_init_type = model_cfg.get('POINTNET_INIT_TYPE', 'kaiming_uniform')
self.init_weights(pointnet_init_type)
def init_weights(self, init_type):
for module_list in [self.input_transform, self.mvf_pointnet]:
for m in module_list.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Conv2d):
if init_type == 'kaiming_uniform':
kaiming_init(m, distribution='uniform')
elif init_type == 'kaiming_normal':
kaiming_init(m, distribution='normal')
elif init_type == 'xavier':
xavier_init(m)
elif init_type =='caffe2_xavier':
caffe2_xavier_init(m)
elif isinstance(m, nn.BatchNorm2d):
constant_init(m, 1)
def get_output_feature_dim(self):
return self.feature_dim
def forward(self, batch_dict, **kwargs):
"""
Args:
batch_dict:
voxels: (num_voxels, max_points_per_voxel, C)
voxel_num_points: optional (num_voxels)
**kwargs:
Returns:
vfe_features: (num_voxels, C)
"""
batch_size = batch_dict['batch_size']
points = batch_dict['points'] # (batch_idx, x, y, z, r)
points_cyl = batch_dict['points_cyl'] # (phi, z, rho)
cyl_idxs = batch_dict['points_cyl_idxs']
bev_idxs = batch_dict['points_bev_idxs']
bev_coords = torch.floor(bev_idxs).int()
cyl_coords = torch.floor(cyl_idxs).int()
points_xyz = points[:, [1,2,3]]
points_feature = points[:, 4:]
        # get unique bird's-eye view and cylindrical view coordinates
bev_merge_coords = points[:, 0].int() * self.bev_scale_xy + bev_coords[:, 0] * \
self.bev_scale_y + bev_coords[:, 1]
bev_unq_coords, bev_unq_inv, bev_unq_cnt = torch.unique(bev_merge_coords, \
return_inverse=True, return_counts=True, dim=0)
cyl_merge_coords = points[:, 0].int() * self.cyl_scale_xy + cyl_coords[:, 0] * \
self.cyl_scale_y + cyl_coords[:, 1]
cyl_unq_coords, cyl_unq_inv, cyl_unq_cnt = torch.unique(cyl_merge_coords, \
return_inverse=True, return_counts=True, dim=0)
bev_f_center = points_xyz[:, [0,1]] - ((bev_coords.to(points_xyz.dtype) + 0.5) \
* self.bev_voxel_size + self.bev_range[[0,1]])
bev_f_mean = torch_scatter.scatter_mean(points_xyz, bev_unq_inv, dim=0)
bev_f_cluster = points_xyz - bev_f_mean[bev_unq_inv, :]
bev_f_cluster = bev_f_cluster[:, [0, 1]]
cyl_f_center = points_cyl[:, [0,1]] - ((cyl_coords.to(points_cyl.dtype) + 0.5) \
* self.cyl_voxel_size + self.cyl_range[[0,1]])
cyl_f_mean = torch_scatter.scatter_mean(points_cyl, cyl_unq_inv, dim=0)
cyl_f_cluster = points_cyl - cyl_f_mean[cyl_unq_inv, :]
cyl_f_cluster = cyl_f_cluster[:, [0, 1]]
distance = torch.sqrt(torch.sum(points_xyz**2, dim=1, keepdim=True))
mvf_input = torch.cat([points_xyz,
points_cyl,
bev_f_center,
cyl_f_center,
bev_f_cluster,
cyl_f_cluster,
distance,
points_feature
], dim=1).contiguous()
mvf_input = mvf_input.transpose(0, 1).unsqueeze(0)
pt_fea_in = self.input_transform(mvf_input)
pt_fea_cyl, pointwise_features = torch.chunk(pt_fea_in, 2, dim=1)
pt_fea_cyl = pt_fea_cyl.squeeze(0).transpose(0, 1)
cyl_fea_in = torch_scatter.scatter_max(pt_fea_cyl, cyl_unq_inv, dim=0)[0]
voxel_coords = torch.stack(
(cyl_unq_coords // self.cyl_scale_xy,
(cyl_unq_coords % self.cyl_scale_xy) // self.cyl_scale_y,
(cyl_unq_coords % self.cyl_scale_y) // 1, cyl_unq_coords % 1), dim=1
)
voxel_coords = voxel_coords[:, [0, 3, 2, 1]]
batch_cyl_features = []
for batch_idx in range(batch_size):
spatial_feature = torch.zeros(
self.feature_dim,
self.cyl_scale_xy,
dtype=cyl_fea_in.dtype,
device=cyl_fea_in.device)
batch_mask = voxel_coords[:, 0] == batch_idx
this_coords = voxel_coords[batch_mask, :]
indices = this_coords[:, 1] + this_coords[:, 2] * self.cyl_grid_shape[0] + this_coords[:, 3]
indices = indices.type(torch.long)
pillars = cyl_fea_in[batch_mask, :]
pillars = pillars.t()
spatial_feature[:, indices] = pillars
batch_cyl_features.append(spatial_feature)
batch_cyl_features = torch.stack(batch_cyl_features, 0)
batch_cyl_features = batch_cyl_features.view(batch_size, self.feature_dim,
self.cyl_grid_shape[1], self.cyl_grid_shape[0])
batch_cyl_features = batch_cyl_features.permute(0,1,3,2)
batch_cyl_features = self.cyl_net(batch_cyl_features)
# bilinear interpolate to get pointwise features
cyl_point_idxs = cyl_idxs
point_cyl_feature_list = []
for k in range(batch_size):
bs_mask = points[:, 0] == k
cur_cyl_point_idxs = cyl_point_idxs[bs_mask, :]
cur_cyl_features = batch_cyl_features[k].permute(1, 2, 0)
point_cyl_features = bilinear_interpolate_torch(cur_cyl_features, cur_cyl_point_idxs[:, 1], cur_cyl_point_idxs[:, 0])
point_cyl_feature_list.append(point_cyl_features)
point_cyl_features = torch.cat(point_cyl_feature_list, dim=0)
point_cyl_features = point_cyl_features.transpose(0, 1).unsqueeze(0)
mvf_pt_fea = torch.cat([pointwise_features, point_cyl_features], dim=1)
mvf_pt_fea = self.mvf_pointnet(mvf_pt_fea)
mvf_pt_fea = mvf_pt_fea.squeeze(0).transpose(0, 1)
bev_max_fea = torch_scatter.scatter_max(mvf_pt_fea, bev_unq_inv, dim=0)[0]
voxel_coords = torch.stack((bev_unq_coords // self.bev_scale_xy,
(bev_unq_coords % self.bev_scale_xy) // self.bev_scale_y,
bev_unq_coords % self.bev_scale_y,
torch.zeros(bev_unq_coords.shape[0]).to(bev_unq_coords.device).int()
), dim=1)
voxel_coords = voxel_coords[:, [0, 3, 2, 1]]
rv_proj_dict = {}
rv_proj_dict.update({
'rv_point_idxs': cyl_idxs,
'rv_unq_coords': cyl_unq_coords,
'rv_unq_inv': cyl_unq_inv,
'rv_unq_cnt': cyl_unq_cnt,
})
bev_proj_dict = {}
bev_proj_dict.update({
'bev_point_idxs': bev_idxs,
'bev_unq_coords': bev_unq_coords,
'bev_unq_inv': bev_unq_inv,
'bev_unq_cnt': bev_unq_cnt,
})
batch_dict.update({
'point_xyz': points_xyz,
'point_features': mvf_pt_fea,
'point_raw_features': pointwise_features, # (1, C, N1+N2+...)
'point_cyl_features': point_cyl_features, # (1, C, N1+N2+...)
'point_batch_inds': points[:, 0].int(),
'rv_proj_dict': rv_proj_dict,
'bev_proj_dict': bev_proj_dict,
'pillar_features': bev_max_fea.contiguous(),
'voxel_coords': voxel_coords.contiguous()
})
return batch_dict
|
from math import *
import json
waypoint_file = open('FlyMission.json','r+')
plan = json.load(waypoint_file)
print(plan)
waypoint_list = plan[0]['mission_waypoints']
obstacle_file = open('Obstacles.json','r')
obstacle_list = json.load(obstacle_file)['stationary_obstacles']
obstacle_file.close()
class Point:
def __init__(self, xval=0.0, yval=0.0, zval=0.0):
self.x = xval
self.y = yval
self.z = zval
def PrintMe(self):
print("x=" + str(self.x) + " y=" + str(self.y) + "z=" + str(self.z))
class Waypoint(Point):
def __init__(self, aname, xval=0.0, yval=0.0, zval=0.0):
self.name = aname
self.x = xval
self.y = yval
self.z = zval
def PrintMe(self):
print(self.name + "x=" + str(self.x) + " y=" + str(self.y) + "z=" + str(self.z))
class Circle:
def __init__(self, pt:Point, rad): #pt:Point
self.center = pt
self.radius = rad
def PrintMe(self):
print("x=" + str(self.center.x) + " y=" + str(self.center.y) + " r=" + str(self.radius))
class Obstacle(Circle):
def __init__(self, aname, pt:Point, rad): #pt:Point
self.name = aname
self.center = pt
self.radius = rad
self.height = pt.z
def PrintMe(self):
print(self.name + " x=" + str(self.center.x) + " y=" + str(self.center.y) + " r=" + str(self.radius) + "z=" + str(self.height))
class Line:
def __init__(self, m, yint):
self.slope = m
self.yInt = yint
def PrintMe(self):
print("m=" + str(self.slope) + " b=" + str(self.yInt))
def GetLinePts(pt1, pt2):
m = (pt2.y - pt1.y) / (pt2.x - pt1.x)
b = pt1.y - (m * pt1.x)
return(Line(m, b))
def GetLineSlope(pt, m):
b = pt.y - (m * pt.x)
return(Line(m, b))
# Solve Quadratic returns a list of solutions to the quadratic formula
def SolveQuadratic(a, b, c):
d = b**2-4*a*c # discriminant
if d < 0:
return ([])
elif d == 0:
s1 = (-b)/(2*a)
return ([s1])
else:
s1 = (-b+sqrt(d))/(2*a)
s2 = (-b-sqrt(d))/(2*a)
return([s1, s2])
def GetIntersectLineCirc(aline, circ, height):
# Need to solve quadratic formula
# First, define some shorthand
m = aline.slope
bi = aline.yInt
x = circ.center.x
y = circ.center.y
r = circ.radius
# print("m=" + str(m) + " bi=" + str(bi) + " x=" + str(x) + " y=" + str(y) + " r=" + str(r)) # debug
# Next, compute a, b, and c
a = m**2 + 1
b = 2 * (bi * m - y * m - x)
c = x**2 + y**2 + bi**2 - r**2 - 2 * bi * y
# print("a=" + str(a) + " b=" + str(b) + " c=" + str(c)) # debug
# Now, apply the quadratic formula to get the 2 solutions
solns = SolveQuadratic(a, b, c)
# Now generate the points and return them
if len(solns) == 0:
return([])
elif len(solns) == 1:
return([Point(solns[0], m * solns[0] + bi, height)])
elif len(solns) == 2:
return([Point(solns[0], m * solns[0] + bi, height), Point(solns[1], m * solns[1] + bi, height)])
else:
return (-1) # This should never happen
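# Hedged sanity-check helper (added for illustration, not used by the mission
# planner): for the line y = x and a circle of radius sqrt(2) centred at the
# origin, the quadratic above reduces to 2*x**2 - 2 = 0, so the intersections
# are (1, 1) and (-1, -1). Calling this by hand is a quick way to spot-check
# SolveQuadratic and GetIntersectLineCirc.
def CheckLineCircleIntersection():
    pts = GetIntersectLineCirc(Line(1.0, 0.0), Circle(Point(0.0, 0.0, 0.0), sqrt(2)), 0.0)
    return [(round(p.x, 6), round(p.y, 6)) for p in pts]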
def Midpoint(pt1, pt2):
return(Point((pt1.x + pt2.x) / 2, (pt1.y + pt2.y) / 2, (pt1.z + pt2.z) / 2))
def GetAvoidPoints(w1, w2, o1):
# Step 1: Find intersecting points between waypoint line and buffer circle
wline = GetLinePts(w1, w2)
# print("Waypoint line") # debug
# wline.PrintMe() # debug
SafetyMargin = o1.radius * 0.2
bcirc = Circle(o1.center, o1.radius + SafetyMargin)
# print("Buffer circle") # debug
# bcirc.PrintMe() # debug
aver_z = (w1.z + w2.z) / 2 #average height of w1 and w2
iPts = GetIntersectLineCirc(wline, bcirc, aver_z)
    # Important! Only treat this as a crossing if the intersection points lie between the two waypoints.
minx = min(w1.x, w2.x)
maxx = max(w1.x, w2.x)
miny = min(w1.y, w2.y)
maxy = max(w1.y, w2.y)
for pt in iPts:
if pt.x > maxx or pt.x < minx or pt.y > maxy or pt.y < miny:
return([])
# print("Intersecting points") # debug
# PrintPointList(iPts) # debug
# Step 2: Check how many intersections there are
    if len(iPts) > 2:
        print("Error")
        return(-1)
if len(iPts) == 0:
return([])
if len(iPts) > 0:
# Step 3: Compute the midpoint of the secant line
if len(iPts) == 1:
midPt = iPts[0]
else: # Two intersection points are found
midPt = Midpoint(iPts[0], iPts[1])
# Step 4: Get slope of perpendicular line
if wline.slope != 0:
pSlope = -1/wline.slope
else:
pSlope = 1000.0
# Step 5: Generate perpendicular line and double safety circle
pline = GetLineSlope(midPt, pSlope)
SafetyMargin = o1.radius * 0.2
bcirc2 = Circle(o1.center, o1.radius + 2 * SafetyMargin)
# Step 6: Find the intersection points and return them
return (GetIntersectLineCirc(pline, bcirc2, aver_z))
def checkSafe(pt, o):
# check if the points in the range of the obstacle
margin = o.radius * 0.2
return not (o.center.x - o.radius - margin < pt.x and pt.x < o.center.x + o.radius + margin and \
o.center.y - o.radius - margin < pt.y and pt.y < o.center.y + o.radius + margin and pt.z < o.height)
def getSafePts(pts, w2, o1):
safePts = []
for pt in pts:
#check new line between new waypoint and next waypoint
w1 = Waypoint("new", pt.x, pt.y, pt.z)
points = GetAvoidPoints(w1, w2, o1)
if points != []:
continue
#check new waypoint with other obstacle
if all(checkSafe(pt, o) for o in ObstacleList):
safePts.append(pt)
# safePts.add(pt)
if len(safePts) == 0:
#reduce the margin but for now just return pts
return pts
return safePts
def FixSingleSegment():
global WaypointSeq
prevPt = WaypointSeq[0]
for i in range(1, len(WaypointSeq)):
for ob in ObstacleList:
# height checking
min_h = min(prevPt.z, WaypointSeq[i].z)
if min_h > ob.center.z + 20:
continue
averg_h = (prevPt.z + WaypointSeq[i].z) / 2
aPts = GetAvoidPoints(prevPt, WaypointSeq[i], ob)
if len(aPts) > 0: # Crossing
#check aPts position
safePts = getSafePts(aPts, WaypointSeq[i], ob)
WaypointSeq.insert(i, safePts[0])
return(False)
prevPt = WaypointSeq[i]
return(True)
def SolveProblem():
done = False
print('waypt\n',WaypointSeq)
while not(done):
DrawSolution(WaypointSeq, ObstacleList)
done = FixSingleSegment()
#######################################################
# Test Code
#######################################################
WaypointSeq = []
ObstacleList = []
# TestInitProblem just creates a set of waypoints and obstacles for testing
def create_waypoints(waypt_list):
return [Waypoint('w'+str(waypt['order']),waypt['latitude'],waypt['longitude'],waypt['altitude_msl']) for waypt in sorted(waypt_list,key = lambda x:x['order'])]
def create_obstacles(obs_list):
return [Obstacle('o'+str(i),Point(obs['latitude'],obs['longitude'],obs['cylinder_height']),obs['cylinder_radius']) for i,obs in enumerate(obs_list)]
def TestInitProblem():
global WaypointSeq
global ObstacleList
WaypointSeq = [Waypoint('w1', 10, 500, 50), Waypoint('w2', 600, 550, 100), Waypoint('w3', 1200, 500, 200)]
ObstacleList = [Obstacle('o1', Point(500,500,50), 50), Obstacle('o2', Point(1000,500,75), 50)]
WaypointSeq = [Waypoint('w1', 10, 500, 50), Waypoint('w2', 1000, 550, 100), Waypoint('w3', 1400, 500, 200)]
ObstacleList = [Obstacle('o1', Point(500,500,75), 50), Obstacle('o2', Point(1200,500,75), 50)] #
WaypointSeq = create_waypoints(waypoint_list)
ObstacleList =create_obstacles(obstacle_list)
def PrintWaypointSeq(wseq):
print("Waypoint Sequence")
for w in wseq:
w.PrintMe()
def PrintObstacleList(oseq):
print("Obstacle List")
for o1 in oseq:
o1.PrintMe()
def PrintPointList(pseq):
for p in pseq:
p.PrintMe()
#######################################################
# Drawing code (still test code)
#######################################################
from tkinter import *
def InitGui():
master = Tk()
w = Canvas(master, width=2000, height=1000)
w.pack()
return(w)
def StartGui():
mainloop()
def DrawWaypoint(myCanvas, pt):
PR = 3
x = pt.x
y = pt.y
z = pt.z
item = myCanvas.create_oval(pt.x-PR, pt.y-PR, pt.x+PR, pt.y+PR, fill="blue", outline="black")
###test zone
myCanvas.create_text(x+2*PR,y+2*PR,text="height: {}".format(z),fill="green")
def DrawObstacle(myCanvas, o1):
x = o1.center.x
y = o1.center.y
r = o1.radius
h = o1.center.z
myCanvas.create_oval(x-r, y-r, x+r, y+r, fill="red", outline="black")
myCanvas.create_text(x-r-5,y-r-5,text="height: {}".format(h))
def DrawLineSeg(myCanvas, pt1, pt2):
myCanvas.create_line(pt1.x, pt1.y, pt2.x, pt2.y, fill="blue")
def DrawWaypointSeq(myCanvas, wseq):
DrawWaypoint(myCanvas, wseq[0])
prevPt = wseq[0]
for i in range(1, len(wseq)):
DrawWaypoint(myCanvas, wseq[i])
DrawLineSeg(myCanvas, prevPt, wseq[i])
prevPt = wseq[i]
def DrawSolution(wseq, olist):
w = InitGui()
DrawWaypointSeq(w, wseq)
for ob in olist:
DrawObstacle(w, ob)
StartGui()
### Some test code
TestInitProblem()
#x = GetAvoidPoints(WaypointSeq[0], WaypointSeq[1], ObstacleList[0])
#PrintWaypointSeq(WaypointSeq)
#PrintObstacleList(ObstacleList)
#main()
#DrawSolution(WaypointSeq, ObstacleList)
SolveProblem()
plan[0]['mission_waypoints'] = [{'altitude_msl':w.z,'latitude':w.x,'longitude':w.y,'order':i+1} for i,w in enumerate(WaypointSeq)]
waypoint_file.seek(0)
json.dump(plan,waypoint_file)
waypoint_file.truncate()
waypoint_file.close()
#DrawSolution(WaypointSeq, ObstacleList)
|
from setuptools import setup, find_packages
from distutils.extension import Extension
try:
from Cython.Distutils import build_ext
except ImportError:
use_cython = False
else:
use_cython = True
cmdclass = {}
ext_modules = []
if use_cython:
ext_modules.append(Extension("pydepta.trees_cython", ['pydepta/trees_cython.pyx']))
cmdclass.update({'build_ext': build_ext})
else:
ext_modules.append(Extension("pydepta.trees_cython", ['pydepta/trees_cython.c']))
setup(name='pydepta',
version='0.2',
description="A Python implementation of DEPTA",
long_description="A Python implementation of DEPTA (Data Extraction with Partial Tree Alignment)",
author="Terry Peng",
author_email="pengtaoo@gmail.com",
install_requires=['w3lib', 'scrapely'],
packages=find_packages(),
cmdclass=cmdclass,
ext_modules=ext_modules
)
|
# -*- coding: utf-8 -*-
"""
pytests for resource handlers
"""
import h5py
import numpy as np
import os
import pytest
from rex import TESTDATADIR
from rex.renewable_resource import NSRDB
def get_baseline(path, dset, ds_slice):
"""
Extract baseline data
"""
with h5py.File(path, mode='r') as f:
arr = f[dset][...]
return arr[ds_slice]
@pytest.mark.parametrize('ds_slice',
[(slice(None), list(range(5, 100, 20))),
(slice(None), [23, 24, 27, 21, 1, 2, 7, 5]),
(list(range(8)), [23, 24, 27, 21, 1, 2, 7, 5]),
([230, 240, 270, 210, 1, 2, 7, 5],
[23, 24, 27, 21, 1, 2, 7, 5]),
(10, [23, 24, 27, 21, 1, 2, 7, 5]),
([230, 240, 270, 210, 1, 2, 7, 5], slice(None)),
([230, 240, 270, 210, 1, 2, 7, 5], list(range(8))),
([230, 240, 270, 210, 1, 2, 7, 5], 10),
(10, 10),
(list(range(5)), list(range(5)))])
def test_2d_list_gets(ds_slice):
"""
Test advanced list gets
"""
path = os.path.join(TESTDATADIR, 'nsrdb/nsrdb_wspd_chunked_2012.h5')
dset = 'wind_speed'
baseline = get_baseline(path, dset, ds_slice)
with NSRDB(path, unscale=False) as f:
dset_slice = (dset, ) + ds_slice
test = f[dset_slice]
assert np.allclose(baseline, test)
@pytest.mark.parametrize('ds_slice',
[(slice(None), list(range(5))),
(slice(None), [2, 3, 1, 4]),
(list(range(4)), [2, 3, 1, 4]),
(9, [2, 3, 1, 4]),
([2, 3, 1, 4], 9),
(slice(None), [2, 3, 1, 4], 9),
(9, [2, 3, 1, 4], 9),
(9, [2, 3, 1, 4], 9, [2, 3, 1, 4]),
(9, [2, 3, 1, 4], slice(None), [2, 3, 1, 4]),
(9, [2, 3, 1, 4], slice(None), 7),
([2, 3, 1, 4], [2, 3, 1, 4], [2, 3, 1, 4], 8),
([2, 3, 1, 4], [2, 3, 1, 4], [2, 3, 1, 4],
[2, 3, 1, 4]),
([2, 3, 1, 4], 8, [2, 3, 1, 4], 8),
(slice(None), slice(None), slice(None),
[2, 3, 1, 4]),
# These fail due to a numpy bug
# (8, slice(None), slice(None), [2, 3, 1, 4]),
# (slice(None), [2, 3, 1, 4], slice(None), 8),
])
def test_4d_list_gets(ds_slice):
"""
Test advanced list gets
"""
path = os.path.join(TESTDATADIR, 'wave/test_virutal_buoy.h5')
dset = 'directional_wave_spectrum'
baseline = get_baseline(path, dset, ds_slice)
with NSRDB(path, unscale=False) as f:
dset_slice = (dset, ) + ds_slice
test = f[dset_slice]
assert np.allclose(baseline, test)
def test_index_error():
"""
test incompatible list IndexError
"""
path = os.path.join(TESTDATADIR, 'nsrdb/nsrdb_wspd_chunked_2012.h5')
dset = 'wind_speed'
with pytest.raises(IndexError):
bad_slice = (list(range(5)), list(range(10)))
with NSRDB(path, unscale=False) as f:
dset_slice = (dset, ) + bad_slice
f[dset_slice] # pylint: disable=W0104
def execute_pytest(capture='all', flags='-rapP'):
"""Execute module as pytest with detailed summary report.
Parameters
----------
capture : str
Log or stdout/stderr capture option. ex: log (only logger),
all (includes stdout/stderr)
flags : str
Which tests to show logs and results for.
"""
fname = os.path.basename(__file__)
pytest.main(['-q', '--show-capture={}'.format(capture), fname, flags])
if __name__ == '__main__':
execute_pytest()
|
import sys
def input():
return sys.stdin.readline()[:-1]
N = int(input())
plus = []
minus = []
for _ in range(N):
x, y = map(int, input().split())
plus.append(x+y)
minus.append(x-y)
print(max(max(plus)-min(plus), max(minus)-min(minus)))
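# Hedged cross-check (added for illustration, never called): the one-liner above
# relies on the identity |xi - xj| + |yi - yj| = max(|ui - uj|, |vi - vj|) with
# u = x + y and v = x - y, so the answer is the larger spread of the two rotated
# coordinates. A brute-force O(N^2) reference for small inputs would be:
def max_manhattan_brute_force(points):
    return max(abs(xi - xj) + abs(yi - yj)
               for xi, yi in points
               for xj, yj in points)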
|
# -*- coding: utf-8 -*-
"""Base Sender, father of all other providers"""
import random
import threading
import time
from .template_parser import TemplateParser
class BaseSender(threading.Thread):
"""Base provider main class"""
template = None
freq = None
prob = 100
generator = None
def __init__(self, engine, template, **kwargs):
threading.Thread.__init__(self)
self.engine = engine
self.template = str(template)
self.prob = kwargs.get('prob', 100)
self.freq = kwargs.get('freq', (1, 1))
self.date_format = kwargs.get('date_format', "%Y-%m-%d %H:%M:%S.%f")
self.interactive = kwargs.get('interactive', False)
self.simulation = kwargs.get('simulation', False)
self.dont_remove_microseconds = kwargs.get('dont_remove_microseconds',
False)
self.parser = TemplateParser()
self.date_generator = kwargs.get('date_generator', None)
def process(self, date_generator=None, **kwargs):
"""Process template"""
return self.parser.process(self.template, date_generator, **kwargs)
def wait(self):
"""Time to wait between events"""
# freq[0] is the minimum
# freq[1] is the maximum
if self.freq[0] == self.freq[1]:
secs = self.freq[0]
elif self.freq[1] < self.freq[0]:
secs = random.uniform(self.freq[1], self.freq[0])
else:
secs = random.uniform(self.freq[0], self.freq[1])
time.sleep(secs)
def probability(self):
"""Calculate probability"""
k = random.randint(0, 100)
if k <= int(self.prob):
return True
return False
def run(self):
"""Run example (for override)"""
while True:
if self.probability():
# Do something
pass
self.wait()
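# Illustrative sketch (an addition, not one of the original providers): a minimal
# subclass that overrides run() to emit one rendered template per cycle. The
# engine.send() call is an assumption for this example; concrete providers define
# how rendered events are actually delivered.
class ExampleSender(BaseSender):
    """Toy provider that pushes the processed template through its engine."""
    def run(self):
        while True:
            if self.probability():
                # process() renders the template using the configured date generator
                self.engine.send(self.process(self.date_generator))
            self.wait()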
|
# RUN: %PYTHON %s 2>&1 | FileCheck %s
# This file contains small benchmarks with reasonably-sized problem/tiling sizes
# and codegen options.
from ..core.experts import *
from ..core.harness import *
from ..core.transforms import *
from ..core.utils import *
from .definitions import CopyProblem
from typing import List
import numpy as np
fun_name = 'copy_1d'
op_name = 'linalg.copy'
################################################################################
### Compilation strategies.
################################################################################
# Problem size-specific transformation parameters: the tile sizes are the largest
# divisible entries that fit within the L1 and register tile-size bounds.
def all_experts(fun_name: str, problem_sizes: List[int]):
sizes1 = l1_2d_divisible_tile_sizes(problem_sizes)
sizes_for_register_tiling = [ \
ts if ts > 0 else s for (s, ts) in zip(problem_sizes, sizes1) \
]
sizes2 = register_2d_divisible_tile_sizes(sizes_for_register_tiling)
# Before bufferization, the IR only has a tensor.extract_slice /
# tensor.insert_slice pair.
# Bufferization then properly introduces copy ops (implemented with
# linalg.generic)
  # We want to make these copies more efficient.
# In the case of a single copy benchmark it is the one true thing to optimize.
return [
# Note: `\` char at the end of next line prevents formatter reflows, keep it.
e.print_ir(after_all=False, at_begin=False, llvm=False) for e in [ \
Tile(fun_name=fun_name,
op_name=op_name,
tile_sizes=sizes2)
.then(Bufferize())
.then(Vectorize(fun_name=fun_name, op_name=''))
.then(LowerVectors())
.then(LowerToLLVM())
]
]
################################################################################
### Problem instantiations.
################################################################################
keys = ['m', 'n']
copy_2D_perf_search_list = [ \
[100, 32], # sweet spot for prefetchers, seems to maximize L1 BW @ 295GB/s
[ 50, 272], # 10% L2 load
[100, 272], # 20% L2 load
[150, 272], # 30% L2 load
[200, 272], # 40% L2 load
[250, 272], # 50% L2 load
[300, 272], # 60% L2 load
[350, 272], # 70% L2 load
[400, 272], # 80% L2 load
[450, 272], # 90% L2 load
[500, 272], # 100% L2 load
[5000, 272], # 40% L3 load
[10000, 272], # 80% L3 load
[15000, 272], # 120% L3 load
[30000, 272], # DRAM (2.4x L3 load)
[300000, 272], # DRAM (24x L3 load)
]
copy_2D_perf_relevant_sizes = [
[int(112 / 2) * int(112 / 2), 32 * 4], # approx. depthwise_conv_2d size
]
# CHECK-NOT: FAILURE
def main():
n_iters = 100
for problem_sizes in copy_2D_perf_search_list:
test_harness(lambda s, t: CopyProblem(dims=keys), [[np.float32] * 2],
test_sizes(keys, [problem_sizes]),
all_experts(fun_name, problem_sizes),
n_iters=n_iters,
function_name=fun_name,
dump_ir_to_file='/tmp/abc.mlir',
dump_obj_to_file='/tmp/abc.o')
if __name__ == '__main__':
main()
|
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib import messages
from django.core.mail import send_mail, EmailMessage
from profiles.models import User
from .models import Internship, Application
from .forms import CreateInternshipForm, ApplyToInternshipForm
from teacher.views import teacher_login_required
from student.views import student_login_required
from portfolio.settings import EMAIL_HOST_USER
from datetime import date
@teacher_login_required
def create_internship(request, pk=None):
if pk:
title = 'Edit internship:'
internship = get_object_or_404(Internship, pk=pk)
else:
title = 'Create internship:'
internship = Internship()
internship.created_by = request.user
if request.method == "POST":
form = CreateInternshipForm(request.POST, instance=internship)
else:
form = CreateInternshipForm(instance=internship)
if request.method == "POST" and form.is_valid():
form.save()
students = User.objects.filter(is_student=True)
student_emails = list([student.email for student in students])
send_mail('New internship at {}: {}'.format(internship.company_name, internship.position),
internship.description,
'help.portfolio.apps@gmail.com',
student_emails,
)
messages.success(request, "Process finished successfully")
return redirect('internships')
return render(request, template_name='internship_edit.html', context={'form': form, 'title': title})
@student_login_required
def apply_to_internship(request, intern_id, pk=None):
title = 'Apply to internship:'
application = Application()
internship = Internship.objects.get(id=intern_id)
application.internship = internship
application.applicant = request.user.student
internship.applicants.add(request.user.student)
if request.method == "POST":
form = ApplyToInternshipForm(request.POST, request.FILES, instance=application)
else:
form = ApplyToInternshipForm(instance=application)
if request.method == "POST" and form.is_valid():
form.save()
messages.success(request, "Process finished successfully")
return redirect('internships')
return render(request, template_name='internship_apply.html', context={'form': form, 'title': title,
'intern_id': intern_id})
@student_login_required
def edit_application(request, intern_id):
title = 'Edit application:'
    curr_internship = Internship.objects.get(id=intern_id)
    my_app = Application.objects.get(internship=curr_internship, applicant=request.user.student)
application = get_object_or_404(Application, pk=my_app.id)
if application.sent:
messages.error(request, "You can't edit application that has been already sent")
return redirect('internships')
if curr_internship.deadline < date.today():
messages.error(request, "This internship is already overdue")
return redirect('internships')
if request.method == "POST":
form = ApplyToInternshipForm(request.POST, request.FILES, instance=application)
else:
form = ApplyToInternshipForm(instance=application)
if request.method == "POST" and form.is_valid():
form.save()
messages.success(request, "Process finished successfully")
return redirect('internships')
return render(request, template_name='internship_apply.html', context={'form': form, 'title': title,
'intern_id': intern_id})
@student_login_required
def apply_outer_intern(request, pk):
internship = get_object_or_404(Internship, pk=pk)
if internship.deadline < date.today():
messages.error(request, "This internship is already overdue")
return redirect('internships')
internship.applicants.add(request.user.student)
application, created = Application.objects.get_or_create(internship=internship,
applicant=request.user.student)
application.save()
return redirect(internship.link)
@student_login_required
def send_application(request, intern_id):
internship = Internship.objects.get(id=intern_id)
try:
application = Application.objects.get(internship=internship)
application.sent = True
application.save()
header = 'New application to {} internship'.format(internship.company_name)
student_name = request.user.first_name + ' ' + request.user.last_name
letter_body = '''
{} application to {} position in {}
Motivation letter:
{}
Find CV attached
'''.format(student_name, internship.position, internship.company_name,
application.motivation_letter)
teacher_email = [internship.created_by.email]
email = EmailMessage(
header, # letter header
letter_body, # letter body
EMAIL_HOST_USER, # from
teacher_email, # to
)
email.attach_file(application.cv.path)
email.send()
return redirect('internships')
except Application.DoesNotExist:
messages.warning(request, "Please save your application before sending it")
return redirect('apply_to_internship', intern_id)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ecl import utils
from ecl import resource2
from ecl.storage import storage_service
class Volume(resource2.Resource):
resource_key = 'volume'
resources_key = 'volumes'
base_path = '/volumes'
service = storage_service.StorageService()
# capabilities
allow_list = True
allow_get = True
allow_create = True
allow_delete = True
allow_update = True
# Properties
#: id of volume(UUID)
id = resource2.Body('id')
#: status of volume
status = resource2.Body('status')
#: name of volume
name = resource2.Body('name')
#: description of volume
description = resource2.Body('description')
#: The size of volume in gigabyte
size = resource2.Body('size', type=int)
#: The provisioned IOPS/GB for volume
iops_per_gb = resource2.Body('iops_per_gb', type=int)
    #: Array of initiator IQNs that can access this volume
initiator_iqns = resource2.Body('initiator_iqns', type=list)
#: Initiator's secret (password) for CHAP auth of iSCSI
initiator_secret = resource2.Body('initiator_secret')
#: Target's secret (password) for CHAP auth of iSCSI
target_secret = resource2.Body('target_secret')
#: Array of Snapshot IDs taken from this volume
snapshot_ids = resource2.Body('snapshot_ids', type=list)
#: Array of IPv4 addresses of the volume.
target_ips = resource2.Body('target_ips', type=list)
#: One or more metadata key and value pairs to associate with the volume.
metadata = resource2.Body('metadata', type=dict)
#: storage ID (UUID) volume belongs to
virtual_storage_id = resource2.Body('virtual_storage_id')
#: An availability_zone in which the volume belongs to
availability_zone = resource2.Body('availability_zone')
#: Creation timestamp of volume
created_at = resource2.Body('created_at')
#: update timestamp of volume
updated_at = resource2.Body('updated_at')
#: error description of volume
error_message = resource2.Body('error_message')
#: The provisioned throughput for volume in MB/s
throughput = resource2.Body('throughput', type=int)
    #: Array of IPv4 CIDRs that can access this volume
export_rules = resource2.Body('export_rules', type=list)
#: Percentage of Used Snapshots
percentage_snapshot_reserve_used = \
resource2.Body('percentage_snapshot_reserve_used', type=int)
def create(self, session, **attrs):
body = {"volume":attrs}
resp = session.post(
self.base_path, endpoint_filter=self.service,
json=body,
headers={"Accept": "application/json"}
)
self._translate_response(resp, has_body=True)
return self
def update(self, session, volume_id, has_body=True, **attrs):
uri = utils.urljoin(self.base_path, volume_id)
body = {"volume": attrs}
args = {'json': body}
resp = session.put(uri, endpoint_filter=self.service, **args)
self._translate_response(resp, has_body)
return self
class VolumeDetail(Volume):
base_path = '/volumes/detail'
allow_get = False
allow_create = False
allow_delete = False
allow_update = False
|
# Copyright (c) 2012-2022, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
from . import AWSObject, AWSProperty, PropsDictType
class SnsChannelConfig(AWSProperty):
"""
`SnsChannelConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-devopsguru-notificationchannel-snschannelconfig.html>`__
"""
props: PropsDictType = {
"TopicArn": (str, False),
}
class NotificationChannelConfig(AWSProperty):
"""
`NotificationChannelConfig <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-devopsguru-notificationchannel-notificationchannelconfig.html>`__
"""
props: PropsDictType = {
"Sns": (SnsChannelConfig, False),
}
class NotificationChannel(AWSObject):
"""
`NotificationChannel <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-devopsguru-notificationchannel.html>`__
"""
resource_type = "AWS::DevOpsGuru::NotificationChannel"
props: PropsDictType = {
"Config": (NotificationChannelConfig, True),
}
class CloudFormationCollectionFilter(AWSProperty):
"""
`CloudFormationCollectionFilter <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-devopsguru-resourcecollection-cloudformationcollectionfilter.html>`__
"""
props: PropsDictType = {
"StackNames": ([str], False),
}
class ResourceCollectionFilter(AWSProperty):
"""
`ResourceCollectionFilter <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-devopsguru-resourcecollection-resourcecollectionfilter.html>`__
"""
props: PropsDictType = {
"CloudFormation": (CloudFormationCollectionFilter, False),
}
class ResourceCollection(AWSObject):
"""
`ResourceCollection <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-devopsguru-resourcecollection.html>`__
"""
resource_type = "AWS::DevOpsGuru::ResourceCollection"
props: PropsDictType = {
"ResourceCollectionFilter": (ResourceCollectionFilter, True),
}
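# Illustrative usage sketch (not part of the autogenerated resource definitions):
# wiring a NotificationChannel to an SNS topic; the account id and topic name in
# the ARN below are hypothetical placeholders.
if __name__ == "__main__":
    channel = NotificationChannel(
        "ExampleDevOpsGuruChannel",
        Config=NotificationChannelConfig(
            Sns=SnsChannelConfig(
                TopicArn="arn:aws:sns:us-east-1:123456789012:example-topic"
            )
        ),
    )
    print(channel.to_dict())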
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\relationships\relationship_bit_add.py
# Compiled at: 2020-06-09 03:31:35
# Size of source mod 2**32: 3429 bytes
from relationships.relationship_track import RelationshipTrack
from interactions.utils.loot_basic_op import BaseLootOperation
from relationships.relationship_bit import RelationshipBit
from sims4.tuning.tunable import TunableReference, TunableRange, TunableList, TunableTuple, Tunable
import interactions.utils, services, sims4
class RelationshipBitOnFilteredSims(BaseLootOperation):
FACTORY_TUNABLES = {'rel_bits':TunableList(description='\n List of relationship bits to add onto the sims that match the filter.\n ',
tunable=RelationshipBit.TunablePackSafeReference(description='\n A relationship bit to add onto the sims that match the filter.\n ')),
'relationship_score':Tunable(description='\n The relationship score to add to sims that match the filter.\n ',
default=1,
tunable_type=int),
'filter_settings':TunableTuple(sim_filter=TunableReference(description='\n A filter to apply on the sim population.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.SIM_FILTER))),
desired_sim_count=TunableRange(description='\n The desired number of Sims to add rel bits to.\n ',
tunable_type=int,
default=1,
minimum=1))}
def __init__(self, rel_bits, relationship_score, filter_settings, **kwargs):
        super().__init__(**kwargs)
self._rel_bits = rel_bits
self._rel_score = relationship_score
self._filter_settings = filter_settings
@property
def loot_type(self):
return interactions.utils.LootType.RELATIONSHIP_BIT
def _apply_to_subject_and_target(self, subject, target, resolver):
relationship_tracker = subject.relationship_tracker
def filter_callback(filter_results, bouncer_request):
for result in filter_results:
for rel_bit in self._rel_bits:
relationship_tracker.add_relationship_score(result.sim_info.sim_id, self._rel_score)
relationship_tracker.add_relationship_bit(result.sim_info.sim_id, rel_bit)
filter_service = services.sim_filter_service()
filter_service.submit_matching_filter(number_of_sims_to_find=(self._filter_settings.desired_sim_count), sim_filter=(self._filter_settings.sim_filter),
callback=filter_callback,
blacklist_sim_ids={
subject.id},
gsi_source_fn=(lambda : 'RelationshipBitOnFilteredSims Loot: Adding {} to filtered sims'.format(str(self._rel_bits))))
|
from tinkoff_voicekit_client.STT.client_stt import ClientSTT
from tinkoff_voicekit_client.TTS.client_tts import ClientTTS
from tinkoff_voicekit_client.Operations import ClientOperations
from tinkoff_voicekit_client.uploader import Uploader
|