content stringlengths 5 1.05M |
|---|
#Crie um programa que leia vários números inteiros pelo teclado. No final da execução,
# mostre a média entre todos os valores e qual foi o maior e o menor valores lidos.
# O programa deve perguntar ao usuário se ele quer ou não continuar a digitar valores.
'''n = int(input('Digite um número:'))
soma = n
cont = 1
media = 0
maior = n
menor = n
n1 = str(input('voce quer [qualquer coisa]parar ou [c]continuar:')).upper().strip()[0]
while n1 == 'C' :
n = int(input('Digite um número:'))
cont += 1
soma = soma + n
media = soma / cont
if cont == 1:
maior = n
menor = n
else:
if n > maior:
maior = n
if n < menor:
menor = n
n1 = str(input('voce quer [qualquer coisa]parar ou [c]continuar:')).upper().strip()
print('A média dos números digitados é {:.2f}'.format(media))
print('O maior número digitado é {} e o menor é {}'.format(maior, menor))'''
#jeito da aula
# "Classroom" version: read integers until the user answers something other
# than S/s, then report how many were read, their mean, maximum and minimum.
resp = 'S'
soma = cont = media = maior = menor = 0
while resp == 'S':
    n = int(input('Digite um número:'))
    soma += n
    cont += 1
    if cont == 1:
        # First value initialises both extremes.
        maior = menor = n
    else:
        if n > maior:
            maior = n
        if n < menor:
            menor = n
    # Normalise the answer; an empty answer now stops the loop.  The original
    # test `resp in 'Ss'` treated "" as "continue", because '' is a substring
    # of every string.
    resp = input('Quer continuar?[S/N]').strip().upper()[:1]
# cont >= 1 because the loop body always runs once, so no division by zero.
media = soma / cont
print('Voce digitou {} números e a média é {}'.format(cont, media))
# fixed output typo: "númeoro" -> "número"
print('O maior número digitado é {} e o menor é {}'.format(maior, menor))
from oslo.db.sqlalchemy import models
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy import Column, schema, Text
from sqlalchemy import Float, Integer, String, Boolean
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.ext.declarative import declarative_base
from daoliproxy.db.sqlalchemy import types
from daoliproxy.openstack.common import uuidutils
# Shared declarative base: every model below registers its table on this
# metadata object.
BASE = declarative_base()
def MediumText():
    """Return a Text type that becomes MEDIUMTEXT on MySQL and plain TEXT elsewhere."""
    return Text().with_variant(MEDIUMTEXT(), 'mysql')
class User(BASE, models.ModelBase, models.TimestampMixin):
    """Represents a User."""
    __tablename__ = 'user'

    uuid = Column(String(36), primary_key=True, default=uuidutils.generate_uuid)
    username = Column(String(255), nullable=False)
    password = Column(String(255), nullable=False)
    email = Column(String(100), nullable=False)
    # integer user-type code; 0 is the default (code meanings not defined here)
    type = Column(Integer, default=0)
    phone = Column(String(11), nullable=False)
    company = Column(String(100), nullable=False)
    reason = Column(String(255), nullable=False)
    enabled = Column(Boolean, default=True)
    # Pass the dict *constructor*, not an instance: `default=dict()` created a
    # single dict shared by every row, so mutating one row's default leaked
    # into all others.  SQLAlchemy invokes a callable default per insert.
    extra = Column(types.JsonBlob(), default=dict)
class UserTask(BASE, models.ModelBase):
    """Represents a stored user task record."""
    __tablename__ = 'user_tasks'
    id = Column(Integer, primary_key=True)
    # short task-type tag (semantics defined by the callers)
    utype = Column(String(10), nullable=False)
    # text column used for storing a json object of user register
    uobj = Column(MediumText())
class UserLogin(BASE, models.ModelBase, models.TimestampMixin):
    """Represents a user login event (timestamps via TimestampMixin)."""
    __tablename__ = 'user_login'
    id = Column(Integer, primary_key=True)
    user_id = Column(String(36), nullable=False)
    # 17 chars fits a colon-separated MAC address — presumably the client's
    # hardware address; confirm against the writer of this table.
    user_addr = Column(String(17))
    user_type = Column(String(255))
class Instance(BASE, models.ModelBase, models.TimestampMixin):
    """Represents an instance."""
    __tablename__ = 'instances'
    id = Column(String(36), primary_key=True)
    name = Column(String(255), nullable=False)
    address = Column(String(15))
    mac_address = Column(String(17))
    # physical IPv4 of the hosting machine — TODO confirm
    phy_ipv4 = Column(String(15))
    host = Column(String(100))
    project_id = Column(String(36), nullable=False)
    user_id = Column(String(36), nullable=False)
    # foreign key into the zones table defined below
    availability_zone = Column(String(36), ForeignKey('zones.id'), nullable=False)
    image = Column(String(36), nullable=False)
    flavor = Column(String(36), nullable=False)
    status = Column(String(10), default=None)
    power_state = Column(Integer)
    fake_hostname = Column(String(255))
class UserProject(BASE, models.ModelBase):
    """Represents a user with project map."""
    __tablename__ = 'user_project'
    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(String(36), nullable=False)
    project_id = Column(String(36), nullable=False)
    # the corresponding user id on the Keystone side
    keystone_user_id = Column(String(36), nullable=False)
    zone_id = Column(String(36), nullable=False)
    # per-project instance quota
    total_instances = Column(Integer, default=10)
class Firewall(BASE, models.ModelBase):
    """Represents a port mapping (gateway port -> instance service port)."""
    __tablename__ = 'firewalls'
    __table_args__ = (
        # one gateway port per hostname
        schema.UniqueConstraint("hostname", "gateway_port", name="uniq_hostname0gateway_port"),
    )
    id = Column(Integer, primary_key=True, autoincrement=True)
    hostname = Column(String(100), nullable=False)
    gateway_port = Column(Integer, nullable=False)
    service_port = Column(Integer, nullable=False)
    instance_id = Column(String(36), nullable=False)
    fake_zone = Column(Boolean, nullable=False)
class SingleSecurityGroup(BASE, models.ModelBase):
    """Represents a security group entry for an instance pair."""
    __tablename__ = 'single_security_groups'
    __table_args__ = (
        # a (top, bottom) pair is unique per user
        schema.UniqueConstraint(
            "top", "bottom", "user_id",
            name="uniq_single_security_group0top0bottom0user_id"),
    )
    id = Column(Integer, primary_key=True, autoincrement=True)
    top = Column(String(36), nullable=False)
    bottom = Column(String(36), nullable=False)
    user_id = Column(String(36), nullable=False)
class ProjectNetwork(BASE, models.ModelBase):
    """Per-project network numbering state."""
    __tablename__ = 'project_networks'
    id = Column(Integer, primary_key=True, autoincrement=True)
    # presumably the third and fourth octets of the project's subnet —
    # TODO confirm against the allocation code
    third = Column(Integer, nullable=False, default=0)
    fourth = Column(Integer, nullable=False, default=2)
    project_id = Column(String(36), nullable=False)
class Gateway(BASE, models.ModelBase):
    """Represents a gateway host and its network interfaces."""
    __tablename__ = 'gateways'
    __table_args__ = (
        # NOTE(review): "datapaht" is a typo for "datapath"; renaming the
        # constraint would require a schema migration, so it is only flagged.
        schema.UniqueConstraint('datapath_id', name='uniq_datapaht_id'),
    )
    id = Column(Integer, primary_key=True, autoincrement=True)
    datapath_id = Column(String(100), nullable=False)
    hostname = Column(String(100), nullable=False)
    idc_id = Column(Integer, default=0)
    idc_mac = Column(String(64))
    # interface triplets: v*_ (virtual), ext_ (external), int_ (internal),
    # each with device name / MAC / IP
    vint_dev = Column(String(100), nullable=False)
    vint_mac = Column(String(64), nullable=False)
    vext_dev = Column(String(100), nullable=False)
    vext_ip = Column(String(64))
    ext_dev = Column(String(100), nullable=False)
    ext_mac = Column(String(64), nullable=False)
    ext_ip = Column(String(64), nullable=False)
    int_dev = Column(String(100), nullable=False)
    int_mac = Column(String(64), nullable=False)
    int_ip = Column(String(64))
    zone = Column(String(36), nullable=False)
    count = Column(Integer, nullable=False, default=0)
    is_gateway = Column(Boolean, default=False)
    disabled = Column(Boolean, default=False)
class Image(BASE, models.ModelBase):
    """Represents a disk image available in a zone."""
    __tablename__ = 'images'
    id = Column(String(36), nullable=False, primary_key=True)
    name = Column(String(255), nullable=False)
    checksum = Column(String(32))
    container_format = Column(String(32), nullable=False)
    disk_format = Column(String(32), default='raw')
    is_public = Column(Boolean, default=True)
    min_disk = Column(Integer, default=0)
    min_ram = Column(Integer, default=0)
    size = Column(Integer, nullable=False)
    owner = Column(String(32))
    status = Column(String(32))
    # NOTE(review): attribute name shadows the builtin `property`; harmless
    # here since no @property decorators follow in this class body.
    property = Column(types.JsonBlob())
    display_format = Column(String(32))
    zone = Column(String(36), nullable=False)
class Zone(BASE, models.ModelBase):
    """Represents a Zone."""
    __tablename__ = 'zones'
    id = Column(String(36), primary_key=True, default=uuidutils.generate_uuid)
    name = Column(String(255), nullable=False)
    # keystone endpoint and credential used to reach this zone
    auth_url = Column(String(255), nullable=False)
    token = Column(String(255), nullable=False)
    # default per-project instance quota in this zone
    default_instances = Column(Integer, default=10)
    disabled = Column(Boolean, default=False)
    idc_id = Column(Integer, default=0)
class Flavor(BASE, models.ModelBase):
    """Represents an instance flavor (sizing template) in a zone."""
    __tablename__ = 'flavors'
    id = Column(Integer, primary_key=True, autoincrement=True)
    flavorid = Column(String(36), nullable=False)
    name = Column(String(255))
    vcpus = Column(Integer, nullable=False)
    ram = Column(Integer, nullable=False)
    disk = Column(Integer, nullable=False)
    swap = Column(String(10), default='')
    ephemeral = Column(Integer)
    rxtx_factor = Column(Float, default=1)
    is_public = Column(Boolean, default=True)
    zone = Column(String(36), nullable=False)
class Resource(BASE, models.ModelBase, models.TimestampMixin):
    """Audit record of an action taken on a named resource."""
    __tablename__ = 'resources'
    id = Column(Integer, primary_key=True, autoincrement=True)
    source_name = Column(String(255), nullable=False)
    source_id = Column(String(255), nullable=False)
    action = Column(String(255), nullable=False)
    # free-form JSON payload with action details
    extra = Column(types.JsonBlob())
    project_id = Column(String(36), nullable=False)
    user_id = Column(String(36), nullable=False)
################################
class IPAvailabilityRange(BASE, models.ModelBase):
    """Contiguous range of free IPs inside an allocation pool."""
    __tablename__ = 'ipavailabilityranges'
    # cascades away with its parent pool
    allocation_pool_id = Column(String(36), ForeignKey('ipallocationpools.id',
                                                       ondelete="CASCADE"),
                                primary_key=True)
    first_ip = Column(String(64), nullable=False)
    last_ip = Column(String(64), nullable=False)

    def __repr__(self):
        """Human-readable "first - last" form for debugging."""
        return "%s - %s" % (self.first_ip, self.last_ip)
class IPAllocationPool(BASE, models.ModelBase):
    """Pool of allocatable IPs belonging to a subnet."""
    __tablename__ = 'ipallocationpools'
    id = Column(String(36), primary_key=True, default=uuidutils.generate_uuid)
    subnet_id = Column(String(36), ForeignKey('subnets.id',
                                              ondelete="CASCADE"))
    first_ip = Column(String(64), nullable=False)
    last_ip = Column(String(64), nullable=False)
    # eagerly loaded free ranges; deleted together with the pool
    available_ranges = relationship(IPAvailabilityRange,
                                    backref='ipallocationpool',
                                    lazy="joined",
                                    cascade='delete')

    def __repr__(self):
        """Human-readable "first - last" form for debugging."""
        return "%s - %s" % (self.first_ip, self.last_ip)
class Subnet(BASE, models.ModelBase):
    """Represents a tenant subnet.

    When a subnet is created the first and last entries will be created. These
    are used for the IP allocation.
    """
    __tablename__ = 'subnets'
    id = Column(String(36), primary_key=True, default=uuidutils.generate_uuid)
    name = Column(String(255))
    cidr = Column(String(64), nullable=False)
    gateway_ip = Column(String(64))
    net_type = Column(Integer, nullable=False)
    user_id = Column(String(36), nullable=False)
    # eagerly loaded pools; deleted together with the subnet
    allocation_pools = relationship(IPAllocationPool,
                                    backref='subnet',
                                    lazy="joined",
                                    cascade='delete')
class Service(BASE, models.ModelBase):
    """Represents an API service endpoint."""
    __tablename__ = 'services'
    id = Column(String(36), primary_key=True, default=uuidutils.generate_uuid)
    name = Column(String(255))
    url = Column(String(255), nullable=False)
    topic = Column(String(20))
    idc_id = Column(Integer, default=0)
class Network(BASE, models.ModelBase):
    """Represents a network."""
    __tablename__ = 'networks'
    id = Column(String(36), primary_key=True, default=uuidutils.generate_uuid)
    gateway = Column(String(64))
    # NOTE(review): spelled "netype" (single t) while Subnet uses "net_type";
    # renaming would require a migration, so only flagged.
    netype = Column(Integer, nullable=False)
    zone_id = Column(String(36), nullable=False)
class NetworkType(BASE, models.ModelBase):
    """Maps a network type id to its CIDR block."""
    __tablename__ = 'network_types'
    id = Column(Integer, primary_key=True, autoincrement=True)
    cidr = Column(String(64), nullable=False)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# NOTE(review): this is a Jupyter/Colab notebook cell, not plain Python —
# the `!` (shell) and `%` (magic) lines only run inside IPython.
# Pinned library versions this notebook was written against (fastai 0.7 era).
!pip install Pillow==4.1.1
!pip install "fastai==0.7.0"
!pip install torchtext==0.2.3
# OpenCV needs these X11/shared-memory system libraries even when headless.
!apt-get -qq install -y libsm6 libxext6 && pip install -q -U opencv-python
import cv2
from os import path
# Build the wheel platform tag (e.g. "cp36-cp36m") for the PyTorch wheel URL.
from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
platform = '{}{}-{}'.format(get_abbr_impl(), get_impl_ver(), get_abi_tag())
!apt update && apt install -y libsm6 libxext6
# Pick the CUDA 8.0 wheel when a GPU is visible, otherwise the CPU build.
accelerator = 'cu80' if path.exists('/opt/bin/nvidia-smi') else 'cpu'
!pip install -q http://download.pytorch.org/whl/{accelerator}/torch-0.3.0.post4-{platform}-linux_x86_64.whl torchvision
import torch
!pip install image
# Render matplotlib figures inline in the notebook.
%matplotlib inline
from fastai.imports import *
|
from .market_calendar import MarketCalendar
from .exchange_calendar_asx import ASXExchangeCalendar
from .exchange_calendar_bmf import BMFExchangeCalendar
from .exchange_calendar_cboe import CFEExchangeCalendar
from .exchange_calendar_cme import \
CMEEquityExchangeCalendar, \
CMEBondExchangeCalendar
from .exchange_calendar_cme_globex_base import CMEGlobexBaseExchangeCalendar
from .exchange_calendar_cme_globex_agriculture import CMEGlobexAgricultureExchangeCalendar
from .exchange_calendar_cme_globex_fx import CMEGlobexFXExchangeCalendar
from .exchange_calendar_cme_globex_energy_and_metals import CMEGlobexEnergyAndMetalsExchangeCalendar
from .exchange_calendar_eurex import EUREXExchangeCalendar
from .exchange_calendar_hkex import HKEXExchangeCalendar
from .exchange_calendar_ice import ICEExchangeCalendar
from .exchange_calendar_iex import IEXExchangeCalendar
from .exchange_calendar_jpx import JPXExchangeCalendar
from .exchange_calendar_lse import LSEExchangeCalendar
from .exchange_calendar_nyse import NYSEExchangeCalendar
from .exchange_calendar_ose import OSEExchangeCalendar
from .exchange_calendar_sifma import SIFMAUSExchangeCalendar, SIFMAUKExchangeCalendar, SIFMAJPExchangeCalendar
from .exchange_calendar_six import SIXExchangeCalendar
from .exchange_calendar_sse import SSEExchangeCalendar
from .exchange_calendar_tsx import TSXExchangeCalendar
from .exchange_calendar_bse import BSEExchangeCalendar
from .exchange_calendar_tase import TASEExchangeCalendar
from .exchange_calendars_mirror import *
def get_calendar(name, open_time=None, close_time=None):
    """Look up and instantiate the MarketCalendar registered under *name*.

    :param name: The name of the MarketCalendar to be retrieved.
    :param open_time: Market open time override as datetime.time object. If None then default is used.
    :param close_time: Market close time override as datetime.time object. If None then default is used.
    :return: MarketCalendar of the desired calendar.
    """
    calendar = MarketCalendar.factory(
        name, open_time=open_time, close_time=close_time)
    return calendar
def get_calendar_names():
    """All Market Calendar names and aliases that can be used in "factory".

    :return: list(str)
    """
    names = MarketCalendar.calendar_names()
    return names
|
from pathlib import PurePath as _Path

# Directory containing the bundled data files (same directory as this module).
_data_dir = _Path(__file__).parent

# Names of the bundled files; `files` maps each name to its path as a string.
# Building the mapping in one comprehension replaces 14 copies of the same
# str(_Path(__file__).parent.joinpath(...)) expression.
_names = (
    'pentalanine.h5',
    'pentalanine.inpcrd',
    'pentalanine.prmtop',
    'metenkephalin.pdb',
    'trp-cage.pdb',
    'trp-cage_solvated.pdb',
    '1brs.pdb',
    '1brs.mmtf',
    '1sux.pdb',
    '1sux.mmtf',
    '1tcd.pdb',
    '1tcd.mmtf',
    'caffeine.mol2',
    'HIF1_HS_Mod.pdb',
)

files = {name: str(_data_dir.joinpath(name)) for name in _names}
|
#
# voice-skill-sdk
#
# (C) 2020, Deutsche Telekom AG
#
# This file is distributed under the terms of the MIT license.
# For details see the file LICENSE in the top directory.
#
#
import yaml
import unittest
import logging
from unittest.mock import patch
from datetime import timezone as tz
from skill_sdk import swagger, skill
# Registered with the SDK as the handler for HELLO_INTENT; the `tz` annotation
# is presumably used by the SDK for parameter conversion — confirm in skill_sdk.
@skill.intent_handler('HELLO_INTENT')
def hello(timezone: tz):
    """No-op handler used only so the swagger spec has an intent to describe."""
    pass
class TestSwagger(unittest.TestCase):
    """Tests for the generated Swagger specification and its error handling."""

    def test_swag(self):
        spec = swagger.swag('json')
        self.assertIsInstance(spec, dict)
        schemas = spec['components']['schemas']
        expected_schemas = (
            'SkillInfoResponseDto',
            'CardDto',
            'SessionRequestDto',
            'SessionResponseDto',
            'SkillContextDto',
            'InvokeSkillRequestDto',
            'InvokeSkillResponseDto',
            'PushNotificationDto',
            'ResultDto',
        )
        for schema_name in expected_schemas:
            self.assertIn(schema_name, schemas)

    @patch('yaml.safe_load', side_effect=yaml.YAMLError())
    @patch('builtins.open', side_effect=OSError())
    @patch.object(logging.Logger, 'error')
    def test_create_spec_os_error(self, logger, *args):
        # Both opening and parsing the spec fail: create_spec must swallow
        # the error and log it exactly once.
        swagger.create_spec()
        logger.assert_called_once()
|
import os, sys
import subprocess
from datetime import datetime
# NOTE(review): `dir` shadows the builtin of the same name; renaming it would
# also require touching the log-path code in SFTP.recv/send below.
dir = os.path.dirname(__file__)
# Make the parent directory importable for sibling modules.
sys.path.insert(0, os.path.join(dir, '../'))
def subprocess_open_when_shell_true(command):
    """Run *command* through the shell and return its (stdout, stderr) bytes.

    NOTE(review): shell=True executes the string via the shell — callers must
    not pass untrusted input here.
    """
    proc = subprocess.Popen(command,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            shell=True)
    out, err = proc.communicate()
    return out, err
class SFTP(object):
    """One-way synchronisation between a local and a remote directory.

    Both sides are listed by parsing ``ls -l`` output; a file is
    (re)transferred when it is missing on the target or the reported sizes
    differ.  When the config sets ``option.overwrite == 'false'``, clashing
    target files get a timestamp suffix instead of being overwritten.
    """

    def __init__(self, ssh, config):
        """Store the SSH client, open an SFTP channel, and read the config.

        :param ssh: connected client exposing ``open_sftp``/``exec_command``
            (paramiko-style — confirm against the caller).
        :param config: nested dict with ``local.dir``, ``dest.dir`` and an
            optional ``option.overwrite`` flag.
        """
        self.overwrite = ""
        self.ssh = ssh
        self.sftp = ssh.open_sftp()
        self.local = config['local']['dir']
        self.dest = config['dest']['dir']
        try:
            if config['option']['overwrite'] == 'false':
                # suffix like "_2020-01-01 12:34" appended to clashing files
                self.overwrite = "_" + str(datetime.now())[:16]
        except (KeyError, TypeError):
            # The option section is optional; absence means overwriting is
            # allowed.  (Was a bare `except:`; narrowed to the errors a
            # missing/non-dict section actually raises.)
            pass

    def dest_list(self, ssh, destpath):
        """Return {filename: {'format': mode-string, 'size': size-string}}
        for the remote directory *destpath*, parsed from ``ls -l``."""
        dest_dict = {}
        stdin, stdout, stderr = ssh.exec_command("ls -l " + destpath)
        dest_file_path = stdout.read().decode('utf-8').split("\n")
        for each in dest_file_path:
            spl = each.split()
            # A regular `ls -l` entry has 9 whitespace-separated fields; this
            # silently skips the "total" line, blank lines and odd entries.
            if len(spl) == 9:
                dest_dict[spl[8]] = {"format": spl[0], "size": spl[4]}
        return dest_dict

    def local_list(self, localpath):
        """Local-side counterpart of :meth:`dest_list`."""
        local_dict = {}
        local_file_path = subprocess_open_when_shell_true("ls -l " + localpath)[0].decode('utf-8').split("\n")
        for each in local_file_path:
            spl = each.split()
            if len(spl) == 9:
                local_dict[spl[8]] = {"format": spl[0], "size": spl[4]}
        return local_dict

    def dir_proc(self, opt, localpath, destpath, filename):
        """Recursively diff directory *filename* and return transfer pairs.

        *opt* is one of ``recv_c``/``recv_r``/``send_c``/``send_r``: the
        prefix selects the direction; the ``_c`` suffix means the directory
        is missing on the target and must be created first.

        :return: list of ``[source_path, target_path]`` pairs.
        """
        # NOTE: the original compared mode characters with `is 'd'` — an
        # identity comparison that only worked via CPython string interning;
        # fixed to `==` throughout.
        add_list = []
        if opt[:4] == "recv":
            if opt == "recv_c":
                subprocess_open_when_shell_true("mkdir " + localpath + "/" + filename)
            dest_dict = self.dest_list(self.ssh, destpath + "/" + filename)
            local_dict = self.local_list(localpath + "/" + filename)
            for each in dest_dict:
                # first char of the mode string: 'd' marks a directory
                dest_format = dest_dict[each]['format'][0]
                dest_size = dest_dict[each]['size']
                if each not in local_dict:
                    if dest_format == 'd':
                        add_list += self.dir_proc("recv_c", localpath + "/" + filename, destpath + "/" + filename, each)
                    else:
                        add_list.append([destpath + "/" + filename + "/" + each, localpath + "/" + filename + "/" + each])
                else:
                    if dest_format == 'd':
                        add_list += self.dir_proc("recv_r", localpath + "/" + filename, destpath + "/" + filename, each)
                    else:
                        if dest_size != local_dict[each]['size']:
                            add_list.append([destpath + "/" + filename + "/" + each, localpath + "/" + filename + "/" + each + self.overwrite])
            return add_list
        elif opt[:4] == "send":
            if opt == "send_c":
                self.ssh.exec_command("mkdir " + destpath + "/" + filename)
            dest_dict = self.dest_list(self.ssh, destpath + "/" + filename)
            local_dict = self.local_list(localpath + "/" + filename)
            for each in local_dict:
                local_format = local_dict[each]['format'][0]
                local_size = local_dict[each]['size']
                if each not in dest_dict:
                    if local_format == 'd':
                        add_list += self.dir_proc("send_c", localpath + "/" + filename, destpath + "/" + filename, each)
                    else:
                        add_list.append([localpath + "/" + filename + "/" + each, destpath + "/" + filename + "/" + each])
                else:
                    if local_format == 'd':
                        add_list += self.dir_proc("send_r", localpath + "/" + filename, destpath + "/" + filename, each)
                    else:
                        if local_size != dest_dict[each]['size']:
                            add_list.append([localpath + "/" + filename + "/" + each, destpath + "/" + filename + "/" + each + self.overwrite])
            return add_list

    def recv(self):
        """Download files missing locally or differing in size, logging each."""
        dest_dict = self.dest_list(self.ssh, self.dest)
        local_dict = self.local_list(self.local)
        get_list = []
        for each in dest_dict:
            dest_format = dest_dict[each]['format'][0]
            dest_size = dest_dict[each]['size']
            if each not in local_dict:
                if dest_format == 'd':
                    get_list += self.dir_proc("recv_c", self.local, self.dest, each)
                else:
                    get_list.append([self.dest + "/" + each, self.local + "/" + each])
            else:
                if dest_format == 'd':
                    get_list += self.dir_proc("recv_r", self.local, self.dest, each)
                else:
                    if dest_size != local_dict[each]['size']:
                        get_list.append([self.dest + "/" + each, self.local + "/" + each + self.overwrite])
        for each in get_list:
            self.sftp.get(each[0], each[1])
            # log record — context manager closes the file even if write fails
            # (was open/write/close without try/finally)
            with open(os.path.join(dir[:-5], "log/" + str(datetime.now())[:-16] + "_recv.log"), "a") as log_f:
                log_f.write(str(datetime.now()) + " : " + each[0] + "\n")

    def send(self):
        """Upload files missing remotely or differing in size, logging each."""
        dest_dict = self.dest_list(self.ssh, self.dest)
        local_dict = self.local_list(self.local)
        put_list = []
        for each in local_dict:
            local_format = local_dict[each]['format'][0]
            local_size = local_dict[each]['size']
            if each not in dest_dict:
                if local_format == 'd':
                    put_list += self.dir_proc("send_c", self.local, self.dest, each)
                else:
                    put_list.append([self.local + "/" + each, self.dest + "/" + each])
            else:
                if local_format == 'd':
                    put_list += self.dir_proc("send_r", self.local, self.dest, each)
                else:
                    if local_size != dest_dict[each]['size']:
                        put_list.append([self.local + "/" + each, self.dest + "/" + each + self.overwrite])
        for each in put_list:
            self.sftp.put(each[0], each[1])
            # log record
            with open(os.path.join(dir[:-5], "log/" + str(datetime.now())[:-16] + "_send.log"), "a") as log_f:
                log_f.write(str(datetime.now()) + " : " + each[0] + "\n")
|
def wordpresent(word, list):
    """Return 1 if *word* equals the part before '/' in any entry of *list*, else 0."""
    for entry in list:
        # entries look like "<name>/p\n"; compare only the name part
        if entry.split('/')[0] == word:
            return 1
    return 0
# Gazetteer entries: each item is "<name>/p\n" (person-tagged).
# NOTE(review): `list` shadows the builtin; renaming it would also require
# renaming the use two lines below, so it is left as-is here.
list = ['शिंदे/p\n', 'श्री कृष्ण बिहारी वाजपेयी/p\n', 'पं० श्याम लाल बिहारी वाजपेयी/p\n', 'राष्ट्रीय अध्यक्ष/p\n', 'प्रधानमंत्री/p\n', 'फारुख खान/p\n', 'उमर अब्दुल्ला/p\n', 'राज्यपाल सत्यपाल मलिक/p\n', 'महबूबा मुफ्ती/p\n', 'वरयाम सिंह/p\n', 'ज्वाइंट कमिश्नर/p\n', 'जॉय थॉमस/p\n']
# Check that a multi-word name is present in the gazetteer (result unused).
wordpresent('श्री कृष्ण बिहारी वाजपेयी',list)
l = "शिंदे श्री कृष्ण बिहारी वाजपेयी पं० श्याम लाल बिहारी"
# Replace the matched name with its tag marker in a sample sentence.
l = l.replace('श्री कृष्ण बिहारी वाजपेयी','p')
print(l)
import unittest
from pipelinewise_singer.schema import Schema
class TestSchema(unittest.TestCase):
    """Round-trip, equality and repr tests for singer Schema objects.

    Uses ``assertEqual`` throughout: the ``assertEquals`` alias used before
    is deprecated and was removed in Python 3.12.
    """

    # Raw data structures for several schema types
    string_dict = {
        'type': 'string',
        'maxLength': 32
    }
    integer_dict = {
        'type': 'integer',
        'maximum': 1000000
    }
    array_dict = {
        'type': 'array',
        'items': integer_dict
    }
    object_dict = {
        'type': 'object',
        'properties': {
            'a_string': string_dict,
            'an_array': array_dict
        },
        'inclusion': 'whatever',
        'additionalProperties': True,
    }

    # Schema object forms of the same schemas as above
    string_obj = Schema(type='string', maxLength=32)
    integer_obj = Schema(type='integer', maximum=1000000)
    array_obj = Schema(type='array', items=integer_obj)
    object_obj = Schema(type='object',
                        properties={'a_string': string_obj,
                                    'an_array': array_obj},
                        inclusion='whatever',
                        additionalProperties=True)

    def test_string_to_dict(self):
        self.assertEqual(self.string_dict, self.string_obj.to_dict())

    def test_integer_to_dict(self):
        self.assertEqual(self.integer_dict, self.integer_obj.to_dict())

    def test_array_to_dict(self):
        self.assertEqual(self.array_dict, self.array_obj.to_dict())

    def test_object_to_dict(self):
        self.assertEqual(self.object_dict, self.object_obj.to_dict())

    def test_string_from_dict(self):
        self.assertEqual(self.string_obj, Schema.from_dict(self.string_dict))

    def test_integer_from_dict(self):
        self.assertEqual(self.integer_obj, Schema.from_dict(self.integer_dict))

    def test_array_from_dict(self):
        self.assertEqual(self.array_obj, Schema.from_dict(self.array_dict))

    def test_object_from_dict(self):
        self.assertEqual(self.object_obj, Schema.from_dict(self.object_dict))

    def test_repr_atomic(self):
        # repr() output must evaluate back to an equal Schema
        self.assertEqual(self.string_obj, eval(repr(self.string_obj)))

    def test_repr_recursive(self):
        self.assertEqual(self.object_obj, eval(repr(self.object_obj)))

    def test_object_from_dict_with_defaults(self):
        schema = Schema.from_dict(self.object_dict, inclusion='automatic')
        self.assertEqual('whatever', schema.inclusion,
                         msg='The schema value should override the default')
        self.assertEqual('automatic', schema.properties['a_string'].inclusion)
        self.assertEqual('automatic', schema.properties['an_array'].items.inclusion)
|
from saml2 import BINDING_SOAP
from saml2 import BINDING_HTTP_REDIRECT
from saml2.saml import NAME_FORMAT_URI
__author__ = 'rolandh'

# Alternative deployment bases kept for reference:
#BASE = "http://localhost:8091/"
#BASE = "http://lingon.catalogix.se:8091/"
BASE = "http://lingon.ladok.umu.se:8091/"

# pysaml2 IdP-proxy configuration dict (consumed by saml2's config loader).
CONFIG = {
    "entityid" : BASE+"idp",
    "service": {
        "idp": {
            "name" : "SAML proxy IdP",
            "endpoints" : {
                "single_sign_on_service" : [(BASE, BINDING_HTTP_REDIRECT)],
                "single_logout_service" : [(BASE+"logout", BINDING_SOAP)],
            },
            # "default" policy applies when no SP-specific entry exists
            "policy": {
                "default": {
                    "lifetime": {"minutes": 240},
                    "attribute_restrictions": None, # means all I have
                    "name_form": NAME_FORMAT_URI
                },
            },
            "subject_data": "./idp.subject.db",
        }
    },
    "debug" : 1,
    "key_file" : "pki/mykey.pem",
    "cert_file" : "pki/mycert.pem",
    "xmlsec_binary" : "/opt/local/bin/xmlsec1",
    "metadata": {
        "local": ["test/sp/sp.xml"],
    },
    "organization": {
        "display_name": "Rolands Social proxy",
        "name": "Rolands Social proxy",
        "url":"http://www.example.com/roland",
    },
    # This database holds the map between a subjects local identifier and
    # the identifier returned to a SP
    "attribute_map_dir" : "./attributemaps",
    "secret": "1234567890",
    # rotating-file logger settings
    "logger": {
        "rotating": {
            "filename": "idp_proxy.log",
            "maxBytes": 100000,
            "backupCount": 5,
        },
        "loglevel": "debug",
        #"format": "%(asctime)s %(name)s:%(func)s %(levelname)s %(message)s",
        "format": "%(asctime)s %(name)s %(levelname)s %(message)s"
    }
}
|
import logging
from pathlib import Path
from scrapy import Spider, Request
from scrapy.crawler import CrawlerProcess
from scrapy_playwright.page import PageCoroutine
class HandleTimeoutMiddleware:
    """Downloader middleware that turns any download exception into a
    recovery request which screenshots httpbin.org/get via Playwright."""

    def process_exception(self, request, exception, spider):
        logging.info("Caught exception: %s", exception.__class__)
        recovery_meta = {
            "playwright": True,
            "playwright_page_coroutines": [
                PageCoroutine(
                    "screenshot",
                    path=Path(__file__).parent / "recovered.png",
                    full_page=True,
                ),
            ],
        }
        return Request(url="https://httpbin.org/get", meta=recovery_meta)
class HandleExceptionSpider(Spider):
    """
    Handle exceptions in the Playwright downloader, such as TimeoutError
    """
    name = "awesome"
    custom_settings = {
        # 1 second navigation budget so the 300 s delay below always times out
        "PLAYWRIGHT_DEFAULT_NAVIGATION_TIMEOUT": 1000,
        "DOWNLOADER_MIDDLEWARES": {HandleTimeoutMiddleware: 100},
    }

    def start_requests(self):
        slow_url = "https://httpbin.org/delay/300"
        yield Request(url=slow_url, meta={"playwright": True})

    def parse(self, response):
        yield {"url": response.url}
if __name__ == "__main__":
    # Run the example spider with the asyncio reactor that scrapy-playwright
    # requires; https-only handler, and RETRY_TIMES=0 so the timeout surfaces
    # immediately instead of being retried.
    process = CrawlerProcess(
        settings={
            "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
            "DOWNLOAD_HANDLERS": {
                "https": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
                # "http": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
            },
            "RETRY_TIMES": 0,
        }
    )
    process.crawl(HandleExceptionSpider)
    process.start()
|
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
from scipy.stats import vonmises
from skimage.filters import median
from skimage.io import imread, imsave
import skimage.morphology as morpho
from skimage.measure import find_contours
from mantis import sdp_km_burer_monteiro
from experiments.utils import plot_bumps_1d, plot_matrix, plot_data_embedded
# Output directory for all figures/PDFs produced below.
dir_name = '../results/bunny/'
# makedirs with exist_ok creates both path levels at once and removes the
# check-then-mkdir race of the original two-step version.
os.makedirs(dir_name, exist_ok=True)
def extract_boundary(img):
    """Return the first zero-level contour of *img* as an (N, 2) point array.

    The image is transposed before contour extraction, and the second
    coordinate is negated because image rows grow downwards.
    """
    boundary = find_contours(img.T, 0)[0]
    boundary[:, 1] = -boundary[:, 1]
    return boundary
def generate_bunny_curves(save_scatter=False):
    """Progressively smooth the bunny image and pickle each boundary curve.

    Runs 700 rounds of median filtering (disk radius 10) on a binarised
    bunny.png, extracting the boundary after each round, and dumps the
    resulting {iteration: curve} dict to 'bunny.pickle'.

    NOTE(review): *save_scatter* is currently unused.
    """
    # `as_grey` is the old spelling (newer scikit-image uses `as_gray`) —
    # confirm against the pinned skimage version.
    img = imread('./bunny.png', as_grey=True)
    # binarise: dark pixels (< 0.5) become foreground value 255
    img = 255 * (img < 0.5).astype(np.uint8)
    img_filtered = img.copy()
    bunny_dict = {}
    for i in range(700):
        img_filtered = median(img_filtered, selem=morpho.disk(10))
        bunny = extract_boundary(img_filtered)
        bunny_dict[i] = bunny
    with open('bunny.pickle', mode='w+b') as f:
        pickle.dump(bunny_dict, f)
def bunny2circle2clusters():
    """Subsample the bunny curves and build von Mises-clustered circles.

    Reads 'bunny.pickle', keeps 200 evenly spaced points per curve, fits a
    circle (centroid + mean radius) to the last curve, and for each
    concentration kappa in 1..199 places four von Mises point clusters on
    that circle.  Dumps both dicts, in order, to 'bunny_processed.pickle'.
    """
    with open('bunny.pickle', mode='r+b') as f:
        bunny_dict = pickle.load(f)
    for i in bunny_dict:
        # `np.int` was removed in NumPy 1.24; the builtin `int` is the
        # documented replacement.
        samples = np.linspace(0, len(bunny_dict[i]), num=200, endpoint=False,
                              dtype=int)
        print(i, len(bunny_dict[i]), len(bunny_dict[i][samples]))
        bunny_dict[i] = bunny_dict[i][samples]
    # fit a circle to the most-smoothed (last) curve
    idx_last = max(bunny_dict)
    last_bunny = bunny_dict[idx_last]
    centroid = np.mean(last_bunny, axis=0, keepdims=True)
    norms = np.linalg.norm(last_bunny - centroid, axis=1)
    r = norms.mean()
    # quantiles in (0, 1) used to invert the von Mises CDF per cluster
    samples_per_cluster = np.linspace(0, 1, endpoint=False,
                                      num=len(last_bunny) // 4 + 1)[1:]
    circle_clustered_dict = {}
    for kappa in range(1, 200):
        print(kappa)
        angles = []
        # four clusters centred at the four compass directions
        for theta in [0, 0.5 * np.pi, np.pi, 1.5 * np.pi]:
            angles.extend(vonmises.ppf(samples_per_cluster, kappa, loc=theta))
        angles = np.sort(np.mod(angles, 2 * np.pi))
        x = r * np.cos(angles) + centroid[0, 0]
        y = r * np.sin(angles) + centroid[0, 1]
        curve = np.vstack((x, y)).T
        circle_clustered_dict[kappa] = curve
    with open('bunny_processed.pickle', mode='w+b') as f:
        pickle.dump(bunny_dict, f)
        pickle.dump(circle_clustered_dict, f)
def save_curve_plots():
    """Save a scatter plot PNG for every bunny curve and clustered circle.

    Reads both dicts from 'bunny_processed.pickle'; circle plots continue the
    file numbering after the bunny plots.
    """
    with open('bunny_processed.pickle', mode='r+b') as f:
        bunny_dict = pickle.load(f)
        circle_clustered_dict = pickle.load(f)
    bunny_filename = '{}bunny{}.png'
    for i in bunny_dict:
        curve = bunny_dict[i]
        plt.figure()
        plt.scatter(curve[:, 0], curve[:, 1], s=2, marker='.')
        plt.axis('equal')
        if i == 0:
            # capture the first plot's limits and reuse them for all others
            # NOTE(review): assumes key 0 is iterated first — true for the
            # int-keyed dict written by generate_bunny_curves, but fragile.
            plt.gca().set_aspect('equal', adjustable='box')
            xlim = plt.xlim()
            ylim = plt.ylim()
        else:
            plt.xlim(xlim)
            plt.ylim(ylim)
            plt.gca().set_aspect('equal', adjustable='box')
        plt.savefig(bunny_filename.format(dir_name, i))
        plt.close()
    for i, k in enumerate(circle_clustered_dict):
        curve = circle_clustered_dict[k]
        plt.figure()
        plt.scatter(curve[:, 0], curve[:, 1], s=2, marker='.')
        plt.axis('equal')
        # same shared limits as the bunny plots above
        plt.xlim(xlim)
        plt.ylim(ylim)
        plt.gca().set_aspect('equal', adjustable='box')
        plt.savefig(bunny_filename.format(dir_name, i + len(bunny_dict)))
        plt.close()
def process_curves():
    """Run SDP-KM on selected curves and save embedding/matrix/bump figures.

    For a handful of bunny deformations (k=20) and clustered circles (k=4),
    solves the SDP k-means relaxation via Burer-Monteiro and plots, per
    curve: the embedded points, the Gramian Q = Y Y^T, and 1-D bump plots.
    Outputs 'bunny_deformation.pdf' and 'circle_deformation.pdf'.
    """
    with open('bunny_processed.pickle', mode='r+b') as f:
        bunny_dict = pickle.load(f)
        circle_clustered_dict = pickle.load(f)
    plt.figure(figsize=(10, 6), tight_layout=True)
    gs = gridspec.GridSpec(3, 6)
    # hand-picked smoothing iterations to display
    bunny_idx = [0, 40, 80, 174, 524, 699]
    for i, idx in enumerate(bunny_idx):
        print(idx)
        curve = bunny_dict[idx]
        Y = sdp_km_burer_monteiro(curve, 20, rank=len(curve),
                                  tol=1e-6, maxiter=5000, verbose=True)
        Q = Y.dot(Y.T)
        labels = np.arange(len(curve))
        ax = plt.subplot(gs[0, i])
        plot_data_embedded(curve, s=2, ax=ax)
        ax = plt.subplot(gs[1, i])
        plot_matrix(Q, labels=labels, labels_palette='hls', ax=ax)
        ax = plt.subplot(gs[2, i])
        plot_bumps_1d(Y, subsampling=10, labels=labels, labels_palette='hls',
                      ax=ax)
    plt.savefig(dir_name + 'bunny_deformation.pdf', dpi=300)
    plt.show()
    plt.figure(figsize=(10, 6), tight_layout=True)
    gs = gridspec.GridSpec(3, 6)
    # hand-picked von Mises concentrations to display
    circle_idx = [1, 3, 5, 7, 10, 199]
    for i, idx in enumerate(circle_idx):
        print(idx)
        curve = circle_clustered_dict[idx]
        Y = sdp_km_burer_monteiro(curve, 4, rank=len(curve),
                                  tol=1e-6, maxiter=5000, verbose=True)
        Q = Y.dot(Y.T)
        labels = np.arange(len(curve))
        ax = plt.subplot(gs[0, i])
        plot_data_embedded(curve, s=2, ax=ax)
        ax = plt.subplot(gs[1, i])
        plot_matrix(Q, labels=labels, labels_palette='hls', ax=ax)
        ax = plt.subplot(gs[2, i])
        plot_bumps_1d(Y, subsampling=15, labels=labels, labels_palette='hls',
                      ax=ax)
    plt.savefig(dir_name + 'circle_deformation.pdf', dpi=300)
    plt.show()
def main():
    """Run the full pipeline: generate curves, subsample/cluster, then plot."""
    # Each stage persists its output to a pickle file, so later stages can be
    # re-run without repeating the expensive 700-iteration filtering loop.
    generate_bunny_curves()
    bunny2circle2clusters()
    save_curve_plots()
    process_curves()


if __name__ == '__main__':
    main()
|
import tensorflow as tf
from modeler.tfmodel import TFModel
class FastTextModel(TFModel):
    """fastText-style text classifier: embed words, average them, and apply
    a single linear layer (Joulin et al., 2016).

    Trains with sampled NCE loss; evaluates with sigmoid cross-entropy.
    """
    def __init__(self, label_size, learning_rate, batch_size, decay_steps, decay_rate, num_sampled, sentence_len,
                 vocab_size, embed_size, is_training):
        """Store all hyperparameters, then let the base class build the graph."""
        self.label_size = label_size  # number of output classes
        self.batch_size = batch_size
        self.num_sampled = num_sampled  # negatives drawn per batch by NCE
        self.sentence_len = sentence_len  # fixed token count per sentence
        self.vocab_size = vocab_size
        self.embed_size = embed_size
        self.is_training = is_training  # True -> NCE loss; False -> sigmoid CE
        self.learning_rate = learning_rate
        self.decay_steps, self.decay_rate = decay_steps, decay_rate
        super(FastTextModel, self).__init__()
    def add_placeholder(self):
        """Create input placeholders, step counters, and model weights."""
        self.sentence = tf.placeholder(tf.int32, [None, self.sentence_len], name="sentence")  # X
        self.labels = tf.placeholder(tf.int32, [None], name="Labels")  # y
        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step = tf.Variable(0, trainable=False, name="Epoch_Step")
        self.epoch_increment = tf.assign(self.epoch_step, tf.add(self.epoch_step, tf.constant(1)))
        # BUG FIX: the original re-created self.epoch_step with a second
        # tf.Variable here, so self.epoch_increment kept updating the orphaned
        # first variable and the visible epoch counter never advanced.
        self.instantiate_weights()
    def build(self):
        """Wire the forward pass."""
        self.logits = self.inference()  # [None, self.label_size]
    def cal_loss(self):
        """Attach the loss tensor."""
        self.loss_val = self.loss()
    def recon_optimize(self):
        """Attach the training op."""
        self.train_op = self.train()
    def cal_predict(self):
        """Predicted class = argmax over logits."""
        self.predictions = tf.argmax(self.logits, axis=1, name="predictions")  # shape:[None,]
    def cal_accuracy(self):
        """Mean accuracy of predictions against the integer labels."""
        correct_prediction = tf.equal(tf.cast(self.predictions, tf.int32), self.labels)
        # tf.argmax(self.logits, 1)-->[batch_size]
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy")  # shape=()
    def instantiate_weights(self):
        """Define all trainable weights (embedding + linear classifier)."""
        self.Embedding = tf.get_variable("Embedding", [self.vocab_size, self.embed_size])
        self.W = tf.get_variable("W", [self.embed_size, self.label_size])
        self.b = tf.get_variable("b", [self.label_size])
    def inference(self):
        """Main computation graph: 1.embedding --> 2.average --> 3.linear classifier."""
        # 1. get embedding of words in the sentence
        sentence_embeddings = tf.nn.embedding_lookup(self.Embedding, self.sentence)
        # [None,self.sentence_len,self.embed_size]
        # 2. average vectors, to get representation of the sentence
        self.sentence_embeddings = tf.reduce_mean(sentence_embeddings, axis=1)
        # [None,self.embed_size]
        # 3. linear classifier layer
        logits = tf.matmul(self.sentence_embeddings, self.W) + self.b
        # [None, self.label_size]==tf.matmul([None,self.embed_size],[self.embed_size,self.label_size])
        return logits
    def loss(self, l2_lambda=0.01):  # 0.0001-->0.001
        """Loss tensor: sampled NCE when training, sigmoid CE otherwise.

        NOTE(review): in the eval branch the loss is summed per example but
        not reduced to a scalar before adding l2_losses — confirm callers
        expect a [batch]-shaped eval loss.
        """
        if self.is_training:  # training
            labels = tf.reshape(self.labels, [-1])  # [batch_size,1]------>[batch_size,]
            labels = tf.expand_dims(labels, 1)  # [batch_size,]----->[batch_size,1]
            loss = tf.reduce_mean(
                tf.nn.nce_loss(weights=tf.transpose(self.W),
                               biases=self.b,
                               labels=labels,
                               inputs=self.sentence_embeddings,
                               num_sampled=self.num_sampled,  # scalar. 100
                               num_classes=self.label_size, partition_strategy="div"))  # scalar. 1999
        else:
            labels_one_hot = tf.one_hot(self.labels, self.label_size)  # [batch_size]---->[batch_size,label_size]
            loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels_one_hot, logits=self.logits)
            # labels:[batch_size,label_size];logits:[batch, label_size]
            print("loss0:", loss)  # shape=(?, 1999)
            loss = tf.reduce_sum(loss, axis=1)
            print("loss1:", loss)  # shape=(?,)
        # L2-regularize every trainable weight except biases.
        l2_losses = tf.add_n(
            [tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'bias' not in v.name]) * l2_lambda
        return loss + l2_losses
    def train(self):
        """Build the optimizer op with exponentially decayed learning rate."""
        learning_rate = tf.train.exponential_decay(self.learning_rate, self.global_step, self.decay_steps,
                                                   self.decay_rate, staircase=True)
        train_op = tf.contrib.layers.optimize_loss(self.loss_val, global_step=self.global_step,
                                                   learning_rate=learning_rate, optimizer="Adam")
        return train_op
|
def fromRGB(rgb: str):
    """
    Convert an RGB hex string (e.g. "ff8000" or "#ff8000") to a 3-element
    list of floats in [0, 1], suitable for passing to OpenGL.

    Generalization: a leading '#' (CSS-style color) is now tolerated.
    """
    rgb = rgb.lstrip('#')
    return [int(rgb[2 * i: 2 * i + 2], 16) / 255 for i in range(3)]
# TODO: add more color conversions and tools
|
class Student(Person):
    """A Person with test scores and a letter-grade calculation."""
    # Parameters:
    #   firstName - A string denoting the Person's first name.
    #   lastName - A string denoting the Person's last name.
    #   idNumber - An integer denoting the Person's ID number.
    #   scores - An array of integers denoting the Person's test scores.
    def __init__(self, firstName, lastName, idNumber, scores):
        self.firstName = firstName
        self.lastName = lastName
        self.idNumber = idNumber
        self.scores = scores
    def calculate(self):
        """Return a single character denoting the grade for the average score.

        BUG FIX: the original printed the grade and then called quit(),
        terminating the whole interpreter; per the documented contract
        ("Return: A character denoting the grade.") it must return the
        letter instead.
        """
        avg = sum(self.scores) / len(self.scores)
        if avg >= 90:
            return 'O'
        elif avg >= 80:
            return 'E'
        elif avg >= 70:
            return 'A'
        elif avg >= 55:
            return 'P'
        elif avg >= 40:
            return 'D'
        else:
            return 'T'
|
# Source and destination file names.
test_source = "misc_rst_html5.txt"
test_destination = "misc_rst_html5.html"
# Keyword parameters passed to publish_file.
reader_name = "standalone"
parser_name = "rst"
writer_name = "html5"
# Settings
# NOTE(review): `settings_overrides` is not defined here — it is expected to
# be pre-defined by the functional-test harness that exec()s this file.
# local copy of stylesheets:
# (Test runs in ``docutils/test/``, we need relative path from there.)
settings_overrides['stylesheet_dirs'] = ('.', 'functional/input/data')
settings_overrides['stylesheet_path'] = 'minimal.css,responsive.css'
settings_overrides['smart_quotes'] = 'yes'
settings_overrides['embed_images'] = 'yes'
settings_overrides['toc_backlinks'] = 'top'
settings_overrides['section_self_link'] = True
|
import interpretVtt
import cv2
import math
import datetime
def createDateTime(ms):
    """Return a datetime `ms` milliseconds after 1900-01-01 00:00:00."""
    epoch = datetime.datetime(1900, 1, 1)
    return epoch + datetime.timedelta(milliseconds=ms)
def frameNumberToDateTime(frameNumber, fps):
    """Map a 0-based frame index to a timestamp via the video frame rate."""
    seconds = float(frameNumber) / float(fps)
    return createDateTime(seconds * 1000)
def readFrames(videoFile):
    """Sample roughly one frame every 15 seconds from a video file.

    Returns a list of dicts {'frame', 'frameNumber', 'time'}, where 'time'
    is a datetime offset from 1900-01-01 by the frame's position.
    """
    cap = cv2.VideoCapture(videoFile)
    try:
        if not cap.isOpened():
            print("could not open")
        lengthEstimate = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fps = cap.get(cv2.CAP_PROP_FPS)
        # Youtube videos sometimes report an incorrect frame rate
        # rescale to 30
        if (fps == 1000):
            scale = 1000 / 30
            fps /= scale
            lengthEstimate /= scale
        durationEstimate = float(lengthEstimate) / float(fps)  # seconds
        minutesEstimate = math.floor(durationEstimate / 60)
        sampleEvery = math.floor(15 * fps)  # keep one frame per ~15 s
        capturedFrames = []
        i = 0
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            if (i % sampleEvery == 0):
                capturedFrames.append({'frame': frame, 'frameNumber': i, 'time': frameNumberToDateTime(i, fps)})
            i += 1
        # BUG FIX: the original had `return []` inside the finally block,
        # which overrode this return value (and swallowed any in-flight
        # exception), so the function ALWAYS returned an empty list.
        return capturedFrames
    finally:
        cap.release()
def getSubtitlesForFrame(captions, currentFrame, nextFrame):
    """Collect caption text between currentFrame's time and nextFrame's time.

    Returns (fullText, rest) where `rest` holds the captions not yet
    consumed (empty list when nextFrame is None, i.e. the last frame).
    """
    # Cleanup: the original also bound currentFrame['frame'] to an unused
    # local; removed.
    start = currentFrame['time']
    text = []
    rest = []
    if (nextFrame):
        end = nextFrame['time']
        text, rest = interpretVtt.subtitlesBetween(captions, start, end)
    else:
        # Last sampled frame: take everything that remains.
        text = interpretVtt.subtitlesAfter(captions, start)
    fullText = "\n".join(text)
    return (fullText, rest)
def interpretVideo(videoFile, subtitleFile):
    """Pair each sampled video frame with the caption text shown near it."""
    captions = interpretVtt.read(subtitleFile)
    capturedFrames = readFrames(videoFile)
    frames = []
    total = len(capturedFrames)
    for idx, current in enumerate(capturedFrames):
        following = capturedFrames[idx + 1] if idx + 1 < total else None
        # Consume captions as we advance through the video.
        fullText, captions = getSubtitlesForFrame(captions, current, following)
        frames.append({'frame': current['frame'], 'text': fullText})
    return frames
from setuptools import setup, find_packages
import json
package_json = json.load(open('package.json'))
version = package_json['version']
setup(
name='mockup',
version=version,
description="A collection of client side patterns for faster and easier "
"web development",
long_description=open("README.rst").read(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Plone",
"Framework :: Plone :: 5.2",
"Framework :: Plone :: Core",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='plone mockup',
author='Plone Foundation',
author_email='plone-developers@lists.sourceforge.net',
url='https://github.com/plone/mockup',
license='BSD',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[],
entry_points='''
[z3c.autoinclude.plugin]
target = mockup
''',
)
|
def info(string):
    """Return the canned search parameter for a known key (case-insensitive).

    Unknown keys yield an empty string.
    """
    defaults = {
        'city': 'warszawa',
        'product': 'mieszkanie',
        'minimumprice': '125000',
    }
    return defaults.get(string.lower(), '')
from torchvision import transforms
from torchvision.datasets.folder import ImageFolder,default_loader,IMG_EXTENSIONS
import copy
import torch
def training_augmentation(resize = 256,crop = 224):
    """Training-time transform: resize, random crop + flip, tensor, ImageNet norm."""
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    steps = [
        transforms.Resize(resize),
        transforms.RandomResizedCrop(crop),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(imagenet_mean, imagenet_std),
    ]
    return transforms.Compose(steps)
def validation_augmentation(resize = 256,crop = 224):
    """Validation-time transform: deterministic resize + center crop, ImageNet norm."""
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    steps = [
        transforms.Resize(resize),
        transforms.CenterCrop(crop),
        transforms.ToTensor(),
        transforms.Normalize(imagenet_mean, imagenet_std),
    ]
    return transforms.Compose(steps)
|
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import unittest
from graspologic.layouts.nooverlap._node import _Node
from graspologic.layouts.nooverlap._quad_node import (
_QuadNode,
is_overlap,
is_overlapping_any_node_and_index,
)
class TestOverlapCheck(unittest.TestCase):
    """Unit tests for circle-overlap helpers used by the no-overlap layout."""
    def setUp(self):
        # A quad node seeded with two nodes; _Node args appear to be
        # (id, x, y, size, community, color) — TODO confirm against _Node.
        self.qn = _QuadNode(
            [_Node(99, 3, 7, 2, 0, "red"), _Node(100, 2, 9, 3, 0, "blue")], 5, 50
        )
    def test_grid_is_overlap(self):
        """is_overlap(x1, y1, r1, x2, y2, r2) for separated/touching circles."""
        overlaps = is_overlap(0, 0, 5, 20, 20, 5)
        self.assertFalse(overlaps)
        overlaps = is_overlap(0, 0, 5, 10, 10, 5)
        self.assertFalse(overlaps)
        overlaps = is_overlap(0, 0, 5, 0, 10, 5)
        self.assertTrue(overlaps)  # barely touches
        overlaps = is_overlap(0, 0, 5, 10, 0, 5)
        self.assertTrue(overlaps)
        # Shrinking one radius below tangency breaks the overlap.
        overlaps = is_overlap(0, 0, 4.999, 10, 0, 5)
        self.assertFalse(overlaps)
        overlaps = is_overlap(2, 2, 1, 4, 4, 1)
        self.assertFalse(overlaps)
    def test_overlap_check_list(self):
        """is_overlapping_any_node returns None or the first overlapping node."""
        to_check = _Node(0, 2, 2, 1, -1, "blue")
        others = [
            _Node(1, 5, 5, 1, -1, "blue"),
            _Node(2, 6, 6, 1, -1, "blue"),
            _Node(3, 7, 7, 1, -1, "blue"),
        ]
        overlapping_node = self.qn.is_overlapping_any_node(
            to_check, to_check.x, to_check.y, others
        )
        self.assertIsNone(overlapping_node)
        # Node 4 at (3, 3) overlaps the circle at (2, 2) with radius 1.
        others += [_Node(4, 3, 3, 1, -1, "red")]
        overlapping_node = self.qn.is_overlapping_any_node(
            to_check, to_check.x, to_check.y, others
        )
        self.assertIsNotNone(overlapping_node)
        self.assertEqual(overlapping_node.node_id, 4)
    def test_is_overlapping_any_node_and_index(self):
        """Module-level variant that also reports the scan index."""
        to_check = _Node(0, 2, 2, 1, -1, "blue")
        others = [
            _Node(1, 5, 5, 1, -1, "blue"),
            _Node(2, 6, 6, 1, -1, "blue"),
            _Node(3, 7, 7, 1, -1, "blue"),
        ]
        ov_idx, idx = 0, len(others)
        ov_idx, overlapping_node = is_overlapping_any_node_and_index(
            to_check, to_check.x, to_check.y, others, ov_idx, idx
        )
        self.assertEqual(2, ov_idx)
        ov_idx, overlapping_node = is_overlapping_any_node_and_index(
            to_check, to_check.x, to_check.y, others, 2, 3
        )
        self.assertEqual(2, ov_idx)
    def test_just_outside_box(self):
        """is_just_outside_box(minx, miny, maxx, maxy, pad, x, y, r)."""
        self.assertTrue(self.qn.is_just_outside_box(5, 5, 10, 10, 1, 6, 4.9, 1))  # down
        self.assertTrue(
            self.qn.is_just_outside_box(5, 5, 10, 10, 1, 4, 4, 1)
        )  # down left
        self.assertTrue(self.qn.is_just_outside_box(5, 5, 10, 10, 1, 4, 7.1, 1))  # left
        self.assertTrue(
            self.qn.is_just_outside_box(5, 5, 10, 10, 1, 11, 7.1, 1)
        )  # right
        self.assertTrue(self.qn.is_just_outside_box(5, 5, 10, 10, 1, 7.1, 11, 1))  # up
        # inside
        self.assertFalse(
            self.qn.is_just_outside_box(5, 5, 10, 10, 1, 6, 6, 1)
        )  # inside
        self.assertFalse(
            self.qn.is_just_outside_box(5, 5, 10, 10, 1, 9, 9, 1)
        )  # inside
        # way outside
        self.assertFalse(
            self.qn.is_just_outside_box(5, 5, 10, 10, 1, 2.5, 7.1, 1)
        )  # far left
        self.assertFalse(
            self.qn.is_just_outside_box(5, 5, 10, 10, 1, 12.5, 7, 1)
        )  # right
        self.assertFalse(
            self.qn.is_just_outside_box(5, 5, 10, 10, 1, 7.1, 15, 1)
        )  # far up
        self.assertFalse(
            self.qn.is_just_outside_box(5, 5, 10, 10, 1, 7.1, -3, 1)
        )  # far down
        # TESTING NEGATIVE
        self.assertTrue(
            self.qn.is_just_outside_box(-10, -10, -5, -5, 1, -6, -11, 1)
        )  # down
        self.assertTrue(
            self.qn.is_just_outside_box(-10, -10, -5, -5, 1, -4, -4, 1)
        )  # up right
        self.assertTrue(
            self.qn.is_just_outside_box(-10, -10, -5, -5, 1, -11, -7.1, 1)
        )  # left
        self.assertTrue(
            self.qn.is_just_outside_box(-10, -10, -5, -5, 1, -4, -7.1, 1)
        )  # right
        self.assertTrue(
            self.qn.is_just_outside_box(-10, -10, -5, -5, 1, -7.1, -4, 1)
        )  # up
        # inside
        self.assertFalse(
            self.qn.is_just_outside_box(-10, -10, -5, -5, 1, -6, -6, 1)
        )  # inside
        self.assertFalse(
            self.qn.is_just_outside_box(-10, -10, -5, -5, 1, -9, -9, 1)
        )  # inside
        # way outside
        self.assertFalse(
            self.qn.is_just_outside_box(-10, -10, -5, -5, 1, -2.5, -7.1, 1)
        )  # far right
        self.assertFalse(
            self.qn.is_just_outside_box(-10, -10, -5, -5, 1, -12.5, -7, 1)
        )  # left
        self.assertFalse(
            self.qn.is_just_outside_box(-10, -10, -5, -5, 1, -7.1, -15, 1)
        )  # far down
        self.assertFalse(
            self.qn.is_just_outside_box(-10, -10, -5, -5, 1, -7.1, -1, 1)
        )  # far up
    def test_get_nodes_just_outside_box(self):
        """get_nodes_near_lines keeps only nodes in the 'just outside' band."""
        others = [
            _Node(0, 6, 4.9, 1, 7, "blue"),
            _Node(1, 4, 4, 1, -1, "blue"),
            _Node(2, 4, 7.1, 1, -1, "blue"),
            _Node(3, 11, 7.1, 1, -1, "blue"),
            _Node(4, 7.1, 11, 1, -1, "blue"),
            _Node(5, 6, 6, 1, -1, "blue"),
            _Node(6, 9, 9, 1, -1, "blue"),
            _Node(7, 2.5, 7.1, 1, -1, "blue"),
            _Node(8, 7.1, 15, 1, -1, "blue"),
            _Node(9, 7.1, -3, 1, -1, "blue"),
        ]
        local_quad = _QuadNode(
            [_Node(99, 5, 5, 1, 0, "red"), _Node(100, 10, 10, 1, 0, "blue")], 5, 50
        )
        # Nodes 0-4 sit just outside the quad's bounding box; 5-9 do not.
        just_outside = local_quad.get_nodes_near_lines(others)
        self.assertEqual(5, len(just_outside))
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore import Tensor
from mindspore.nn import Cell
import mindspore.ops as P
# All tests in this file run the Squeeze op in graph mode on CPU.
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class SqueezeNet(Cell):
    """Minimal network wrapping P.Squeeze (drops every size-1 axis)."""
    def __init__(self):
        super(SqueezeNet, self).__init__()
        self.squeeze = P.Squeeze()
    def construct(self, x):
        # Forward pass: squeeze all dimensions of size 1 out of x.
        return self.squeeze(x)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_squeeze_shape_float32():
    """Squeezing an all-ones float32 tensor drops every size-1 axis."""
    input_np = np.ones(shape=[1, 2, 1, 1, 8, 3, 1]).astype(np.float32)
    expected = np.ones(shape=[2, 8, 3]).astype(np.float32)
    output = SqueezeNet()(Tensor(input_np))
    assert np.allclose(output.asnumpy(), expected, rtol=1.e-4,
                       atol=1.e-8, equal_nan=True)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_squeeze_shape_int32():
    """A (2, 1) int32 column squeezes to a length-2 vector."""
    input_np = np.array([[7], [11]]).astype(np.int32)
    expected = np.array([7, 11]).astype(np.int32)
    output = SqueezeNet()(Tensor(input_np))
    assert np.allclose(output.asnumpy(), expected, rtol=1.e-4,
                       atol=1.e-8, equal_nan=True)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_squeeze_shape_bool():
    """A (2, 1) boolean column squeezes to a length-2 vector."""
    input_np = np.array([[True], [False]]).astype(np.bool_)
    expected = np.array([True, False]).astype(np.bool_)
    output = SqueezeNet()(Tensor(input_np))
    assert np.allclose(output.asnumpy(), expected, rtol=1.e-4,
                       atol=1.e-8, equal_nan=True)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_squeeze_shape_float64():
    """Random float64 input must match np.squeeze of the same array."""
    input_np = np.random.random([1, 2, 1, 1, 8, 3, 1]).astype(np.float64)
    expected = np.squeeze(input_np)
    output = SqueezeNet()(Tensor(input_np))
    print(output.asnumpy()[0][0], expected[0][0])
    assert np.allclose(output.asnumpy(), expected, rtol=1.e-4,
                       atol=1.e-8, equal_nan=True)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_squeeze_shape_uint16():
    """Random uint16 input must match np.squeeze of the same array."""
    input_np = np.random.random([1, 2, 1, 1, 8, 3, 1]).astype(np.uint16)
    expected = np.squeeze(input_np)
    output = SqueezeNet()(Tensor(input_np))
    print(output.asnumpy()[0][0], expected[0][0])
    assert np.allclose(output.asnumpy(), expected, rtol=1.e-4,
                       atol=1.e-8, equal_nan=True)
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.db.models import get_app, get_models
class Migration(SchemaMigration):
    """South migration that renames whole Django apps: rewrites South's
    migration history, the content-type registry, and every model table.
    """
    # old_name => new_name
    apps_to_rename = {
        'some_old_app' : 'some_new_app',
        'another_old_app' : 'another_new_app'
    }
    def forwards(self, orm):
        # For each app: rewrite bookkeeping rows, then rename its tables.
        for old_appname, new_appname in self.apps_to_rename.items():
            # Renaming model from 'Foo' to 'Bar'
            db.execute("UPDATE south_migrationhistory SET app_name = %s WHERE app_name = %s", [new_appname, old_appname])
            db.execute("UPDATE django_content_type SET app_label = %s WHERE app_label = %s", [new_appname, old_appname])
            app = get_app(new_appname)
            for model in get_models(app, include_auto_created=True):
                if model._meta.proxy == True:
                    continue
                # db_table carries the new app prefix; derive the old name
                # by swapping the prefix, then rename old -> new.
                new_table_name = model._meta.db_table
                old_table_name = old_appname + new_table_name[len(new_appname):]
                db.rename_table(old_table_name, new_table_name)
    def backwards(self, orm):
        for old_appname, new_appname in self.apps_to_rename.items():
            # Renaming model from 'Foo' to 'Bar'
            db.execute("UPDATE south_migrationhistory SET app_name = %s WHERE app_name = %s", [old_appname, new_appname])
            db.execute("UPDATE django_content_type SET app_label = %s WHERE app_label = %s", [old_appname, new_appname])
            app = get_app(new_appname)
            for model in get_models(app, include_auto_created=True):
                if model._meta.proxy == True:
                    continue
                # NOTE(review): the local names are swapped relative to their
                # roles here — `old_table_name` holds the CURRENT (new-app)
                # table and `new_table_name` the restored old-app name; the
                # rename call itself is correct (current -> old).
                old_table_name = model._meta.db_table
                new_table_name = old_appname + old_table_name[len(new_appname):]
                db.rename_table(old_table_name, new_table_name)
|
import glob, os
import numpy as np
from dess_utils.data_utils import imagesc
from skimage import measure
from scipy import ndimage
def convert_3D_to_2D(source, destination):
    """Project 3-D segmentation volumes (.npy) under `source` to 2-D RGB
    preview images saved as .png under `destination`.

    Channel encoding: R = mean occupancy of label 3 along axis 1,
    G = label 4; the B (bone) channel is currently disabled.
    """
    volumes = sorted(glob.glob(source + '*.npy'))
    if not os.path.isdir(destination):
        os.makedirs(destination)
    for name in volumes:
        x = np.load(name)
        r = (x == 3).mean(1)
        g = (x == 4).mean(1)
        # Cleanup: the original also computed the bone channel `b` on every
        # volume even though its only use is commented out below — that dead
        # (and expensive) computation was removed.
        y = np.zeros((224, 224, 3))
        y[:r.shape[0], :, 0] = r
        y[:r.shape[0], :, 1] = g
        #y[:r.shape[0], :, 2] = b
        imagesc(y, show=False, save=destination + name.split('/')[-1].split('.')[0] + '.png')
def clean_bone_blobs(x):
    """Keep only the two largest connected bone components (femur, tibia).

    Input labels: 1/2 = bone, 3 = femoral cartilage, 4 = tibial cartilage.
    Returns a relabeled volume (femur=1, tibia=2, cartilage preserved), or
    0 if the two components cover <= 90% of all bone voxels.
    """
    bone = ((x == 1) + (x == 2))
    bone_blobs = measure.label(bone)
    # BUG FIX: the original sized blobs with `for i in range(bone_blobs.max())`,
    # which skips the highest label entirely, so the largest components could
    # be mis-identified. np.bincount counts every label 0..max in one pass.
    blob_sizes = np.bincount(bone_blobs.ravel())
    order = np.argsort(blob_sizes)
    # Largest count is label 0 (non-bone); next two are femur and tibia.
    femur_index = order[-2]
    tibia_index = order[-3]
    femur = (bone_blobs == femur_index)
    tibia = (bone_blobs == tibia_index)
    # Cleanup: dropped the stray `print(id)` debug line, which printed a
    # module global (or the id builtin when imported), not anything local.
    ratio = (femur.sum() + tibia.sum()) / bone.sum()
    print('Extracted bone percentage = ' + str(ratio))
    if ratio <= 0.9:
        return 0
    y = 0 * x
    y[femur == 1] = 1
    y[tibia == 1] = 2
    y[x == 3] = 3
    y[x == 4] = 4
    return y
def sbl_3d(x):
    """Extract subchondral bone-length (SBL) masks for femur and tibia.

    Labels: 1=femur, 2=tibia, 3=femoral cartilage, 4=tibial cartilage.
    Returns a uint8 array of shape x.shape + (2,): channel 0 is the femur
    surface within 3 voxels of femoral cartilage, channel 1 the tibia
    surface within 3 voxels of tibial cartilage.
    """
    dist_to_fc = ndimage.distance_transform_edt(x != 3)
    # BUG FIX: the original computed dist_to_tc from (x != 3) — a copy-paste
    # of the femoral-cartilage line — so the tibial SBL was measured against
    # the wrong cartilage. Tibial cartilage is label 4.
    dist_to_tc = ndimage.distance_transform_edt(x != 4)
    femur_sbl = np.multiply((x == 1), (dist_to_fc <= 3) / 1)
    tibia_sbl = np.multiply((x == 2), (dist_to_tc <= 3) / 1)
    # Cleanup: the original also computed two *_sbl_distance arrays (EDTs to
    # the opposing bone) that were never used; that dead work was removed.
    channels = [femur_sbl, tibia_sbl]
    y = np.concatenate([np.expand_dims(c, 3) for c in channels], 3)
    return y.astype(np.uint8)
if __name__ == '__main__':
    # Batch job: clean the bone blobs and compute SBL masks for every
    # segmentation volume found under `source`.
    source = 'data/testing/SAG_3D_DESS_LEFT_seg/'
    destination_clean = source.split('_seg/')[0] + '_clean/'
    destination_sbl = source.split('_seg/')[0] + '_sbl/'
    if not os.path.isdir(destination_clean):
        os.mkdir(destination_clean)
    if not os.path.isdir(destination_sbl):
        os.mkdir(destination_sbl)
    l = sorted(glob.glob(source+'*.npy'))
    for name in l:
        x = np.load(name)
        # `id` (shadowing the builtin) holds the volume's file name.
        id = name.split(source)[1]
        y = clean_bone_blobs(x)
        z = sbl_3d(y)
        np.save(destination_clean + id, y.astype(np.uint8))
        np.save(destination_sbl + id, z.astype(np.uint8))
|
"""
cP_EOS.py
SPDX-License-Identifier: BSD-2-Clause
Copyright (c) 2021 Stuart Nolan. All rights reserved.
"""
import pdb
import CoolProp.CoolProp as cP
from CoolProp import AbstractState as cPAS
from tabulate import tabulate
from scipy.optimize import minimize
def cEOS_fit_kij(kij, data, cPAS_EOS):
    """Objective for kij fitting: mean % bubble-pressure deviation of the EOS."""
    cPAS_EOS.set_binary_interaction_double(0, 1, "kij", kij[0])
    deviation_P, deviation_y, _ = deltaVar(data, cPAS_EOS)
    return deviation_P  # fit on pressure deviation only
def deltaVar(inData, cPAS_EOS):
    """Compare EOS bubble-point predictions against experimental VLE data.

    inData rows: [T /K, P /bar, c1 liquid mole frac, c1 vapor mole frac].
    Returns (DPpP, Dy, outData):
      DPpP    - mean absolute % deviation in bubble pressure
      Dy      - mean absolute % deviation in c1 vapor mole fraction
      outData - per-point rows formatted for tabulate()
    """
    DPpP = 0  # sum_over_i( abs(P_exp_i- P_EOS_i)/P_exp_i )
    Dy = 0  # sum_over_i( abs(vMF_exp_c1_i - vMF_EOS_c1_i )
    outData = []
    for row in inData:
        T_exp = row[0]
        P_exp = row[1]
        lMF_exp_c1 = row[2]  # c1 liquid Mole Fraction
        vMF_exp_c1 = row[3]  # c1 vapor Mole Fraction
        cPAS_EOS.set_mole_fractions([lMF_exp_c1, 1-lMF_exp_c1])
        #if cPAS_cEOS.phase() in [0,6]: # 0 is liquid phase
        cPAS_EOS.update(cP.QT_INPUTS, 0, T_exp)  # VLE, 0 = 100% liquid phase
        P_EOS = cPAS_EOS.p()/100000  # bar
        DPpP = DPpP + abs(P_exp - P_EOS)/P_exp
        vMFs_EOS = cPAS_EOS.mole_fractions_vapor()  # EOS vMFs = [c1, c2]
        Dy = Dy + abs(vMF_exp_c1 - vMFs_EOS[0])
        outData.append([T_exp, lMF_exp_c1, P_exp,
                        "{0:.3f}".format(P_EOS),
                        "{0:.3f}".format(vMF_exp_c1),
                        "{0:.3f}".format(vMFs_EOS[0])])
    # BUG FIX: the original normalized by len(data) — a module-level global —
    # instead of the inData parameter, silently mis-scaling the deviations
    # whenever the argument differed from that global.
    DPpP = DPpP*100/len(inData)
    Dy = Dy*100/len(inData)
    return (DPpP, Dy, outData)
"""
from CoolProp/include/DataStructures.h
0 iphase_liquid, < Subcritical liquid
1 iphase_supercritical, < Supercritical (p > pc, T > Tc)
2 iphase_supercritical_gas, < Supercritical gas (p < pc, T > Tc)
3 iphase_supercritical_liquid, < Supercritical liquid (p > pc, T < Tc)
4 iphase_critical_point, < At the critical point
5 iphase_gas, < Subcritical gas
6 iphase_twophase, < Twophase
7 iphase_unknown, < Unknown phase
8 iphase_not_imposed
import CoolProp
CoolProp.iphase_twophase
Out: 6
"""
# Map every CoolProp fluid name to its CAS registry number.
cPFluids = dict([(fluid, cP.get_fluid_param_string(fluid,"CAS")) for fluid in
                 cP.get_global_param_string("fluids_list").split(',')])
# H2, N2, CO ternary data, Table VI, pA-4 (p78), Eubanks, 1957
# REF: https://scholarship.rice.edu/bitstream/handle/1911/18254/3079688.PDF?sequence=1&isAllowed=y
c1="Hydrogen"
c2="Nitrogen"
rawdata_header=["T /degF", "P /psia", "%s lMF" % c1 , "%s vMF" % c1]
rawdata = [
    [-310,315,0.0487, 0.8655],
    [-310,500,0.0763, 0.8948],
    # [-310,1400,0.2488, 0.8622],
    # [-310,2000,0.3446, 0.7977],
    [-280,315,0.0377, 0.5509],
    [-280,500,0.0741, 0.6686],
    [-280,800,0.1384, 0.7205],
    [-280,1100,0.2092, 0.7070],
    [-280,1400,0.3221, 0.6462],
]
data_header=["T /K", "P /bar", "%s lMF" % c1 , "%s vMF" % c1]
data = []
# Unit conversions: psia -> bar, degF -> K.
psia2bar = lambda P : P/14.503774
degF2K = lambda T : 5/9*(T - 32) + 273.15
for row in rawdata:
    data.append([degF2K(row[0]),psia2bar(row[1]),row[2],row[3]])
#print(tabulate(data,headers=data_header)+'\n')
# Fit the binary interaction parameter kij for the chosen cubic EOS.
EOS="PR"
cPAS_cEOS = cPAS(EOS, c1+"&"+c2)
res = minimize(cEOS_fit_kij, 0.1, bounds=[(-0.2,0.5)], args=(data, cPAS_cEOS))
kij=res.x[0]
#kij=0.0864 # EOS="PR"
#kij=0.0641 # EOS="SRK"
# Re-evaluate the deviations at the fitted kij and report.
cPAS_cEOS.set_binary_interaction_double(0,1,"kij",kij)
(DPpP,Dy,outData) = deltaVar(data,cPAS_cEOS)
outData_header=["T_exp /K", "c1 lMF_exp", "P_exp /bar", "P_%s /bar" % EOS,
                "c1 vMF_exp", "c1 vMF_%s" % EOS]
print("\nc1: %s;" % c1)
print("c2: %s;" % c2)
print("kij: %.4f; DPpP_%s: %.2f; Dy_%s: %.2f;\n" % (kij, EOS, DPpP, EOS, Dy))
print(tabulate(outData,headers=outData_header)+'\n')
CAS_c1=cPFluids[c1]
CAS_c2=cPFluids[c2]
# [c2-c1 betaT gammaT betaV gammaV], Table A8, ref 31, Kunz & Wagner 2012
# REF: https://github.com/CoolProp/CoolProp/blob/master/dev/mixtures/KunzWagner2012_TableA8.txt
#cP.set_mixture_binary_pair_data(CAS_c1,CAS_c2,'betaT',0.972532065)
#cP.set_mixture_binary_pair_data(CAS_c1,CAS_c2,'betaV',0.946134337)
# The multi-fluid Helmholtz EOS comparison below is currently disabled.
EOS = "HEOS"
cPAS_HEOS = cP.AbstractState(EOS,c1+"&"+c2)
"""
(DPpP,Dy,outData) = deltaVar(data,cPAS_HEOS)
# pdb.pm() from ipython...
outData_header=["T_exp /K", "c1 lMF_exp", "P_exp /bar", "P_%s /bar" % EOS,
                "c1 vMF_exp", "c1 vMF_%s" % EOS]
print("\nc1: %s;" % c1)
print("c2: %s;" % c2)
print("DPpP_%s: %.2f; Dy_%s: %.2f;\n" % (EOS, DPpP, EOS, Dy))
print(tabulate(outData,headers=outData_header)+'\n')
"""
|
from typing import *
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node holding a value and two child links."""
    def __init__(self, x):
        self.val = x
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution:
    """LeetCode 95 — Unique Binary Search Trees II.

    Given an integer n, generate all structurally unique binary search
    trees whose nodes hold the values 1..n.

    Example: input 3 yields these 5 trees:
        [1,null,3,2], [3,2,null,1], [3,1,null,null,2],
        [2,1,3], [1,null,2,null,3]
    """
    def generateTrees(self, n: int) -> List[TreeNode]:
        """Return the list of root nodes of every unique BST over 1..n.

        BUG FIX: the original body was `pass` (always returned None),
        contradicting the declared List[TreeNode] return type.
        """
        if n <= 0:
            return []

        def build(start: int, end: int) -> List[TreeNode]:
            # An empty range contributes a single "absent subtree" entry.
            if start > end:
                return [None]
            trees: List[TreeNode] = []
            for root_val in range(start, end + 1):
                for left in build(start, root_val - 1):
                    for right in build(root_val + 1, end):
                        root = TreeNode(root_val)
                        root.left = left
                        root.right = right
                        trees.append(root)
            return trees

        return build(1, n)
def generate_trees(n: int) -> List[TreeNode]:
    """
    :param n:
    :return:
    >>> len(generate_trees(3))
    5
    >>> generate_trees(0)
    []
    """
    # Non-positive n has no nodes to place, hence no trees at all.
    if n <= 0:
        return []
    return generate_trees_core(1, n)
def generate_trees_core(start: int, end: int) -> List[TreeNode]:
    """Return every unique BST shape over the value range [start, end]."""
    if start > end:
        return [None]  # placeholder: an absent subtree
    if start == end:
        return [TreeNode(start)]
    all_trees: List[TreeNode] = []
    for root_val in range(start, end + 1):
        # Every value takes a turn as the root; subtrees come from the
        # values strictly below and strictly above it.
        left_options = generate_trees_core(start, root_val - 1)
        right_options = generate_trees_core(root_val + 1, end)
        for left in left_options:
            for right in right_options:
                node: TreeNode = TreeNode(root_val)
                node.left = left
                node.right = right
                all_trees.append(node)
    return all_trees
if __name__ == '__main__':
    # Run the doctests embedded in generate_trees when executed directly.
    import doctest
    doctest.testmod()
|
# import webapp2
# import os
import logging
from google.appengine.api import urlfetch
import urllib2
# from urlparse import urlparse
from urllib import urlencode
import json
import secrets
import time
import base64
import hmac
import hashlib
import email.utils
import Cookie
# OAuth2 endpoints. The Facebook token/info URIs are built inline in
# LoginManager below; the Google ones are fixed here.
FB_LOGIN_URL = "https://www.facebook.com/dialog/oauth"
# FB_GET_TOKEN_URI = ""
# FB_GET_INFO_URI = ""
GOOGLE_LOGIN_URI = "https://accounts.google.com/o/oauth2/auth"
GOOGLE_GET_TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
GOOGLE_GET_INFO_URI = 'https://www.googleapis.com/oauth2/v3/userinfo?{0}'
def cookie_signature(*parts):
    """Generates a cookie signature.

    We use the app secret since it is different for every app (so
    people using this example don't accidentally all use the same secret).
    """
    mac = hmac.new(secrets.ENCRYPTION_SECRET, digestmod=hashlib.sha1)
    for piece in parts:
        mac.update(piece)
    return mac.hexdigest()
def set_cookie(response, name, value, domain=None, path="/", expires=None, encrypt=True):
    """Generates and signs a cookie for the given name/value"""
    # encrypt is never used
    # NOTE(review): expires == 0 signs with timestamp "0" so the cookie
    # fails the age check in parse_cookie — presumably the logout path;
    # confirm against callers.
    if expires == 0:
        timestamp = str(0)
    else:
        timestamp = str(int(time.time()))
    # Cookie payload format: <b64 value>|<timestamp>|<hmac signature>
    value = base64.b64encode(value)
    signature = cookie_signature(value, timestamp)
    cookie = Cookie.BaseCookie()
    cookie[name] = "|".join([value, timestamp, signature])
    cookie[name]["path"] = path
    if domain:
        cookie[name]["domain"] = domain
    if expires:
        cookie[name]["expires"] = email.utils.formatdate(
            expires, localtime=False, usegmt=True)
    # output() renders "Set-Cookie: ..."; the [12:] slice drops that header
    # name so only the cookie string is passed to add_header.
    response.headers.add_header("Set-Cookie", cookie.output()[12:])
def parse_cookie(value, cookie_duration):
    """Parses and verifies a cookie value produced by set_cookie.

    Returns the decoded payload, or None when the cookie is missing,
    malformed, tampered with, or older than cookie_duration seconds.
    """
    if not value:
        return None
    parts = value.split("|")
    if len(parts) != 3:
        return None
    if cookie_signature(parts[0], parts[1]) != parts[2]:
        logging.warning("Invalid cookie signature %r", value)
        return None
    timestamp = int(parts[1])
    if timestamp < (time.time() - cookie_duration):
        logging.warning("Expired cookie %r", value)
        return None
    try:
        return base64.b64decode(parts[0]).strip()
    except (TypeError, ValueError):
        # BUG FIX: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only base64 decode errors belong here.
        return None
class LoginManager():
    """OAuth2 login helper for Facebook and Google (App Engine, Python 2)."""
    @staticmethod
    def get_login_URLs(request, params={}):
        """Return a dict mapping provider name -> login redirect URL."""
        login_dict = {
            'facebook': LoginManager.get_login_URL(request, 'facebook'),
            'google': LoginManager.get_login_URL(request, 'google')
        }
        return login_dict
    @staticmethod
    def get_login_URL(request, provider, params={}):
        """Build the OAuth dialog URL for one provider.

        NOTE(review): an unknown provider leaves `url` unbound and raises
        UnboundLocalError; callers only pass 'facebook' or 'google'.
        """
        callback_url = request.host_url
        # request.url.split('?')[0]
        if provider == 'facebook':
            url = FB_LOGIN_URL + "?client_id=" + secrets.FB_APP_ID + \
                "&redirect_uri=" + callback_url + "/fb/oauth_callback&scope=email"
        if provider == 'google':
            url = GOOGLE_LOGIN_URI + "?client_id=" + secrets.GOOGLE_APP_ID + "&redirect_uri=" + \
                callback_url + "/google/oauth_callback" + \
                "&response_type=code&scope=email%20profile"
        return url
    @staticmethod
    def handle_oauth_callback(request, provider):
        """Exchange the OAuth `code` for an access token.

        Returns (access_token, error); exactly one of the two is None.
        """
        error = request.get('error')
        if error:
            logging.debug(error)
            # BUG FIX: this path returned a 3-tuple (None, None, error) while
            # every other path returns 2 values, breaking callers that unpack.
            return None, error
        # verify csrf state
        # extract access token from the parameters
        code = request.get('code')
        callback_url = request.url.split('?')[0]
        # exchange code for token
        if provider == 'facebook':
            url = "https://graph.facebook.com/oauth/access_token?client_id="+secrets.FB_APP_ID+"&redirect_uri=" + \
                callback_url + "&client_secret=" + \
                secrets.FB_APP_SECRET + "&code=" + code
            result = urllib2.urlopen(url).read()
            if result:
                # Response body: "access_token=<token>&expires=<seconds>".
                # BUG FIX: the original used lstrip("access_token="), which
                # strips a *character set*, corrupting any token that starts
                # with one of those characters; split the prefix explicitly.
                token_part, expiration = result.split("&expires=")
                access_token = token_part.partition("access_token=")[2]
                # url = "https://graph.facebook.com/me?access_token=" + \
                #     access_token
                # return json.loads(urllib2.urlopen(url).read()), access_token, None
                return access_token, None
            else:
                return None, 'No Result'
        elif provider == 'google':
            payload = {
                'code': code,
                'client_id': secrets.GOOGLE_APP_ID,
                'client_secret': secrets.GOOGLE_APP_SECRET,
                'redirect_uri': callback_url,
                'grant_type': 'authorization_code'
            }
            # get access token from the request token
            resp = urlfetch.fetch(
                url=GOOGLE_GET_TOKEN_URI,
                payload=urlencode(payload),
                method=urlfetch.POST,
                headers={'Content-Type': 'application/x-www-form-urlencoded'}
            )
            # get user data using access token
            auth_info = json.loads(resp.content)
            logging.debug('auth_info')
            logging.debug(auth_info)
            access_token = auth_info['access_token']
            # target_url = GOOGLE_GET_INFO_URI.format(
            #     urlencode({'access_token': auth_info['access_token']}))
            # resp = urlfetch.fetch(target_url).content
            # user_data = json.loads(resp)
            # if 'id' not in user_data and 'sub' in user_data:
            #     user_data['id'] = user_data['sub']
            return access_token, None
        else:
            return None, 'invalid provider'
|
import sys
sys.path.append('/home/cryo/')
import pysmurf
import time
# Debug utility: sweep one StreamData EPICS register through the full
# signed 16-bit range in fixed steps, looping forever.
epics_root = 'dev_epics'
sd = epics_root + ':AMCc:FpgaTopLevel:AppTop:AppCore:StreamReg:StreamData[{}]'
S = pysmurf.SmurfControl(setup=False, epics_root=epics_root,
                         make_logfile=False)
ch = 0
step_size = 2**6
a = -2**15  # start at the bottom of the int16 range
S._caput(sd.format(ch), 0, write_log=True)
while True:
    write_log=False
    # Log only every 100th step to keep the output manageable.
    if a % (step_size*100) == 0:
        write_log=True
    S._caput(sd.format(ch), a, write_log=write_log)
    a += step_size
    time.sleep(.02)
    # Wrap around once the top of the range is exceeded.
    if a > 2**15:
        a = -2**15
|
#!/usr/bin/env python
#
# ___INFO__MARK_BEGIN__
#######################################################################################
# Copyright 2016-2021 Univa Corporation (acquired and owned by Altair Engineering Inc.)
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
# ___INFO__MARK_END__
#
import tempfile
import types
from nose import SkipTest
from .utils import needs_uge
from .utils import generate_random_string
from .utils import create_config_file
from .utils import load_values
from uge.api.qconf_api import QconfApi
from uge.config.config_manager import ConfigManager
from uge.log.log_manager import LogManager
from uge.exceptions.object_not_found import ObjectNotFound
from uge.exceptions.object_already_exists import ObjectAlreadyExists
# Module-level test fixtures: a shared QconfApi instance and a randomly
# named host group so repeated test runs do not collide.
create_config_file()
API = QconfApi()
# UGE host group names start with '@'.
HOST_GROUP_NAME = '@%s' % generate_random_string(6)
CONFIG_MANAGER = ConfigManager.get_instance()
HOST_NAME = CONFIG_MANAGER['host']
LOG_MANAGER = LogManager.get_instance()
# Expected values (e.g. 'hgrp_names') loaded from the JSON fixture file.
VALUES_DICT = load_values('test_values.json')
print(VALUES_DICT)
@needs_uge
def test_generate_hgrp():
    """Generating a host group object must set its name field."""
    generated = API.generate_hgrp(HOST_GROUP_NAME)
    assert generated.data['group_name'] == HOST_GROUP_NAME
def test_list_hgrps():
    """Listing host groups must return a result object, never None."""
    listing = API.list_hgrps()
    assert listing is not None
def test_add_hgrp():
    """Adding the generated host group with the local host keeps its name."""
    added = API.add_hgrp(name=HOST_GROUP_NAME, data={'hostlist': HOST_NAME})
    assert added.data['group_name'] == HOST_GROUP_NAME
def test_object_already_exists():
    """Re-adding an existing host group must raise ObjectAlreadyExists."""
    existing = API.list_hgrps()
    if not len(existing):
        raise SkipTest('There are no configured UGE host groups.')
    try:
        API.add_hgrp(name=existing[0])
    except ObjectAlreadyExists:
        return  # expected outcome
    # add_hgrp succeeded where it should have failed
    assert False
def test_object_not_found():
    """Fetching a nonexistent host group must raise ObjectNotFound."""
    try:
        API.get_hgrp('__non_existent_host_group__')
    except ObjectNotFound:
        return  # expected outcome
    # get_hgrp succeeded where it should have failed
    assert False
def test_get_hgrp():
    """The first listed host group can be fetched by name."""
    names = API.list_hgrps()
    if not len(names):
        raise SkipTest('There are no configured UGE host groups.')
    fetched = API.get_hgrp(names[0])
    assert fetched.data['group_name'] == names[0]
def test_generate_hgrp_from_json():
    """A host group serialized to JSON regenerates an equivalent object."""
    names = API.list_hgrps()
    if not len(names):
        raise SkipTest('There are no configured UGE host groups.')
    original = API.get_hgrp(names[0])
    regenerated = API.generate_object(original.to_json())
    assert regenerated.__class__.__name__ == original.__class__.__name__
    # List-valued attributes must keep the same members, each exactly once.
    for key in list(original.data.keys()):
        value = original.data[key]
        clone_value = regenerated.data[key]
        if type(value) == list:
            assert len(value) == len(clone_value)
            for member in value:
                assert clone_value.count(member) == 1
def test_modify_hgrp():
    """Modify a host group's hostlist and then restore the original value."""
    names = API.list_hgrps()
    if not len(names):
        raise SkipTest('There are no configured UGE host groups.')
    target = names[0]
    saved_hostlist = API.get_hgrp(target).data['hostlist']
    modified = API.modify_hgrp(name=target, data={'hostlist': HOST_NAME})
    assert modified.data['hostlist'].count(HOST_NAME) == 1
    restored = API.modify_hgrp(name=target, data={'hostlist': saved_hostlist})
    assert restored.data['hostlist'] == saved_hostlist
def test_get_hgrps():
    """Every object returned by get_hgrps must appear in list_hgrps."""
    known_names = API.list_hgrps()
    for group in API.get_hgrps():
        print("#############################################")
        print(group.to_uge())
        assert group.data['group_name'] in known_names
def test_write_hgrps():
    """Write generated host groups to a temp dir, load them back, verify.

    Fix: tempfile.mkdtemp() is now called *before* entering the try block,
    so the finally clause can no longer raise NameError on ``tdir`` when
    directory creation itself fails.
    """
    # If this fails there is nothing to clean up, and ``tdir`` is
    # guaranteed to be bound for the finally clause below.
    tdir = tempfile.mkdtemp()
    try:
        print("*************************** " + tdir)
        hgrp_names = VALUES_DICT['hgrp_names']
        hgrps = API.get_hgrps()
        for hgrp in hgrps:
            print("Before #############################################")
            print(hgrp.to_uge())
        # Generate new host group objects and write them out as files.
        new_hgrps = []
        for name in hgrp_names:
            nhgrp = API.generate_hgrp(name=name)
            new_hgrps.append(nhgrp)
        API.mk_hgrps_dir(tdir)
        API.write_hgrps(new_hgrps, tdir)
        # Load the written files back into the cluster configuration.
        API.add_hgrps_from_dir(tdir)
        API.modify_hgrps_from_dir(tdir)
        hgrps = API.get_hgrps()
        for hgrp in hgrps:
            print("After #############################################")
            print(hgrp.to_uge())
        # Every generated name must now be present in the cluster.
        hgrps = API.list_hgrps()
        for name in hgrp_names:
            assert (name in hgrps)
            print("host group found: " + name)
    finally:
        API.delete_hgrps_from_dir(tdir)
        API.rm_hgrps_dir(tdir)
def test_add_hgrps():
    """Add a batch of generated host groups, verify, then delete them."""
    try:
        generated = []
        group_names = VALUES_DICT['hgrp_names']
        for name in group_names:
            generated.append(API.generate_hgrp(name=name))
        # print all host groups currently in the cluster
        for hgrp in API.get_hgrps():
            print("Before #############################################")
            print(hgrp.to_uge())
        # add host groups
        API.add_hgrps(generated)
        API.modify_hgrps(generated)
        # print all host groups currently in the cluster
        for hgrp in API.get_hgrps():
            print("After #############################################")
            print(hgrp.to_uge())
        # check that host groups have been added
        current = API.list_hgrps()
        for name in group_names:
            assert (name in current)
            print("host group found: " + name)
    finally:
        API.delete_hgrps(generated)
def test_delete_hgrp():
    """Delete the host group added earlier and verify it is gone.

    Also fixes the malformed skip message ("There UGE host group ..."
    -> "The UGE host group ...").
    """
    hgrpl = API.list_hgrps()
    if hgrpl.count(HOST_GROUP_NAME):
        API.delete_hgrp(HOST_GROUP_NAME)
        # The name must no longer appear in the listing.
        hgrpl2 = API.list_hgrps()
        assert (hgrpl2.count(HOST_GROUP_NAME) == 0)
    else:
        raise SkipTest('The UGE host group %s has not been added.' % HOST_GROUP_NAME)
|
# Maps '<task>_<signal>' ids to (pretrained policy directory, checkpoint file).
ENV_ID_TO_POLICY = {
    'gallop_ol': ('rex_gym/policies/gallop/ol', 'model.ckpt-4000000'),
    'gallop_ik': ('rex_gym/policies/gallop/ik', 'model.ckpt-2000000'),
    'walk_ik': ('rex_gym/policies/walk/ik', 'model.ckpt-2000000'),
    'walk_ol': ('rex_gym/policies/walk/ol', 'model.ckpt-4000000'),
    'standup_ol': ('rex_gym/policies/standup/ol', 'model.ckpt-2000000'),
    'turn_ik': ('rex_gym/policies/turn/ik', 'model.ckpt-2000000'),
    'turn_ol': ('rex_gym/policies/turn/ol', 'model.ckpt-2000000'),
    'poses_ik': ('rex_gym/policies/poses', 'model.ckpt-2000000')
}
# Environment class name used for each task id.
ENV_ID_TO_ENV_NAMES = {
    'gallop': 'RexReactiveEnv',
    'walk': 'RexWalkEnv',
    'turn': 'RexTurnEnv',
    'standup': 'RexStandupEnv',
    'go': 'RexGoEnv',
    'poses': 'RexPosesEnv'
}
# Default control signal per task: 'ik' or 'ol' (matching the policy ids above).
DEFAULT_SIGNAL = {
    'gallop': 'ik',
    'walk': 'ik',
    'turn': 'ol',
    'standup': 'ol',
    'go': 'ik',
    'poses': 'ik'
}
# Source data format for each terrain kind ('png'/'csv' files or generated).
TERRAIN_TYPE = {
    'mounts': 'png',
    'maze': 'png',
    'hills': 'csv',
    'random': 'random',
    'plane': 'plane'
}
|
# Packaging script for the 'cochlear' auditory-experiment module.
# NOTE(review): distutils is deprecated (removed in Python 3.12) --
# consider migrating to setuptools.
from distutils.core import setup
setup(
    name='Cochlear',
    version='0.1',
    author='Brad Buran (bburan@alum.mit.edu)',
    packages=['cochlear'],
    url='http://github.com/bburan/cochlear',
    license='LICENSE.txt',
    description='Module for various auditory experiments',
    requires=['numpy'],
    # Command-line entry points installed alongside the package.
    scripts=[
        'scripts/merge_files.py',
        'scripts/truncate_file.py',
        'scripts/run_cochlear.py',
        'scripts/remove_trials.py',
    ],
)
|
"""Test for Word cloud Xmodule functional logic."""
import json
from unittest.mock import Mock
from django.test import TestCase
from fs.memoryfs import MemoryFS
from lxml import etree
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
from webob.multidict import MultiDict
from xblock.field_data import DictFieldData
from xmodule.word_cloud_module import WordCloudBlock
from . import get_test_descriptor_system, get_test_system
class WordCloudBlockTest(TestCase):
    """
    Logic tests for Word Cloud XBlock.
    """
    # Baseline field state shared by the ajax/indexing tests: four words
    # already tallied, nothing submitted yet.
    raw_field_data = {
        'all_words': {'cat': 10, 'dog': 5, 'mom': 1, 'dad': 2},
        'top_words': {'cat': 10, 'dog': 5, 'dad': 2},
        'submitted': False,
        'display_name': 'Word Cloud Block',
        'instructions': 'Enter some random words that comes to your mind'
    }

    def test_xml_import_export_cycle(self):
        """
        Test the import export cycle.
        """
        runtime = get_test_descriptor_system()
        runtime.export_fs = MemoryFS()
        original_xml = (
            '<word_cloud display_name="Favorite Fruits" display_student_percents="false" '
            'instructions="What are your favorite fruits?" num_inputs="3" num_top_words="100"/>\n'
        )
        olx_element = etree.fromstring(original_xml)
        id_generator = Mock()
        block = WordCloudBlock.parse_xml(olx_element, runtime, None, id_generator)
        block.location = BlockUsageLocator(
            CourseLocator('org', 'course', 'run', branch='revision'), 'word_cloud', 'block_id'
        )
        # Parsed attribute values must match the OLX attributes exactly.
        assert block.display_name == 'Favorite Fruits'
        assert not block.display_student_percents
        assert block.instructions == 'What are your favorite fruits?'
        assert block.num_inputs == 3
        assert block.num_top_words == 100
        node = etree.Element("unknown_root")
        # This will export the olx to a separate file.
        block.add_xml_to_node(node)
        # The exported file must round-trip byte-for-byte.
        with runtime.export_fs.open('word_cloud/block_id.xml') as f:
            exported_xml = f.read()
        assert exported_xml == original_xml

    def test_bad_ajax_request(self):
        """
        Make sure that answer for incorrect request is error json.
        """
        module_system = get_test_system()
        block = WordCloudBlock(module_system, DictFieldData(self.raw_field_data), Mock())
        # Unknown dispatch name must produce the fail/error payload.
        response = json.loads(block.handle_ajax('bad_dispatch', {}))
        self.assertDictEqual(response, {
            'status': 'fail',
            'error': 'Unknown Command!'
        })

    def test_good_ajax_request(self):
        """
        Make sure that ajax request works correctly.
        """
        module_system = get_test_system()
        block = WordCloudBlock(module_system, DictFieldData(self.raw_field_data), Mock())
        # Submit four words (one duplicate) on top of the 18 pre-tallied ones.
        post_data = MultiDict(('student_words[]', word) for word in ['cat', 'cat', 'dog', 'sun'])
        response = json.loads(block.handle_ajax('submit', post_data))
        assert response['status'] == 'success'
        assert response['submitted'] is True
        # 18 pre-existing counts + 4 newly submitted words.
        assert response['total_count'] == 22
        self.assertDictEqual(
            response['student_words'],
            {'sun': 1, 'dog': 6, 'cat': 12}
        )
        self.assertListEqual(
            response['top_words'],
            [{'text': 'cat', 'size': 12, 'percent': 55.0},
             {'text': 'dad', 'size': 2, 'percent': 9.0},
             {'text': 'dog', 'size': 6, 'percent': 27.0},
             {'text': 'mom', 'size': 1, 'percent': 5.0},
             {'text': 'sun', 'size': 1, 'percent': 4.0}]
        )
        # Reported percentages are rounded so that they always total 100.
        assert 100.0 == sum(i['percent'] for i in response['top_words'])

    def test_indexibility(self):
        """
        Test indexibility of Word Cloud
        """
        module_system = get_test_system()
        block = WordCloudBlock(module_system, DictFieldData(self.raw_field_data), Mock())
        # The search index document exposes display name and instructions.
        assert block.index_dictionary() ==\
            {'content_type': 'Word Cloud',
             'content': {'display_name': 'Word Cloud Block',
                         'instructions': 'Enter some random words that comes to your mind'}}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 16 11:05:33 2017
Example of loading a forest file and updating for
use with sage. This sorts forests (prunes as well for all
forests with <2 halos, and adds meta information)
@author: Pascal Jahan Elahi
"""
import sys
import os
import glob
#load python routines
# Locate this script and derive the tools/ directory of the code base so the
# VELOCIraptor python tools can be imported from there.
scriptpath=os.path.abspath(__file__)
basecodedir=scriptpath.split('examples/')[0]+'/tools/'
sys.path.append(basecodedir)
#load the cythonized code if compiled
if (len(glob.glob(basecodedir+'velociraptor_python_tools_cython.*.so'))==1):
    print('using cython VR+TF toolkit')
    import velociraptor_python_tools_cython as vpt
else:
    print('using python VR+TF toolkit')
    import velociraptor_python_tools as vpt
#base file name of the forest file
# NOTE(review): no argv-length check -- running without an argument raises
# IndexError rather than a usage message.
fname=sys.argv[1]
# Prune (forests with <2 halos), sort, force bidirectional tree links and
# attach metadata, in the order required for use with sage.
vpt.PruneForest(fname)
vpt.ForestSorter(fname)
vpt.ForceBiDirectionalTreeInForestFile(fname)
vpt.ForestFileAddMetaData(fname)
|
# Choropleth of mean SP98 fuel price per French department.
# NOTE(review): fragment -- plt, lambert93, sp98_dept_filled, shapes and ccrs
# are presumably defined in an earlier notebook cell; confirm before reuse.
colormap = plt.cm.YlGnBu # choose your favourite one!
fig = plt.figure(figsize=(15, 10))
ax = plt.axes(projection=lambert93)
# Column-wise mean price per department.
X = sp98_dept_filled.mean()
norm = plt.Normalize(X.min(axis=0), X.max(axis=0))
# NOTE(review): X.index[:-1] is zipped against all of X -- the last label is
# dropped while values are not; verify this pairing is intentional.
for dept, value in zip(X.index[:-1], X):
    if dept not in ["20", "974"]: # We omit Corsica and Réunion...
        ax.add_geometries(
            shapes[dept],
            ccrs.PlateCarree(),
            edgecolor="#aaaaaa",
            facecolor=colormap(norm(value)),
        )
ax.coastlines("10m", color="#226666")
# Map bounds, presumably in Lambert-93 projected coordinates (metres).
ax.set_xlim((80000, 1150000))
ax.set_ylim((6100000, 7150000))
# Shared colorbar built from the same norm/colormap as the polygons.
mappable = plt.cm.ScalarMappable(norm, colormap)
mappable.set_array(X)
fig.colorbar(mappable, label="Prix moyen du SP98")
|
'''
trading start data importer
'''
import os, json, sys
from pathlib import Path
import data
# Require the config path argument and make sure it exists on disk.
if len(sys.argv) < 2:
    sys.exit('Usage: %s config-path' % sys.argv[0])
if not os.path.exists(sys.argv[1]):
    sys.exit('ERROR: Database %s was not found!' % sys.argv[1])
pwd = sys.argv[1]
root = Path(pwd).parent
data_path = ''
print('building dataset')
# The sibling config.json names the directory that holds the raw exports.
with open(os.path.join(root, 'config.json')) as json_data:
    d = json.load(json_data)
    data_path = d['destinationPath']
print('data path: ', data_path)
try:
    # Load each frame and prepare it for entity-set construction.
    trans_data = data.get_transaction_frame(data_path)
    trans_data['index'] = trans_data.index
    print('holdings data')
    holding_data = data.get_holdings_frame(data_path)
    holding_data = holding_data.rename(columns={'Code': 'Tick'})
    holding_data['index'] = holding_data.index
    # Keep only the most recent holdings snapshot.
    holding_data = holding_data[holding_data.Date == holding_data.Date.max()]
    print('account data')
    account_data = data.get_account_frame(data_path)
    price_data = data.get_price_frame(data_path)
    company_data = data.get_companies_frame(data_path)
    es = data.get_entityset(holding_data, price_data, trans_data, company_data)
    print(es)
except Exception as ex:
    # NOTE(review): best-effort run -- any failure is printed, not re-raised.
    print(str(ex))
# runNBA_Data.py
# Builds an NBA player-efficiency data set from MongoDB and plots a
# learning curve for a polynomial linear-regression model.
import time
import pandas as pd
import numpy as np
# Machine Learning algorithms
# NOTE(review): sklearn.cross_validation and sklearn.learning_curve are
# legacy (pre-0.18) module paths; modern scikit-learn uses
# sklearn.model_selection instead.
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures,scale
from sklearn.cross_validation import train_test_split, KFold
from sklearn.learning_curve import learning_curve
# Plot modules
import matplotlib.pyplot as plt
from matplotlib import style
style.use("ggplot")
# Show wide frames without wrapping when printed.
pd.options.display.max_columns = 50
pd.set_option('expand_frame_repr', False)
# Custom modules
from nbaImport import readMongo, WANTED_FEATURES, PER_FEATURES
def flatten(objToFlatten):
    """Concatenate the sub-sequences of *objToFlatten* into one flat list."""
    flat = []
    for sub in objToFlatten:
        flat.extend(sub)
    return flat
def BuildDataSet():
    """Fetch per-season NBA stats from MongoDB and build the (X, y) arrays.

    Returns:
        X: numpy array of the PER_FEATURES columns (cast to float).
        y: list of PER (player efficiency rating) target values.

    NOTE(review): the 'YOUR DATABASE'/'YOUR URI' placeholders must be replaced
    with a real MongoDB database and URI before this can run.
    """
    # 1 -- pull player documents, projecting only WANTED_FEATURES
    nbaFrame = readMongo(db='YOUR DATABASE',collection='above50Games',
                         query= {}, queryReturn=WANTED_FEATURES, no_id=False,
                         mongo_uri='YOUR URI')
    # 2 -- each document holds a list of seasons; flatten to one row per season
    statsDF = pd.DataFrame(list(flatten(nbaFrame.Seasons)))
    print(statsDF)
    # 1 -- expand the per-season 'totals' dicts into columns
    stats = pd.DataFrame(list(statsDF.totals.values))
    # Derived features: missed free throws and missed field goals.
    stats['FT_M'] = stats['FTA'] - stats['FT']
    stats['FG_M'] = stats['FGA'] - stats['FG']
    stats[PER_FEATURES] = stats[PER_FEATURES].astype(float)
    # 2 -- target comes from the 'advanced' stats block
    # (assumes its first column is PER -- TODO confirm against the schema)
    stats['PER'] = pd.DataFrame(list(statsDF.advanced.values))
    # 3 -- shuffle rows so later train/validation splits are unbiased
    stats = stats.reindex(np.random.permutation(stats.index))
    X = np.array(stats[PER_FEATURES].values)
    y = (stats["PER"].values.tolist())
    return X,y
def PlotLearningCurve(X_data, y_data,algorithm, s_time):
    """Plot training and cross-validation learning curves for *algorithm*.

    Args:
        X_data: feature matrix.
        y_data: target values.
        algorithm: estimator (pipeline) evaluated by learning_curve.
        s_time: program start time, used to print the total runtime.
    """
    print('PlotLearningCurve called')
    # 1 -- evaluate at 10%, 20%, 50%, 80% and 99% of the training data
    sizes = np.array([.1,.2,.5,.8,.99])
    train_sizes, train_scores, test_scores = learning_curve(
        algorithm,
        X_data,
        y_data,
        train_sizes=sizes)
    print('after learning_curve')
    # Mean/std of the scores across CV folds at each training size.
    train_mean = np.mean(train_scores, axis=1)
    train_std = np.std(train_scores, axis=1)
    test_mean = np.mean(test_scores, axis=1)
    test_std = np.std(test_scores, axis=1)
    # 2 -- shaded bands show +/- one standard deviation around each curve
    plt.figure(figsize=(15,10)) # Width, Height
    # Training Set
    plt.fill_between(train_sizes, train_mean-train_std,
                     train_mean+train_std, alpha=0.1, color="r")
    # Cross Validation Set
    plt.fill_between(train_sizes, test_mean-test_std,
                     test_mean+test_std, alpha=0.1, color="g")
    # Graph Legend text (scores at the largest training size, index 4)
    trainLabel = ('%.3f%% Training score' % (train_mean[4]))
    testLabel = ('%.3f%% Cross-validation score' % (test_mean[4]))
    # Plot lines
    plt.plot(train_sizes, train_mean, 'o-', color="r", label=trainLabel)
    plt.plot(train_sizes, test_mean, 'o-', color="g", label=testLabel)
    # Place title, X-axis label, Y-axis label
    plt.suptitle('Linear Regression: NBA PER', fontsize=20)
    plt.xlabel('Training examples')
    plt.ylabel('Accuracy')
    # Set limit on Y-axis, Place graph legend
    plt.ylim((0.5, 1.1))
    plt.xlim((0, 6500))
    plt.legend(loc="best")
    # Print duration of program
    print("--- %s seconds ---" % (time.time() - s_time))
    plt.show()
def Analysis(_deg=1):
    """Build the NBA data set, fit a degree-*_deg* polynomial regression
    pipeline, and plot its learning curve."""
    start_time = time.time()
    # 1 -- assemble features and targets
    X, y = BuildDataSet()
    # 2 -- polynomial feature expansion feeding a linear model
    poly = PolynomialFeatures(degree=_deg, include_bias=False)
    # 3 -- two-stage pipeline: expand features, then fit the regression
    model = Pipeline([("polynomial_features", poly),
                      ("linear_regression", LinearRegression())])
    print('after Pipeline')
    # 4 -- visualize train vs cross-validation performance
    PlotLearningCurve(X, y, model, start_time)

Analysis(3)
import pyeccodes.accessors as _
def load(h):
def wrapped(h):
table2Version = h.get_l('table2Version')
indicatorOfParameter = h.get_l('indicatorOfParameter')
indicatorOfTypeOfLevel = h.get_l('indicatorOfTypeOfLevel')
level = h.get_l('level')
if table2Version == 1 and indicatorOfParameter == 61 and indicatorOfTypeOfLevel == 1 and level == 0:
return 'kg m**-2'
if table2Version == 1 and indicatorOfParameter == 71:
return '%'
if table2Version == 1 and indicatorOfParameter == 65:
return 'kg m**-2'
if table2Version == 1 and indicatorOfParameter == 85:
return 'K'
if table2Version == 1 and indicatorOfParameter == 86:
return 'kg m**-3'
if table2Version == 1 and indicatorOfParameter == 7 and indicatorOfTypeOfLevel == 1:
return 'm'
if table2Version == 1 and indicatorOfParameter == 87:
return '%'
if table2Version == 1 and indicatorOfParameter == 127:
return '~'
if table2Version == 1 and indicatorOfParameter == 126:
return 'J'
if table2Version == 1 and indicatorOfParameter == 125:
return 'N m**-2'
if table2Version == 1 and indicatorOfParameter == 124:
return 'N m**-2'
if table2Version == 1 and indicatorOfParameter == 120:
return 'W m**-3 sr**-1'
if table2Version == 1 and indicatorOfParameter == 119:
return 'W m**-1 sr**-1'
if table2Version == 1 and indicatorOfParameter == 117:
return 'W m**-2'
if table2Version == 1 and indicatorOfParameter == 116:
return 'W m**-2'
if table2Version == 1 and indicatorOfParameter == 115:
return 'W m**-2'
if table2Version == 1 and indicatorOfParameter == 114:
return 'W m**-2'
if table2Version == 1 and indicatorOfParameter == 113:
return 'W m**-2'
if table2Version == 1 and indicatorOfParameter == 112:
return 'W m**-2'
if table2Version == 1 and indicatorOfParameter == 111:
return 'W m**-2'
if table2Version == 1 and indicatorOfParameter == 110:
return 's'
if table2Version == 1 and indicatorOfParameter == 109:
return 'Degree true'
if table2Version == 1 and indicatorOfParameter == 108:
return 's'
if table2Version == 1 and indicatorOfParameter == 107:
return 'Degree true'
if table2Version == 1 and indicatorOfParameter == 106:
return 's'
if table2Version == 1 and indicatorOfParameter == 105:
return 'm'
if table2Version == 1 and indicatorOfParameter == 104:
return 'Degree true'
if table2Version == 1 and indicatorOfParameter == 103:
return 's'
if table2Version == 1 and indicatorOfParameter == 102:
return 'm'
if table2Version == 1 and indicatorOfParameter == 101:
return 'Degree true'
if table2Version == 1 and indicatorOfParameter == 100:
return 'm'
if table2Version == 1 and indicatorOfParameter == 99:
return 'kg m**-2'
if table2Version == 1 and indicatorOfParameter == 98:
return 's**-1'
if table2Version == 1 and indicatorOfParameter == 97:
return 'm s**-1'
if table2Version == 1 and indicatorOfParameter == 96:
return 'm s**-1'
if table2Version == 1 and indicatorOfParameter == 95:
return 'm s**-1'
if table2Version == 1 and indicatorOfParameter == 94:
return 'm s**-1'
if table2Version == 1 and indicatorOfParameter == 93:
return 'Degree true'
if table2Version == 1 and indicatorOfParameter == 92:
return 'm'
if table2Version == 1 and indicatorOfParameter == 91:
return '(0 - 1)'
if table2Version == 1 and indicatorOfParameter == 89:
return 'kg m**-3'
if table2Version == 1 and indicatorOfParameter == 88:
return 'kg kg**-1'
if table2Version == 1 and indicatorOfParameter == 86:
return 'kg m**-2'
if table2Version == 1 and indicatorOfParameter == 82:
return 'm'
if table2Version == 1 and indicatorOfParameter == 80:
return 'K'
if table2Version == 1 and indicatorOfParameter == 77:
return 'K'
if table2Version == 1 and indicatorOfParameter == 70:
return 'm'
if table2Version == 1 and indicatorOfParameter == 69:
return 'm'
if table2Version == 1 and indicatorOfParameter == 68:
return 'm'
if table2Version == 1 and indicatorOfParameter == 67:
return 'm'
if table2Version == 1 and indicatorOfParameter == 64:
return 'kg m**-2 s**-1'
if table2Version == 1 and indicatorOfParameter == 63:
return 'kg m**-2'
if table2Version == 1 and indicatorOfParameter == 60:
return '%'
if table2Version == 1 and indicatorOfParameter == 59:
return 'kg m**-2 s**-1'
if table2Version == 1 and indicatorOfParameter == 56:
return 'Pa'
if table2Version == 1 and indicatorOfParameter == 55:
return 'Pa'
if table2Version == 1 and indicatorOfParameter == 54:
return 'kg m**-2'
if table2Version == 1 and indicatorOfParameter == 53:
return 'kg kg**-1'
if table2Version == 1 and indicatorOfParameter == 50:
return 'm s**-1'
if table2Version == 1 and indicatorOfParameter == 49:
return 'm s**-1'
if table2Version == 1 and indicatorOfParameter == 48:
return 'm s**-1'
if table2Version == 1 and indicatorOfParameter == 47:
return 'Degree true'
if table2Version == 1 and indicatorOfParameter == 46:
return 's**-1'
if table2Version == 1 and indicatorOfParameter == 45:
return 's**-1'
if table2Version == 1 and indicatorOfParameter == 42:
return 's**-1'
if table2Version == 1 and indicatorOfParameter == 41:
return 's**-1'
if table2Version == 1 and indicatorOfParameter == 38:
return 's**-1'
if table2Version == 1 and indicatorOfParameter == 37:
return 'm**2 s**-2'
if table2Version == 1 and indicatorOfParameter == 31:
return 'Degree true'
if table2Version == 1 and indicatorOfParameter == 30:
return '~'
if table2Version == 1 and indicatorOfParameter == 29:
return '~'
if table2Version == 1 and indicatorOfParameter == 28:
return '~'
if table2Version == 1 and indicatorOfParameter == 27:
return 'gpm'
if table2Version == 1 and indicatorOfParameter == 26:
return 'Pa'
if table2Version == 1 and indicatorOfParameter == 25:
return 'K'
if table2Version == 1 and indicatorOfParameter == 24:
return 'K'
if table2Version == 1 and indicatorOfParameter == 23:
return '~'
if table2Version == 1 and indicatorOfParameter == 22:
return '~'
if table2Version == 1 and indicatorOfParameter == 21:
return '~'
if table2Version == 1 and indicatorOfParameter == 20:
return 'm'
if table2Version == 1 and indicatorOfParameter == 19:
return 'K m**-1'
if table2Version == 1 and indicatorOfParameter == 18:
return 'K'
if table2Version == 1 and indicatorOfParameter == 17:
return 'K'
if table2Version == 1 and indicatorOfParameter == 16:
return 'K'
if table2Version == 1 and indicatorOfParameter == 15:
return 'K'
if table2Version == 1 and indicatorOfParameter == 14:
return 'K'
if table2Version == 1 and indicatorOfParameter == 9:
return 'm'
if table2Version == 1 and indicatorOfParameter == 8:
return 'm'
if table2Version == 1 and indicatorOfParameter == 5:
return 'm'
if table2Version == 1 and indicatorOfParameter == 3:
return 'Pa s**-1'
if table2Version == 1 and indicatorOfParameter == 84:
return '%'
if table2Version == 1 and indicatorOfParameter == 76:
return 'kg m**-2'
if table2Version == 1 and indicatorOfParameter == 78:
return 'kg m**-2'
if table2Version == 1 and indicatorOfParameter == 123:
return 'W m**-2'
if table2Version == 1 and indicatorOfParameter == 122:
return 'W m**-2'
if table2Version == 1 and indicatorOfParameter == 121:
return 'W m**-2'
if table2Version == 1 and indicatorOfParameter == 79:
return 'kg m**-2'
if table2Version == 1 and indicatorOfParameter == 75:
return '%'
if table2Version == 1 and indicatorOfParameter == 74:
return '%'
if table2Version == 1 and indicatorOfParameter == 73:
return '%'
if table2Version == 1 and indicatorOfParameter == 72:
return '%'
if table2Version == 1 and indicatorOfParameter == 66:
return 'm'
if table2Version == 1 and indicatorOfParameter == 62:
return 'kg m**-2'
if table2Version == 1 and indicatorOfParameter == 10:
return 'kg m**-2'
if table2Version == 1 and indicatorOfParameter == 90:
return 'm'
if table2Version == 1 and indicatorOfParameter == 118:
return 'K'
if table2Version == 1 and indicatorOfParameter == 57:
return 'm of water equivalent'
if table2Version == 1 and indicatorOfParameter == 83:
return 'm'
if table2Version == 1 and indicatorOfParameter == 81:
return '(0 - 1)'
if table2Version == 1 and indicatorOfParameter == 17 and indicatorOfTypeOfLevel == 105 and level == 2:
return 'K'
if table2Version == 1 and indicatorOfParameter == 11 and indicatorOfTypeOfLevel == 105 and level == 2:
return 'K'
if table2Version == 1 and indicatorOfParameter == 34 and indicatorOfTypeOfLevel == 105 and level == 10:
return 'm s**-1'
if table2Version == 1 and indicatorOfParameter == 33 and indicatorOfTypeOfLevel == 105 and level == 10:
return 'm s**-1'
if table2Version == 1 and indicatorOfParameter == 52:
return '%'
if table2Version == 1 and indicatorOfParameter == 7:
return 'gpm'
if table2Version == 1 and indicatorOfParameter == 44:
return 's**-1'
if table2Version == 1 and indicatorOfParameter == 2:
return 'Pa'
if table2Version == 1 and indicatorOfParameter == 43:
return 's**-1'
if table2Version == 1 and indicatorOfParameter == 39:
return 'Pa s**-1'
if table2Version == 1 and indicatorOfParameter == 1 and indicatorOfTypeOfLevel == 1:
return 'Pa'
if table2Version == 1 and indicatorOfParameter == 51:
return 'kg kg**-1'
if table2Version == 1 and indicatorOfParameter == 34:
return 'm s**-1'
if table2Version == 1 and indicatorOfParameter == 33:
return 'm s**-1'
if table2Version == 1 and indicatorOfParameter == 11:
return 'K'
if table2Version == 1 and indicatorOfParameter == 6:
return 'm**2 s**-2'
if table2Version == 1 and indicatorOfParameter == 16 and indicatorOfTypeOfLevel == 105 and level == 2:
return 'K'
if table2Version == 1 and indicatorOfParameter == 15 and indicatorOfTypeOfLevel == 105 and level == 2:
return 'K'
if table2Version == 1 and indicatorOfParameter == 4:
return 'K m**2 kg**-1 s**-1'
if table2Version == 1 and indicatorOfParameter == 1:
return 'Pa'
if table2Version == 1 and indicatorOfParameter == 32:
return 'm s**-1'
if table2Version == 1 and indicatorOfParameter == 13:
return 'K'
if table2Version == 1 and indicatorOfParameter == 36:
return 'm**2 s**-1'
if table2Version == 1 and indicatorOfParameter == 35:
return 'm**2 s**-1'
if table2Version == 2 and indicatorOfParameter == 61 and indicatorOfTypeOfLevel == 1 and level == 0:
return 'kg m**-2'
if table2Version == 2 and indicatorOfParameter == 71:
return '%'
if table2Version == 2 and indicatorOfParameter == 65:
return 'kg m**-2'
if table2Version == 2 and indicatorOfParameter == 85:
return 'K'
if table2Version == 2 and indicatorOfParameter == 86:
return 'kg m**-3'
if table2Version == 2 and indicatorOfParameter == 7 and indicatorOfTypeOfLevel == 1:
return 'm'
if table2Version == 2 and indicatorOfParameter == 87:
return '%'
if table2Version == 2 and indicatorOfParameter == 127:
return '~'
if table2Version == 2 and indicatorOfParameter == 126:
return 'J'
if table2Version == 2 and indicatorOfParameter == 125:
return 'N m**-2'
if table2Version == 2 and indicatorOfParameter == 124:
return 'N m**-2'
if table2Version == 2 and indicatorOfParameter == 120:
return 'W m**-3 sr**-1'
if table2Version == 2 and indicatorOfParameter == 119:
return 'W m**-1 sr**-1'
if table2Version == 2 and indicatorOfParameter == 117:
return 'W m**-2'
if table2Version == 2 and indicatorOfParameter == 116:
return 'W m**-2'
if table2Version == 2 and indicatorOfParameter == 115:
return 'W m**-2'
if table2Version == 2 and indicatorOfParameter == 114:
return 'W m**-2'
if table2Version == 2 and indicatorOfParameter == 113:
return 'W m**-2'
if table2Version == 2 and indicatorOfParameter == 112:
return 'W m**-2'
if table2Version == 2 and indicatorOfParameter == 111:
return 'W m**-2'
if table2Version == 2 and indicatorOfParameter == 110:
return 's'
if table2Version == 2 and indicatorOfParameter == 109:
return 'Degree true'
if table2Version == 2 and indicatorOfParameter == 108:
return 's'
if table2Version == 2 and indicatorOfParameter == 107:
return 'Degree true'
if table2Version == 2 and indicatorOfParameter == 106:
return 's'
if table2Version == 2 and indicatorOfParameter == 105:
return 'm'
if table2Version == 2 and indicatorOfParameter == 104:
return 'Degree true'
if table2Version == 2 and indicatorOfParameter == 103:
return 's'
if table2Version == 2 and indicatorOfParameter == 102:
return 'm'
if table2Version == 2 and indicatorOfParameter == 101:
return 'Degree true'
if table2Version == 2 and indicatorOfParameter == 100:
return 'm'
if table2Version == 2 and indicatorOfParameter == 99:
return 'kg m**-2'
if table2Version == 2 and indicatorOfParameter == 98:
return 's**-1'
if table2Version == 2 and indicatorOfParameter == 97:
return 'm s**-1'
if table2Version == 2 and indicatorOfParameter == 96:
return 'm s**-1'
if table2Version == 2 and indicatorOfParameter == 95:
return 'm s**-1'
if table2Version == 2 and indicatorOfParameter == 94:
return 'm s**-1'
if table2Version == 2 and indicatorOfParameter == 93:
return 'Degree true'
if table2Version == 2 and indicatorOfParameter == 92:
return 'm'
if table2Version == 2 and indicatorOfParameter == 91:
return '(0 - 1)'
if table2Version == 2 and indicatorOfParameter == 89:
return 'kg m**-3'
if table2Version == 2 and indicatorOfParameter == 88:
return 'kg kg**-1'
if table2Version == 2 and indicatorOfParameter == 86:
return 'kg m**-2'
if table2Version == 2 and indicatorOfParameter == 82:
return 'm'
if table2Version == 2 and indicatorOfParameter == 80:
return 'K'
if table2Version == 2 and indicatorOfParameter == 77:
return 'K'
if table2Version == 2 and indicatorOfParameter == 70:
return 'm'
if table2Version == 2 and indicatorOfParameter == 69:
return 'm'
if table2Version == 2 and indicatorOfParameter == 68:
return 'm'
if table2Version == 2 and indicatorOfParameter == 67:
return 'm'
if table2Version == 2 and indicatorOfParameter == 64:
return 'kg m**-2 s**-1'
if table2Version == 2 and indicatorOfParameter == 63:
return 'kg m**-2'
if table2Version == 2 and indicatorOfParameter == 60:
return '%'
if table2Version == 2 and indicatorOfParameter == 59:
return 'kg m**-2 s**-1'
if table2Version == 2 and indicatorOfParameter == 56:
return 'Pa'
if table2Version == 2 and indicatorOfParameter == 55:
return 'Pa'
if table2Version == 2 and indicatorOfParameter == 54:
return 'kg m**-2'
if table2Version == 2 and indicatorOfParameter == 53:
return 'kg kg**-1'
if table2Version == 2 and indicatorOfParameter == 50:
return 'm s**-1'
if table2Version == 2 and indicatorOfParameter == 49:
return 'm s**-1'
if table2Version == 2 and indicatorOfParameter == 48:
return 'm s**-1'
if table2Version == 2 and indicatorOfParameter == 47:
return 'Degree true'
if table2Version == 2 and indicatorOfParameter == 46:
return 's**-1'
if table2Version == 2 and indicatorOfParameter == 45:
return 's**-1'
if table2Version == 2 and indicatorOfParameter == 42:
return 's**-1'
if table2Version == 2 and indicatorOfParameter == 41:
return 's**-1'
if table2Version == 2 and indicatorOfParameter == 38:
return 's**-1'
if table2Version == 2 and indicatorOfParameter == 37:
return 'm**2 s**-2'
if table2Version == 2 and indicatorOfParameter == 31:
return 'Degree true'
if table2Version == 2 and indicatorOfParameter == 30:
return '~'
if table2Version == 2 and indicatorOfParameter == 29:
return '~'
if table2Version == 2 and indicatorOfParameter == 28:
return '~'
if table2Version == 2 and indicatorOfParameter == 27:
return 'gpm'
if table2Version == 2 and indicatorOfParameter == 26:
return 'Pa'
if table2Version == 2 and indicatorOfParameter == 25:
return 'K'
if table2Version == 2 and indicatorOfParameter == 24:
return 'K'
if table2Version == 2 and indicatorOfParameter == 23:
return '~'
if table2Version == 2 and indicatorOfParameter == 22:
return '~'
if table2Version == 2 and indicatorOfParameter == 21:
return '~'
if table2Version == 2 and indicatorOfParameter == 20:
return 'm'
if table2Version == 2 and indicatorOfParameter == 19:
return 'K m**-1'
if table2Version == 2 and indicatorOfParameter == 18:
return 'K'
if table2Version == 2 and indicatorOfParameter == 17:
return 'K'
if table2Version == 2 and indicatorOfParameter == 16:
return 'K'
if table2Version == 2 and indicatorOfParameter == 15:
return 'K'
if table2Version == 2 and indicatorOfParameter == 14:
return 'K'
if table2Version == 2 and indicatorOfParameter == 9:
return 'm'
if table2Version == 2 and indicatorOfParameter == 8:
return 'm'
if table2Version == 2 and indicatorOfParameter == 5:
return 'm'
if table2Version == 2 and indicatorOfParameter == 3:
return 'Pa s**-1'
if table2Version == 2 and indicatorOfParameter == 84:
return '%'
if table2Version == 2 and indicatorOfParameter == 76:
return 'kg m**-2'
if table2Version == 2 and indicatorOfParameter == 78:
return 'kg m**-2'
if table2Version == 2 and indicatorOfParameter == 123:
return 'W m**-2'
if table2Version == 2 and indicatorOfParameter == 122:
return 'W m**-2'
if table2Version == 2 and indicatorOfParameter == 121:
return 'W m**-2'
if table2Version == 2 and indicatorOfParameter == 79:
return 'kg m**-2'
if table2Version == 2 and indicatorOfParameter == 75:
return '%'
if table2Version == 2 and indicatorOfParameter == 74:
return '%'
if table2Version == 2 and indicatorOfParameter == 73:
return '%'
if table2Version == 2 and indicatorOfParameter == 72:
return '%'
if table2Version == 2 and indicatorOfParameter == 66:
return 'm'
if table2Version == 2 and indicatorOfParameter == 62:
return 'kg m**-2'
if table2Version == 2 and indicatorOfParameter == 10:
return 'kg m**-2'
if table2Version == 2 and indicatorOfParameter == 90:
return 'm'
if table2Version == 2 and indicatorOfParameter == 118:
return 'K'
if table2Version == 2 and indicatorOfParameter == 57:
return 'm of water equivalent'
if table2Version == 2 and indicatorOfParameter == 83:
return 'm'
if table2Version == 2 and indicatorOfParameter == 81:
return '(0 - 1)'
if table2Version == 2 and indicatorOfParameter == 17 and indicatorOfTypeOfLevel == 105 and level == 2:
return 'K'
if table2Version == 2 and indicatorOfParameter == 11 and indicatorOfTypeOfLevel == 105 and level == 2:
return 'K'
if table2Version == 2 and indicatorOfParameter == 34 and indicatorOfTypeOfLevel == 105 and level == 10:
return 'm s**-1'
if table2Version == 2 and indicatorOfParameter == 33 and indicatorOfTypeOfLevel == 105 and level == 10:
return 'm s**-1'
if table2Version == 2 and indicatorOfParameter == 52:
return '%'
if table2Version == 2 and indicatorOfParameter == 7:
return 'gpm'
if table2Version == 2 and indicatorOfParameter == 44:
return 's**-1'
if table2Version == 2 and indicatorOfParameter == 2:
return 'Pa'
if table2Version == 2 and indicatorOfParameter == 43:
return 's**-1'
if table2Version == 2 and indicatorOfParameter == 39:
return 'Pa s**-1'
if table2Version == 2 and indicatorOfParameter == 1 and indicatorOfTypeOfLevel == 1:
return 'Pa'
if table2Version == 2 and indicatorOfParameter == 51:
return 'kg kg**-1'
if table2Version == 2 and indicatorOfParameter == 34:
return 'm s**-1'
if table2Version == 2 and indicatorOfParameter == 33:
return 'm s**-1'
if table2Version == 2 and indicatorOfParameter == 11:
return 'K'
if table2Version == 2 and indicatorOfParameter == 6:
return 'm**2 s**-2'
if table2Version == 2 and indicatorOfParameter == 16 and indicatorOfTypeOfLevel == 105 and level == 2:
return 'K'
if table2Version == 2 and indicatorOfParameter == 15 and indicatorOfTypeOfLevel == 105 and level == 2:
return 'K'
if table2Version == 2 and indicatorOfParameter == 4:
return 'K m**2 kg**-1 s**-1'
if table2Version == 2 and indicatorOfParameter == 1:
return 'Pa'
if table2Version == 2 and indicatorOfParameter == 32:
return 'm s**-1'
if table2Version == 2 and indicatorOfParameter == 13:
return 'K'
if table2Version == 2 and indicatorOfParameter == 36:
return 'm**2 s**-1'
if table2Version == 2 and indicatorOfParameter == 35:
return 'm**2 s**-1'
if table2Version == 3 and indicatorOfParameter == 61 and indicatorOfTypeOfLevel == 1 and level == 0:
return 'kg m**-2'
if table2Version == 3 and indicatorOfParameter == 71:
return '%'
if table2Version == 3 and indicatorOfParameter == 65:
return 'kg m**-2'
if table2Version == 3 and indicatorOfParameter == 85:
return 'K'
if table2Version == 3 and indicatorOfParameter == 86:
return 'kg m**-3'
if table2Version == 3 and indicatorOfParameter == 7 and indicatorOfTypeOfLevel == 1:
return 'm'
if table2Version == 3 and indicatorOfParameter == 87:
return '%'
if table2Version == 3 and indicatorOfParameter == 127:
return '~'
if table2Version == 3 and indicatorOfParameter == 126:
return 'J'
if table2Version == 3 and indicatorOfParameter == 125:
return 'N m**-2'
if table2Version == 3 and indicatorOfParameter == 124:
return 'N m**-2'
if table2Version == 3 and indicatorOfParameter == 120:
return 'W m**-3 sr**-1'
if table2Version == 3 and indicatorOfParameter == 119:
return 'W m**-1 sr**-1'
if table2Version == 3 and indicatorOfParameter == 117:
return 'W m**-2'
if table2Version == 3 and indicatorOfParameter == 116:
return 'W m**-2'
if table2Version == 3 and indicatorOfParameter == 115:
return 'W m**-2'
if table2Version == 3 and indicatorOfParameter == 114:
return 'W m**-2'
if table2Version == 3 and indicatorOfParameter == 113:
return 'W m**-2'
if table2Version == 3 and indicatorOfParameter == 112:
return 'W m**-2'
if table2Version == 3 and indicatorOfParameter == 111:
return 'W m**-2'
if table2Version == 3 and indicatorOfParameter == 110:
return 's'
if table2Version == 3 and indicatorOfParameter == 109:
return 'Degree true'
if table2Version == 3 and indicatorOfParameter == 108:
return 's'
if table2Version == 3 and indicatorOfParameter == 107:
return 'Degree true'
if table2Version == 3 and indicatorOfParameter == 106:
return 's'
if table2Version == 3 and indicatorOfParameter == 105:
return 'm'
if table2Version == 3 and indicatorOfParameter == 104:
return 'Degree true'
if table2Version == 3 and indicatorOfParameter == 103:
return 's'
if table2Version == 3 and indicatorOfParameter == 102:
return 'm'
if table2Version == 3 and indicatorOfParameter == 101:
return 'Degree true'
if table2Version == 3 and indicatorOfParameter == 100:
return 'm'
if table2Version == 3 and indicatorOfParameter == 99:
return 'kg m**-2'
if table2Version == 3 and indicatorOfParameter == 98:
return 's**-1'
if table2Version == 3 and indicatorOfParameter == 97:
return 'm s**-1'
if table2Version == 3 and indicatorOfParameter == 96:
return 'm s**-1'
if table2Version == 3 and indicatorOfParameter == 95:
return 'm s**-1'
if table2Version == 3 and indicatorOfParameter == 94:
return 'm s**-1'
if table2Version == 3 and indicatorOfParameter == 93:
return 'Degree true'
if table2Version == 3 and indicatorOfParameter == 92:
return 'm'
if table2Version == 3 and indicatorOfParameter == 91:
return '(0 - 1)'
if table2Version == 3 and indicatorOfParameter == 89:
return 'kg m**-3'
if table2Version == 3 and indicatorOfParameter == 88:
return 'kg kg**-1'
if table2Version == 3 and indicatorOfParameter == 86:
return 'kg m**-2'
if table2Version == 3 and indicatorOfParameter == 82:
return 'm'
if table2Version == 3 and indicatorOfParameter == 80:
return 'K'
if table2Version == 3 and indicatorOfParameter == 77:
return 'K'
if table2Version == 3 and indicatorOfParameter == 70:
return 'm'
if table2Version == 3 and indicatorOfParameter == 69:
return 'm'
if table2Version == 3 and indicatorOfParameter == 68:
return 'm'
if table2Version == 3 and indicatorOfParameter == 67:
return 'm'
if table2Version == 3 and indicatorOfParameter == 64:
return 'kg m**-2 s**-1'
if table2Version == 3 and indicatorOfParameter == 63:
return 'kg m**-2'
if table2Version == 3 and indicatorOfParameter == 60:
return '%'
if table2Version == 3 and indicatorOfParameter == 59:
return 'kg m**-2 s**-1'
if table2Version == 3 and indicatorOfParameter == 56:
return 'Pa'
if table2Version == 3 and indicatorOfParameter == 55:
return 'Pa'
if table2Version == 3 and indicatorOfParameter == 54:
return 'kg m**-2'
if table2Version == 3 and indicatorOfParameter == 53:
return 'kg kg**-1'
if table2Version == 3 and indicatorOfParameter == 50:
return 'm s**-1'
if table2Version == 3 and indicatorOfParameter == 49:
return 'm s**-1'
if table2Version == 3 and indicatorOfParameter == 48:
return 'm s**-1'
if table2Version == 3 and indicatorOfParameter == 47:
return 'Degree true'
if table2Version == 3 and indicatorOfParameter == 46:
return 's**-1'
if table2Version == 3 and indicatorOfParameter == 45:
return 's**-1'
if table2Version == 3 and indicatorOfParameter == 42:
return 's**-1'
if table2Version == 3 and indicatorOfParameter == 41:
return 's**-1'
if table2Version == 3 and indicatorOfParameter == 38:
return 's**-1'
if table2Version == 3 and indicatorOfParameter == 37:
return 'm**2 s**-2'
if table2Version == 3 and indicatorOfParameter == 31:
return 'Degree true'
if table2Version == 3 and indicatorOfParameter == 30:
return '~'
if table2Version == 3 and indicatorOfParameter == 29:
return '~'
if table2Version == 3 and indicatorOfParameter == 28:
return '~'
if table2Version == 3 and indicatorOfParameter == 27:
return 'gpm'
if table2Version == 3 and indicatorOfParameter == 26:
return 'Pa'
if table2Version == 3 and indicatorOfParameter == 25:
return 'K'
if table2Version == 3 and indicatorOfParameter == 24:
return 'K'
if table2Version == 3 and indicatorOfParameter == 23:
return '~'
if table2Version == 3 and indicatorOfParameter == 22:
return '~'
if table2Version == 3 and indicatorOfParameter == 21:
return '~'
if table2Version == 3 and indicatorOfParameter == 20:
return 'm'
if table2Version == 3 and indicatorOfParameter == 19:
return 'K m**-1'
if table2Version == 3 and indicatorOfParameter == 18:
return 'K'
if table2Version == 3 and indicatorOfParameter == 17:
return 'K'
if table2Version == 3 and indicatorOfParameter == 16:
return 'K'
if table2Version == 3 and indicatorOfParameter == 15:
return 'K'
if table2Version == 3 and indicatorOfParameter == 14:
return 'K'
if table2Version == 3 and indicatorOfParameter == 9:
return 'm'
if table2Version == 3 and indicatorOfParameter == 8:
return 'm'
if table2Version == 3 and indicatorOfParameter == 5:
return 'm'
if table2Version == 3 and indicatorOfParameter == 3:
return 'Pa s**-1'
if table2Version == 3 and indicatorOfParameter == 12:
return 'K'
if table2Version == 2 and indicatorOfParameter == 12:
return 'K'
if table2Version == 1 and indicatorOfParameter == 12:
return 'K'
if table2Version == 3 and indicatorOfParameter == 84:
return '%'
if table2Version == 3 and indicatorOfParameter == 76:
return 'kg m**-2'
if table2Version == 3 and indicatorOfParameter == 78:
return 'kg m**-2'
if table2Version == 3 and indicatorOfParameter == 123:
return 'W m**-2'
if table2Version == 3 and indicatorOfParameter == 122:
return 'W m**-2'
if table2Version == 3 and indicatorOfParameter == 121:
return 'W m**-2'
if table2Version == 3 and indicatorOfParameter == 79:
return 'kg m**-2'
if table2Version == 3 and indicatorOfParameter == 75:
return '%'
if table2Version == 3 and indicatorOfParameter == 74:
return '%'
if table2Version == 3 and indicatorOfParameter == 73:
return '%'
if table2Version == 3 and indicatorOfParameter == 72:
return '%'
if table2Version == 3 and indicatorOfParameter == 66:
return 'm'
if table2Version == 3 and indicatorOfParameter == 62:
return 'kg m**-2'
if table2Version == 3 and indicatorOfParameter == 10:
return 'kg m**-2'
if table2Version == 3 and indicatorOfParameter == 90:
return 'm'
if table2Version == 3 and indicatorOfParameter == 118:
return 'K'
if table2Version == 3 and indicatorOfParameter == 57:
return 'm of water equivalent'
if table2Version == 3 and indicatorOfParameter == 83:
return 'm'
if table2Version == 3 and indicatorOfParameter == 81:
return '(0 - 1)'
if table2Version == 3 and indicatorOfParameter == 17 and indicatorOfTypeOfLevel == 105 and level == 2:
return 'K'
if table2Version == 3 and indicatorOfParameter == 11 and indicatorOfTypeOfLevel == 105 and level == 2:
return 'K'
if table2Version == 3 and indicatorOfParameter == 34 and indicatorOfTypeOfLevel == 105 and level == 10:
return 'm s**-1'
if table2Version == 3 and indicatorOfParameter == 33 and indicatorOfTypeOfLevel == 105 and level == 10:
return 'm s**-1'
if table2Version == 3 and indicatorOfParameter == 52:
return '%'
if table2Version == 3 and indicatorOfParameter == 7:
return 'gpm'
if table2Version == 3 and indicatorOfParameter == 44:
return 's**-1'
if table2Version == 3 and indicatorOfParameter == 2:
return 'Pa'
if table2Version == 3 and indicatorOfParameter == 43:
return 's**-1'
if table2Version == 3 and indicatorOfParameter == 39:
return 'Pa s**-1'
if table2Version == 3 and indicatorOfParameter == 1 and indicatorOfTypeOfLevel == 1:
return 'Pa'
if table2Version == 3 and indicatorOfParameter == 51:
return 'kg kg**-1'
if table2Version == 3 and indicatorOfParameter == 34:
return 'm s**-1'
if table2Version == 3 and indicatorOfParameter == 33:
return 'm s**-1'
if table2Version == 3 and indicatorOfParameter == 11:
return 'K'
if table2Version == 3 and indicatorOfParameter == 6:
return 'm**2 s**-2'
if table2Version == 3 and indicatorOfParameter == 16 and indicatorOfTypeOfLevel == 105 and level == 2:
return 'K'
if table2Version == 3 and indicatorOfParameter == 15 and indicatorOfTypeOfLevel == 105 and level == 2:
return 'K'
if table2Version == 3 and indicatorOfParameter == 4:
return 'K m**2 kg**-1 s**-1'
if table2Version == 3 and indicatorOfParameter == 1:
return 'Pa'
if table2Version == 3 and indicatorOfParameter == 32:
return 'm s**-1'
if table2Version == 3 and indicatorOfParameter == 13:
return 'K'
if table2Version == 3 and indicatorOfParameter == 36:
return 'm**2 s**-1'
if table2Version == 3 and indicatorOfParameter == 35:
return 'm**2 s**-1'
return wrapped
|
import asyncio
import aiohttp
import discord
import io
import chat_exporter
from discord.ext import commands
from cogs.utils import hypixel
class Tickets(commands.Cog, name="Tickets"):
    """Ticket-management cog: user registration/verification, ticket channel
    creation, transcripts, and staff-application accept/deny workflows.

    Relies on bot-level attributes configured elsewhere (staff, t_officer,
    new_member_role, member_role, guest, ally, awaiting_app, misc_allies,
    staff_names, ticket_categories, logs, misc_guild, and the guild-xp
    requirement numbers) — confirm against the bot setup when changing them.
    """

    def __init__(self, bot):
        self.bot = bot

    async def _apply_ticket_permissions(self, ticket_channel, opener):
        """Lock a ticket channel down: hide it from @everyone and new members,
        grant full access to staff, trial officers, and the ticket opener."""
        # The permission set shared by every role/member that may use the ticket.
        full_access = dict(send_messages=True, read_messages=True,
                           add_reactions=True, embed_links=True,
                           attach_files=True,
                           read_message_history=True, external_emojis=True)
        guild = ticket_channel.guild
        # guild.get_role(guild.id) is the guild's @everyone role.
        await ticket_channel.set_permissions(guild.get_role(guild.id), send_messages=False,
                                             read_messages=False)
        await ticket_channel.set_permissions(self.bot.staff, **full_access)
        await ticket_channel.set_permissions(self.bot.t_officer, **full_access)
        await ticket_channel.set_permissions(opener, **full_access)
        await ticket_channel.set_permissions(self.bot.new_member_role, send_messages=False,
                                             read_messages=False,
                                             add_reactions=True, embed_links=True,
                                             attach_files=True,
                                             read_message_history=True, external_emojis=True)

    @commands.command(aliases=['reg', 'verify'])
    async def register(self, ctx, name):
        """Verify a Minecraft username via the Mojang API and assign roles
        based on the player's Hypixel guild. Only works in #register."""
        async with ctx.channel.typing():
            author = ctx.author
            if str(ctx.channel) == "register":
                async with aiohttp.ClientSession() as session:
                    async with session.get(f'https://api.mojang.com/users/profiles/minecraft/{name}') as resp:
                        request = await resp.json(content_type=None)
                        if resp.status != 200:
                            # Mojang did not resolve the name to an account.
                            embed = discord.Embed(title="Please enter a valid minecraft username!",
                                                  color=0xDE3163)
                            await ctx.channel.send(embed=embed)
                        elif request['name'] in self.bot.staff_names and self.bot.staff not in ctx.author.roles:
                            # Non-staff member trying to register under a staff IGN.
                            embed = discord.Embed(title="Staff impersonation is a punishable offense!",
                                                  color=0xDE3163)
                            await ctx.channel.send(embed=embed)
                        else:
                            ign = request['name']
                            uuid = request['id']
                            guild_name = await hypixel.get_guild(name)
                            # NOTE(review): Member.edit() returns None on discord.py 1.x,
                            # so `nick` always falls back to the account name below —
                            # confirm whether the ticket channel was meant to use `ign`.
                            nick = await author.edit(nick=ign)
                            if guild_name == "Miscellaneous":
                                # Already a guild member: grant the member role.
                                await ctx.author.remove_roles(self.bot.new_member_role)
                                await ctx.channel.purge(limit=1)
                                embed = discord.Embed(title="Registration successful!")
                                embed.add_field(name=ign,
                                                value="Member of Miscellaneous")
                                embed.set_thumbnail(url=f'https://visage.surgeplay.com/full/832/{uuid}')
                                await ctx.send(embed=embed)
                                await ctx.author.add_roles(self.bot.member_role)
                            elif guild_name in self.bot.misc_allies:
                                # Member of an allied guild: tag their nick and grant guest/ally.
                                gtag = await hypixel.get_gtag(guild_name)
                                if ctx.author.nick is None or str(gtag) not in ctx.author.nick:
                                    ign = ign + " " + str(gtag)
                                    await ctx.author.edit(nick=ign)
                                await ctx.author.remove_roles(self.bot.new_member_role)
                                await ctx.author.add_roles(self.bot.guest, self.bot.ally)
                                await ctx.channel.purge(limit=1)
                                embed = discord.Embed(title="Registration successful!")
                                embed.set_thumbnail(url=f'https://visage.surgeplay.com/full/832/{uuid}')
                                embed.add_field(name=ign, value=f"Member of {guild_name}")
                                await ctx.send(embed=embed)
                            elif guild_name != "Miscellaneous" and guild_name not in self.bot.misc_allies:
                                # Unknown/other guild: open a registration ticket and DM requirements.
                                await ctx.author.remove_roles(self.bot.new_member_role)
                                await ctx.author.add_roles(self.bot.awaiting_app)
                                if nick is None:
                                    nick = author.name
                                await ctx.channel.purge(limit=1)
                                embed = discord.Embed(title="Registration successful!")
                                embed.set_thumbnail(url=f'https://visage.surgeplay.com/full/832/{uuid}')
                                embed.add_field(name=ign, value="New Member")
                                await ctx.send(embed=embed)
                                category = discord.utils.get(ctx.guild.categories, name="RTickets")
                                ticket_channel = await ctx.guild.create_text_channel(f"registration-ticket-{nick}",
                                                                                     category=category)
                                await self._apply_ticket_permissions(ticket_channel, author)
                                try:
                                    embed = discord.Embed(title="Miscellaneous Guild Requirements",
                                                          description="These requirements are subject to change!",
                                                          color=0x8368ff)
                                    embed.add_field(name="Active",
                                                    value=f"• {format(self.bot.active, ',d')} Weekly Guild Experience",
                                                    inline=False)
                                    embed.add_field(name="DNKL Eligibility",
                                                    value=f"• {format(self.bot.dnkl, ',d')} Weekly Guild Experience",
                                                    inline=False)
                                    embed.add_field(name="Resident",
                                                    value=f"• {format(self.bot.resident_req, ',d')} Weekly Guild Experience",
                                                    inline=False)
                                    embed.add_field(name="Member",
                                                    value=f"• {format(self.bot.inactive, ',d')} Weekly Guild Experience",
                                                    inline=False)
                                    embed.add_field(name="New Member",
                                                    value=f"• {format(self.bot.new_member, ',d')} Daily Guild Experience",
                                                    inline=False)
                                    embed.set_footer(
                                        text="You are considered a New Member for the first 7 days after joining the guild"
                                             "\nIf you fail to meet the New Member/Member requirements, you will be kicked!")
                                    await ctx.author.send(embed=embed)
                                except Exception:
                                    # The user may have DMs closed; the requirements
                                    # embed is best-effort only.
                                    pass
                    # No explicit session.close() needed: `async with` closes it.
            else:
                await ctx.send('This command can only be used in the registration channel!')

    @commands.command(aliases=['del'])
    async def delete(self, ctx):
        """Deletes the ticket channel the command is used in.
        """
        transcript = await chat_exporter.export(ctx.channel)
        # BUGFIX: transcript_file was previously unbound when export() returned
        # None, raising NameError at the logs.send(file=...) call below.
        transcript_file = None
        if transcript is not None:
            transcript_file = discord.File(io.BytesIO(transcript.encode()),
                                           filename=f"deleted-{ctx.channel.name}.html")
        if self.bot.staff in ctx.author.roles:
            if ctx.channel.category.name in self.bot.ticket_categories:
                embed = discord.Embed(title='This ticket will be deleted in 10 seconds!', description='',
                                      color=0xDE3163)
                await ctx.send(embed=embed)
                await asyncio.sleep(10)
                await ctx.channel.delete()
                name = await hypixel.name_grabber(ctx.author)
                embed = discord.Embed(title=f'{ctx.channel.name} was deleted by {name}',
                                      description="", color=0x8368ff)
                await self.bot.logs.send(embed=embed)
                if transcript_file is not None:
                    await self.bot.logs.send(file=transcript_file)

    @commands.command()
    @commands.has_any_role(538015368782807040, 522588122807271424)
    async def add(self, ctx, member: discord.Member):
        """Adds the specified user to the ticket.
        """
        if ctx.channel.category.name in self.bot.ticket_categories:
            await ctx.channel.set_permissions(member, send_messages=True, read_messages=True,
                                              add_reactions=True, embed_links=True,
                                              attach_files=True,
                                              read_message_history=True, external_emojis=True)
            embed = discord.Embed(title=f"{member.name} has been added to the ticket!",
                                  color=0x00A86B)
            await ctx.send(embed=embed)

    @commands.command()
    @commands.has_any_role(538015368782807040, 522588122807271424)
    async def remove(self, ctx, member: discord.Member):
        """Removes the specified user from the ticket.
        """
        if ctx.channel.category.name in self.bot.ticket_categories:
            await ctx.channel.set_permissions(member, send_messages=False, read_messages=False,
                                              add_reactions=False, embed_links=False,
                                              attach_files=False,
                                              read_message_history=False, external_emojis=False)
            embed = discord.Embed(title=f"{member.name} has been removed from the ticket!",
                                  color=0x00A86B)
            await ctx.send(embed=embed)

    @commands.command()
    @commands.has_any_role(538015368782807040, 522588122807271424)
    async def rename(self, ctx, channel_name):
        """Renames the channel
        """
        if ctx.channel.category.name in self.bot.ticket_categories:
            # Discord channel names cannot contain spaces.
            channel_name = channel_name.replace(" ", "-")
            await ctx.channel.edit(name=f"{channel_name}")

    @commands.command()
    @commands.has_any_role(538015368782807040, 522588122807271424)
    async def transcript(self, ctx):
        """Creates a transcript for the channel the command is entered in
        """
        if ctx.channel.category.name in self.bot.ticket_categories:
            transcript = await chat_exporter.export(ctx.channel)
            if transcript is None:
                # BUGFIX: Embed() has no `text` kwarg; the failure message was
                # silently dropped, producing an empty embed.
                embed = discord.Embed(title="Transcript creation failed!",
                                      color=0xDE3163)
                await ctx.send(embed=embed)
                return
            transcript_file = discord.File(io.BytesIO(transcript.encode()),
                                           filename=f"transcript-{ctx.channel.name}.html")
            embed = discord.Embed(title="Transcript creation successful!",
                                  color=0x00A86B)
            await ctx.send(embed=embed)
            await ctx.send(file=transcript_file)

    @commands.command()
    @commands.has_role(522588118251995147)
    async def accept(self, ctx, member: discord.Member):
        """Used to accept staff applications. This command must be typed in the application channel. It doesn't work elsewhere.
        """
        if ctx.channel.category.name in self.bot.ticket_categories:
            embed = discord.Embed(title=f"Congratulations {member.name}, your staff application has been accepted!",
                                  description="Please view the following as they'll help you become a better staff member!",
                                  color=0x8368ff)
            embed.set_footer(text="https://bit.ly/MiscStaffGuide\n"
                                  "#staff-faq")
            await ctx.send(embed=embed)

    @commands.command()
    @commands.has_any_role(522588118251995147, 522590574734213120)
    async def deny(self, ctx, member: discord.Member, channel: discord.TextChannel):
        """Used to deny staff applications. This command can be used in any channel, provided, the syntax is met.
        """
        name = await hypixel.name_grabber(member)
        embed = discord.Embed(title=f"{name}, your application has been denied!",
                              description="The reasons are listed below",
                              color=0xDE3163)
        embed.set_footer(
            text="You may reapply in 2 weeks. \nFollowing is the transcript so that you can refer to it while reapplying.")
        question_number = {
            1: 'What is your age?',
            2: 'How long have you been in the guild for?',
            3: 'Have you had any past infractions on Hypixel?',
            4: 'Why have you decided to apply for staff?',
            5: 'What has brought you to Miscellaneous, and what has kept you here?',
            6: 'What is something you could suggest that would improve the guild?',
            7: 'You have just started as a trial officer and an officer starts arguing with another member. This argument starts to get serious quite quickly. What do you do?',
            8: 'Suppose it\'s your first week of being a trial officer and you guild-mute a well-known player. Your guildmates start spamming you calling you a bad officer and telling you to unmute them. What would you do?',
            9: 'Upon joining a game and you discover that a guild member is in your game and is hacking. What do you do?',
            10: 'Have you been staff in any other guild or on any server? If yes, which one?',
            11: 'How much time do you have to contribute to the role? (Per day)',
            12: 'Tell us about a time you made a mistake within the last year. How did you deal with it? What did you learn?',
            13: 'Anything else you would us to know?',
            14: 'General Critiquing'
        }
        all_questions = ''
        for x in range(1, 15):
            question = question_number.get(x, 'None')
            all_questions = all_questions + f"{x})" + question + "\n\n"
        embed1 = discord.Embed(title="Questions", description=all_questions, color=0x8368ff)
        await ctx.send(embed=embed1)
        # Outer loop: one critique per iteration; inner loop: re-prompt until a
        # valid question number (1-14) is given.
        while True:
            while True:
                await ctx.send("What is the question number of the reply that you would like to critique?"
                               "\n**Please just give the question number!**"
                               "If you would like to critique something in general, reply with `14`")
                question = await self.bot.wait_for('message',
                                                   check=lambda x: x.channel == ctx.channel and x.author == ctx.author)
                question = question.content
                if str(question) in ("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14"):
                    question = question_number.get(int(question), 'None')
                    break
                else:
                    await ctx.send("Please respond with a valid number. (1-14)")
            await ctx.send(f"`{question}`"
                           "\n**What was the issue that you found with their reply?**")
            critique = await self.bot.wait_for('message',
                                               check=lambda x: x.channel == ctx.channel and x.author == ctx.author)
            critique = critique.content
            embed.add_field(name=question,
                            value=critique,
                            inline=False)
            # Show the denial embed as built so far, then ask whether to continue.
            await ctx.send(embed=embed)
            embed1 = discord.Embed(title="Would you like to critique more questions?", color=0x8368ff)
            embed1.add_field(name="If yes:", value="Reply with `Yes`")
            embed1.add_field(name="If not:", value="Reply with `No`")
            await ctx.send(embed=embed1)
            more = await self.bot.wait_for('message',
                                           check=lambda x: x.channel == ctx.channel and x.author == ctx.author)
            more = more.content
            more = more.capitalize()
            if more in ('Yes', 'Yeah', 'Ye', 'Yea'):
                continue
            else:
                await channel.send(embed=embed)
                break

    @commands.command()
    async def new(self, ctx):
        """Open a fresh support ticket in the Miscellaneous guild's ticket
        category and ping the opener inside it."""
        name = await hypixel.name_grabber(ctx.author)
        category = discord.utils.get(self.bot.misc_guild.categories, name="🎫 Ticket Section")
        ticket_channel = await self.bot.misc_guild.create_text_channel(f"ticket-{name}",
                                                                       category=category,
                                                                       topic="<:t:869239368060112906><:i:869239367942697010><:c:869239368383074414>"
                                                                             "<:k:869239367854612480><:e:869239368517287936><:t:869239368060112906>")
        creating_ticket = discord.Embed(title="Click here to go to your ticket!",
                                        url=f"https://discord.com/channels/522586672148381726/{ticket_channel.id}",
                                        color=0x00A86B)
        creating_ticket.set_author(name="Ticket successfully created!")
        await ctx.send(embed=creating_ticket)
        await self._apply_ticket_permissions(ticket_channel, ctx.author)
        await ticket_channel.send(f"{ctx.author.mention}")
def setup(bot):
    """Cog entry point: attach the Tickets cog to the bot."""
    cog = Tickets(bot)
    bot.add_cog(cog)
|
import argparse
import tqdm
import pickle
import numpy as np
import dmc2gym
import torch
import gin
import super_sac
from super_sac.wrappers import Uint8Wrapper, FrameStack
from train_dmc import IdentityEncoder
def main():
    """Roll out a trained super_sac policy in a dmc2gym environment and
    pickle the collected (state, action, reward, next_state, done)
    experience to ``--save_experience``.

    Fixes: removed the unused ``reward_histories`` local and the
    ``ep_sim_steps`` list, which was never populated and always printed
    as an empty list.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--policy", type=str, required=True)
    parser.add_argument("--save_experience", type=str, required=True)
    parser.add_argument("--max_steps", type=int, default=1000)
    parser.add_argument("--domain", type=str, default="walker")
    parser.add_argument("--task", type=str, default="walk")
    parser.add_argument("--episodes", type=int, default=10)
    parser.add_argument("--config", type=str, required=True)
    args = parser.parse_args()

    gin.parse_config_file(args.config)

    env = dmc2gym.make(
        args.domain,
        args.task,
        visualize_reward=False,
        from_pixels=True,
        frame_skip=2,
    )
    env.reset()
    # dmc2gym exposes the low-dimensional proprioceptive state alongside pixels.
    state_space_size = env.current_state.shape[0]
    env = Uint8Wrapper(FrameStack(env, 3))

    # create agent
    agent = super_sac.Agent(
        act_space_size=env.action_space.shape[0],
        encoder=IdentityEncoder(state_space_size),
    )
    agent.to(super_sac.device)
    agent.load(args.policy)
    agent.eval()

    returns = []
    actions, rewards, dones = [], [], []
    # Reset once to discover the observation dict keys, then accumulate
    # per-key transition lists.
    state_keys = env.reset().keys()
    states = {k: [] for k in state_keys}
    next_states = {k: [] for k in state_keys}
    ep_lengths = []
    for _ in tqdm.tqdm(range(args.episodes)):
        obs = env.reset()
        done = False
        totalr = 0.0
        steps = 0
        while not done and steps < args.max_steps:
            # The policy acts on the internal state, not the pixel observation.
            internal_state = {"obs": env.current_state}
            with torch.no_grad():
                action = agent.sample_action(internal_state)
            next_obs, rew, done, _ = env.step(action)
            for key, val in obs.items():
                states[key].append(val)
            for key, val in next_obs.items():
                next_states[key].append(val)
            actions.append(action)
            rewards.append(rew)
            dones.append(done)
            obs = next_obs
            totalr += rew
            steps += 1
        ep_lengths.append(steps)
        returns.append(totalr)
    print("returns", returns)
    print("ep_lengths", ep_lengths)
    print("mean return", np.mean(returns))
    print("std of return", np.std(returns))
    with open(args.save_experience, "wb") as f:
        pickle.dump(
            dict(
                states=states,
                actions=actions,
                rewards=rewards,
                next_states=next_states,
                dones=dones,
            ),
            f,
        )
# Script entry point: run policy evaluation and experience collection.
if __name__ == "__main__":
    main()
|
import discord
from discord.ext import commands
import datetime
class Snipe(commands.Cog):
    """Records deleted messages in a Postgres ``snipe`` table and lets users
    "snipe" (re-display) the most recently deleted ones, with filters by
    channel, member, bot-authored messages, and offset."""

    def __init__(self, bot):
        self.bot = bot
        # Query tail shared by the fetch commands below. It is a fixed,
        # class-owned constant (never user input), so interpolating it into
        # the SQL text is safe; all user values still go through $n params.
        self.ta = "ORDER BY time DESC LIMIT 1"

    @commands.Cog.listener()
    async def on_message_delete(self, message):
        """Persist every deleted message that has textual content."""
        # Attachment/embed-only deletions carry no text worth storing.
        if not message.content:
            return
        try:
            await self.bot.pool.execute("""
            INSERT INTO snipe VALUES ($1, $2, $3, $4, $5, $6, $7)
            """, message.content[:980], message.author.id, message.guild.id,
                 message.channel.id, message.id, message.author.bot,
                 datetime.datetime.utcnow())
        except Exception as error:
            # Best-effort logging: a failed insert must never break the
            # deletion event pipeline.
            print(f"\n{error}\n")

    async def data(self, ctx, dta):
        """Render a single snipe row (or a not-found notice) as an embed."""
        if dta is None:
            await ctx.send("Couldn't find anything to snipe")
            return
        em = discord.Embed(color=discord.Color.green())
        try:
            user = self.bot.get_user(dta["usr"])
            em.set_author(name=user, icon_url=user.avatar_url)
        except AttributeError:
            # get_user returned None: the author is no longer visible to the bot.
            deleted_user = "https://discordapp.com/assets/0e291f67c9274a1abdddeb3fd919cbaa.png"
            em.set_author(name="Missing User", icon_url=deleted_user)
        em.add_field(name="Message", value=dta["contents"][:980])
        em.timestamp = dta["time"]
        await ctx.send(embed=em)

    @commands.group(invoke_without_command=True)
    async def snipe(self, ctx):
        """Show the most recently deleted message in this channel."""
        data = await self.bot.pool.fetchrow(f"SELECT * FROM snipe WHERE guild = $1 AND channel = $2 {self.ta}", ctx.guild.id, ctx.channel.id)
        await self.data(ctx, data)

    @snipe.command(aliases=["ch"])
    async def channel(self, ctx, chid: discord.TextChannel):
        """Snipe a specific channel (NSFW channels only from NSFW channels)."""
        if chid.is_nsfw() and not ctx.channel.is_nsfw():
            em = discord.Embed(color=discord.Color.dark_teal())
            em.add_field(name="NSFW Channel Required", value="You cannot snipe an NSFW channel from a non NSFW channel.")
            await ctx.send(embed=em)
            return
        data = await self.bot.pool.fetchrow(f"SELECT * FROM snipe WHERE guild = $1 AND channel = $2 {self.ta}", ctx.guild.id, chid.id)
        await self.data(ctx, data)

    @snipe.command(aliases=["user", "u"])
    async def member(self, ctx, *, member: discord.Member):
        """Snipe the last deleted message by a specific member in this channel."""
        data = await self.bot.pool.fetchrow(f"SELECT * FROM snipe WHERE guild = $1 AND channel = $2 AND usr = $3 {self.ta}",
                                            ctx.guild.id, ctx.channel.id, member.id)
        await self.data(ctx, data)

    @snipe.command(aliases=["c"])
    async def count(self, ctx, c: int):
        """Snipe the c-th most recently deleted message (0 = most recent)."""
        data = await self.bot.pool.fetchrow(f"SELECT * FROM snipe WHERE guild = $1 AND channel = $2 {self.ta} OFFSET $3",
                                            ctx.guild.id, ctx.channel.id, c)
        await self.data(ctx, data)

    @snipe.command(name="bot", aliases=["b", "bots"])
    async def _bot(self, ctx):
        """Snipe the last deleted message authored by a bot in this channel."""
        data = await self.bot.pool.fetchrow(f"SELECT * FROM snipe WHERE guild = $1 AND channel = $2 AND bot = true {self.ta}",
                                            ctx.guild.id, ctx.channel.id)
        await self.data(ctx, data)

    @snipe.command(name="list", aliases=["l", "show", "recent"])
    async def _list(self, ctx):
        """List the five most recently deleted messages across the server."""
        data = await self.bot.pool.fetch("SELECT * FROM snipe WHERE guild = $1 ORDER BY time DESC LIMIT 5", ctx.guild.id)
        em = discord.Embed(color=discord.Color.green())
        em.set_thumbnail(url=ctx.guild.icon_url)
        if not data:
            em.add_field(name=f"{ctx.guild.name}'s Sniped Messages", value="Couldn't find anything to snipe in this server")
        else:
            em.set_author(name=f"{ctx.guild.name}'s Sniped Messages")
            for row in data:
                try:
                    user = self.bot.get_user(row["usr"]).name
                except AttributeError:
                    user = "User Not Found"
                try:
                    ch = self.bot.get_channel(row["channel"]).name
                except AttributeError:
                    ch = "Channel Not Found"
                # Truncate long names so field titles stay within embed limits.
                if len(user) > 17:
                    user = user[:-5] + "..."
                if len(ch) > 19:
                    ch = ch[:-8] + "..."
                em.add_field(name=f"**{user}**: #{ch}", value=row["contents"][:230], inline=False)
        await ctx.send(embed=em)
def setup(bot):
    """Cog entry point: attach the Snipe cog to the bot."""
    cog = Snipe(bot)
    bot.add_cog(cog)
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
class Tests:
    """(pass_text, fail_text) message pairs reported for each test step."""

    preprocess_instance_count = (
        "Pre-process instance counts are accurate",
        "Unexpected number of pre-process instances found")
    postprocess_instance_count = (
        "Post-process instance counts are accurate",
        "Unexpected number of post-process instances found")
def AltitudeFilter_FilterStageToggle():
    """
    Summary:
    Filter Stage toggle affects final vegetation position

    Expected Result:
    Vegetation instances plant differently depending on the Filter Stage setting.
    PostProcess should cause some number of plants that appear above and below the desired altitude range to disappear.

    :return: None
    """
    # Editor-side imports must happen inside the test function (the module is
    # loaded by the O3DE editor's embedded interpreter).
    import os

    import azlmbr.legacy.general as general
    import azlmbr.math as math

    import editor_python_test_tools.hydra_editor_utils as hydra
    from largeworlds.large_worlds_utils import editor_dynveg_test_helper as dynveg
    from editor_python_test_tools.utils import Report
    from editor_python_test_tools.utils import TestHelper as helper

    # Expected instance counts for the two filter-stage settings.
    PREPROCESS_INSTANCE_COUNT = 44
    POSTPROCESS_INSTANCE_COUNT = 34

    # Open an existing simple level
    hydra.open_base_level()
    general.set_current_view_position(512.0, 480.0, 38.0)

    # Create basic vegetation entity
    position = math.Vector3(512.0, 512.0, 32.0)
    flower_asset_path = os.path.join("assets", "objects", "foliage", "grass_flower_pink.azmodel")
    flower_prefab = dynveg.create_temp_mesh_prefab(flower_asset_path, "AltFilter_PinkFlower2")[0]
    vegetation = dynveg.create_temp_prefab_vegetation_area("vegetation", position, 16.0, 16.0, 16.0, flower_prefab)

    # Add a Vegetation Altitude Filter to the vegetation area entity
    vegetation.add_component("Vegetation Altitude Filter")

    # Create Surface for instances to plant on
    dynveg.create_surface_entity("Surface_Entity_Parent", position, 16.0, 16.0, 1.0)

    # Add entity with Mesh to replicate creation of hills
    hill_entity = dynveg.create_mesh_surface_entity_with_slopes("hill", position, 10.0)

    # Set a Min Altitude of 38 and Max of 40 in Vegetation Altitude Filter
    # (component index 3 is the Altitude Filter on this entity).
    vegetation.get_set_test(3, "Configuration|Altitude Min", 38.0)
    vegetation.get_set_test(3, "Configuration|Altitude Max", 40.0)

    # Create a new entity as a child of the vegetation area entity with Random Noise Gradient Generator, Gradient
    # Transform Modifier, and Box Shape component
    random_noise = hydra.Entity("random_noise")
    random_noise.create_entity(position, ["Random Noise Gradient", "Gradient Transform Modifier", "Box Shape"])
    random_noise.set_test_parent_entity(vegetation)

    # Add a Vegetation Position Modifier to the vegetation area entity.
    vegetation.add_component("Vegetation Position Modifier")

    # Pin the Random Noise entity to the Gradient Entity Id field of the Position Modifier's Gradient X
    vegetation.get_set_test(4, "Configuration|Position X|Gradient|Gradient Entity Id", random_noise.id)

    # Toggle between PreProcess and PostProcess in Vegetation Altitude Filter
    # and validate the expected instance count for each filter stage.
    vegetation.get_set_test(3, "Configuration|Filter Stage", 1)
    result = helper.wait_for_condition(lambda: dynveg.validate_instance_count(position, 30.0, PREPROCESS_INSTANCE_COUNT), 2.0)
    Report.result(Tests.preprocess_instance_count, result)
    vegetation.get_set_test(3, "Configuration|Filter Stage", 2)
    result = helper.wait_for_condition(lambda: dynveg.validate_instance_count(position, 30.0, POSTPROCESS_INSTANCE_COUNT), 2.0)
    Report.result(Tests.postprocess_instance_count, result)
if __name__ == "__main__":
    # Entry point used by the O3DE editor test runner.
    from editor_python_test_tools.utils import Report
    Report.start_test(AltitudeFilter_FilterStageToggle)
|
from setuptools import setup

# Packaging metadata for the flask-apidoc distribution (published to PyPI).
setup(
    name='flask-apidoc',
    version='1.1.2',
    packages=['flask_apidoc'],
    url='https://github.com/viniciuschiele/flask-apidoc',
    license='MIT',
    author='Vinicius Chiele',
    author_email='vinicius.chiele@gmail.com',
    description='Adds ApiDoc support to Flask',
    keywords=['flask', 'apidoc', 'doc', 'documentation', 'rest', 'restful'],
    install_requires=['flask>=0.10.1'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: CPython'
    ]
)
|
from flask import Flask, jsonify

import numpy as np
import datetime as dt

import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlalchemy.pool import StaticPool

# StaticPool + check_same_thread=False shares a single SQLite connection across
# Flask's worker threads; echo=True logs every emitted SQL statement.
engine = create_engine("sqlite:///Resources/hawaii.sqlite", connect_args={"check_same_thread": False}, poolclass=StaticPool, echo=True)

# Reflect the existing database tables into mapped classes.
Base = automap_base()
Base.prepare(engine, reflect=True)
Measurement = Base.classes.measurement
Station = Base.classes.station

# NOTE(review): one module-level session shared by every request -- fine for a
# single-user demo app, not safe for concurrent production use.
session = Session(engine)

app = Flask(__name__)
@app.route("/")
def welcome():
    """Landing page: static HTML listing every available API route."""
    return """<html>
<h1>Hawaii Climate App (Flask API)</h1>
<p>Precipitation Analysis:</p>
<ul>
<li><a href="/api/v1.0/precipitation">/api/v1.0/precipitation</a></li>
</ul>
<p>Station Analysis:</p>
<ul>
<li><a href="/api/v1.0/stations">/api/v1.0/stations</a></li>
</ul>
<p>Temperature Analysis:</p>
<ul>
<li><a href="/api/v1.0/tobs">/api/v1.0/tobs</a></li>
</ul>
<p>Start Day Analysis:</p>
<ul>
<li><a href="/api/v1.0/2017-03-14">/api/v1.0/2017-03-14</a></li>
</ul>
<p>Start & End Day Analysis:</p>
<ul>
<li><a href="/api/v1.0/2017-03-14/2017-03-28">/api/v1.0/2017-03-14/2017-03-28</a></li>
</ul>
</html>
"""
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return a JSON mapping of date -> precipitation for the final year of data."""
    cutoff = dt.date(2017, 8, 23) - dt.timedelta(days=365)
    rows = (
        session.query(Measurement.date, Measurement.prcp)
        .filter(Measurement.date >= cutoff)
        .order_by(Measurement.date)
        .all()
    )
    # NOTE(review): dict() keeps only the last prcp per date when a date appears
    # for multiple stations -- confirm that collapsing is intended.
    return jsonify(dict(rows))
@app.route("/api/v1.0/stations")
def stations():
    """Return a JSON list of (station id, station name) pairs."""
    rows = session.query(Station.station, Station.name).all()
    return jsonify(list(rows))
@app.route("/api/v1.0/tobs")
def tobs():
    """Return JSON (date, observed temperature) pairs for the final year of data."""
    cutoff = dt.date(2017, 8, 23) - dt.timedelta(days=365)
    observations = (
        session.query(Measurement.date, Measurement.tobs)
        .filter(Measurement.date >= cutoff)
        .order_by(Measurement.date)
        .all()
    )
    return jsonify(list(observations))
@app.route("/api/v1.0/<start>")
def start_day(start):
    """Return per-date (date, TMIN, TAVG, TMAX) from `start` onward."""
    per_date_stats = (
        session.query(
            Measurement.date,
            func.min(Measurement.tobs),
            func.avg(Measurement.tobs),
            func.max(Measurement.tobs),
        )
        .filter(Measurement.date >= start)
        .group_by(Measurement.date)
        .all()
    )
    return jsonify(list(per_date_stats))
@app.route("/api/v1.0/<start>/<end>")
def start_end_day(start, end):
    """Return per-date (date, TMIN, TAVG, TMAX) for the inclusive [start, end] range."""
    per_date_stats = (
        session.query(
            Measurement.date,
            func.min(Measurement.tobs),
            func.avg(Measurement.tobs),
            func.max(Measurement.tobs),
        )
        .filter(Measurement.date >= start)
        .filter(Measurement.date <= end)
        .group_by(Measurement.date)
        .all()
    )
    return jsonify(list(per_date_stats))
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
|
# -*- coding: utf-8 -*-
"""
File Name: maxDepth
Author : jing
Date: 2020/4/13
二叉树的深度
https://leetcode-cn.com/problems/er-cha-shu-de-shen-du-lcof/
"""
# Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.val = x
        # Children start unset; callers wire them up after construction.
        self.left = self.right = None
class Solution:
    """LeetCode: maximum depth of a binary tree (iterative level-order form)."""

    def maxDepth(self, root: TreeNode) -> int:
        """Return the number of nodes on the longest root-to-leaf path (0 for None)."""
        if root is None:
            return 0
        depth = 0
        frontier = [root]
        # Peel off one full level per iteration; depth counts the levels seen.
        while frontier:
            depth += 1
            frontier = [
                child
                for node in frontier
                for child in (node.left, node.right)
                if child is not None
            ]
        return depth
|
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Unit test suite for aws_encryption_sdk.identifiers.AlgorithmSuite."""
import pytest
from aws_encryption_sdk.identifiers import AlgorithmSuite
@pytest.mark.parametrize(
    "suite",
    (
        AlgorithmSuite.AES_256_GCM_HKDF_SHA512_COMMIT_KEY,
        AlgorithmSuite.AES_256_GCM_HKDF_SHA512_COMMIT_KEY_ECDSA_P384,
    ),
)
def test_committing_suites_properties(suite):
    """Committing suites report message format v2 with a 32-byte message ID."""
    expected_version, expected_id_length = 0x02, 32
    assert suite.is_committing()
    assert suite.message_format_version == expected_version
    assert suite.message_id_length() == expected_id_length
@pytest.mark.parametrize(
    "suite",
    (
        AlgorithmSuite.AES_128_GCM_IV12_TAG16,
        AlgorithmSuite.AES_192_GCM_IV12_TAG16,
        AlgorithmSuite.AES_256_GCM_IV12_TAG16,
        AlgorithmSuite.AES_128_GCM_IV12_TAG16_HKDF_SHA256,
        AlgorithmSuite.AES_192_GCM_IV12_TAG16_HKDF_SHA256,
        AlgorithmSuite.AES_256_GCM_IV12_TAG16_HKDF_SHA256,
        AlgorithmSuite.AES_128_GCM_IV12_TAG16_HKDF_SHA256_ECDSA_P256,
        AlgorithmSuite.AES_192_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,
        AlgorithmSuite.AES_256_GCM_IV12_TAG16_HKDF_SHA384_ECDSA_P384,
    ),
)
def test_noncommitting_suites_properties(suite):
    """Non-committing suites report message format v1 with a 16-byte message ID."""
    expected_version, expected_id_length = 0x01, 16
    assert not suite.is_committing()
    assert suite.message_format_version == expected_version
    assert suite.message_id_length() == expected_id_length
|
from tool.runners.python import SubmissionPy
class BadouralixSubmission(SubmissionPy):
    """Advent of Code 2020 day 9 part 2: encryption weakness of an XMAS stream."""

    def __init__(self):
        # Number of leading values that seed the rolling validation window.
        self.preamble_size = 25

    def run(self, s):
        """
        :param s: input in string format
        :return: solution flag
        """
        numbers = [int(token) for token in s.split()]
        window = self.preamble_size
        for idx in range(len(numbers) - window):
            target = numbers[idx + window]
            if self.isvalid(target, numbers[idx : idx + window]):
                continue
            # First number that is not a sum of two of the previous `window`
            # values; now find a contiguous run summing to it.
            invalid_number = target
            lo = hi = 0
            while True:
                if lo == hi:
                    # Keep the run at least two elements long.
                    hi += 1
                # Do not build a set here, it is way faster to keep the list
                segment = numbers[lo : hi + 1]
                total = sum(segment)
                if total == invalid_number:
                    return min(segment) + max(segment)
                if total < invalid_number:
                    hi += 1
                else:
                    lo += 1

    @staticmethod
    def isvalid(target, previous):
        """Return True if `target` is the sum of two distinct entries of `previous`."""
        for pos, value in enumerate(previous[:-1]):
            if target - value in previous[pos + 1 :]:
                return True
        return False
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple key-value cache."""
from google.appengine.ext import ndb
class KeyValueCache(ndb.Model):
    """Data cached that doesn't require a separate model.

    Attributes:
        recorded_dt: DateTime, time of insertion (set automatically on create).
        value: JSON-serializable payload that was inserted.
    """
    recorded_dt = ndb.DateTimeProperty(auto_now_add=True)
    value = ndb.JsonProperty()
|
from threading import Thread
from time import sleep
from pratidarshan import (
pradarshanam,
alert,
get_registry,
lang_code,
AJAY,
sahAyikA,
ver,
Tk,
ttk,
display_lang_lists,
)
from pystray import MenuItem as item, Menu as menu, Icon as SysTray
from PIL import Image
from kuJjikopalambhan import kuYjikolambhikam
from os import startfile
from urllib.request import urlopen
from sys import argv
from json import loads
class Main:
    """Application controller for the Lipi Lekhika typing tool.

    Owns the typing/display state, starts the Tk UI on a daemon thread, and
    exposes flag sets (`msg`, `sandesh`) that the system-tray thread and the
    keyboard hook poll for cross-thread updates.

    NOTE(review): many names are transliterated Sanskrit (akSharAH = letters,
    sandesh = message, darshan = display, ks = on/off state); kept as-is.
    """

    def __init__(self, dbg):
        """Load persisted settings from the registry and start the Tk UI thread.

        :param dbg: when True, start_tk runs without the crash-restart guard.
        """
        self.ks = get_registry("anuprayogasthiti")
        self.sg_status = bool(get_registry("lekhanasahAyikA"))
        self.msg = set([])
        self.akSharAH = {}
        self.display_data = {}
        self.loaded_scripts = []
        self.loaded_display_lng = []
        self.lang_mode = ""
        self.debug = dbg
        self.load_typ_lng(lang_code[2][get_registry("lekhanbhAShA")])
        self.temp = self.ks
        self.darshan = ""
        self.load_display_lng(display_lang_lists[get_registry("bhAShAnuprayogaH")])
        # [0] signals the tray thread, [1] signals the keyboard-hook thread.
        self.value_change = [False, False]
        th_tk = Thread(target=self.start_tk, name="TK")
        th_tk.daemon = True
        self.sandesh = set([])
        self.window_start_status = get_registry("koShThaprArambha")
        self.tk_status = False
        self.sa_lang = self.akSharAH[self.lang_mode]["sa"]
        th_tk.start()
        self.sg = sahAyikA(self)
        self.tray = None

    def load_typ_lng(self, lang):
        """Lazily load the typing-script JSON for `lang` and make it current."""
        if lang not in self.loaded_scripts:
            fl = open(
                f"resources/dattAMsh/{lang}.json",
                encoding="utf-8",
                mode="r+",
            )
            self.akSharAH[lang] = loads(fl.read())
            fl.close()
            self.loaded_scripts.append(lang)
        self.lang_mode = lang

    def load_display_lng(self, lang):
        """Lazily load the UI-display-language JSON for `lang` and make it current."""
        if lang not in self.loaded_display_lng:
            fl = open(
                f"resources/dattAMsh/display/{lang}.json",
                encoding="utf-8",
                mode="r+",
            )
            self.display_data[lang] = loads(fl.read())
            fl.close()
            self.loaded_display_lng.append(lang)
        self.darshan = lang

    def give_startup_msg(self):
        """Show the startup toast summarizing the current settings."""
        a = AJAY[self.lang_mode]
        # NOTE(review): `akSharaH` (lowercase second 'a') differs from
        # `self.akSharAH`; this creates a brand-new attribute instead of
        # clearing the script cache, and resetting loaded_scripts while
        # akSharAH stays populated looks unintentional -- confirm.
        self.akSharaH = {}
        self.loaded_scripts = []
        if self.akSharAH[self.lang_mode]["sa"] == 0:
            a = a[:-1]
        text = self.r.l_data["startup_msg"].format(
            (self.r.l_data["off"], self.r.l_data["on"])[self.ks],
            (self.r.l_data["off"], self.r.l_data["on"])[self.sg_status],
            lang_code[1][self.lang_mode],
            "ajay➠" + a,
        )
        alert(text, color="green", lapse=4200, geo=True, AkAra=14)

    def get(self, name, val=0):
        """String-keyed dispatcher used by the tray/hook threads.

        Despite the name, some keys mutate state or show alerts rather than
        return a value; keys with no explicit return yield None.
        """
        if name == "ks":
            return self.ks
        elif name == "display_data":
            return self.display_data[self.darshan]
        elif name == "clicked":
            return self.sg.varna_clicked
        elif name == "img_pressed":
            return self.sg.image_pressed
        elif name == "reset_img_pressed":
            self.sg.image_pressed = False
        elif name == "varna_pressed":
            return self.sg.varna_clicked_st
        elif name == "reset_varna_pressed":
            self.sg.varna_clicked_st = False
        elif name == "reset_no_procedure":
            self.sg.no_procedure = False
        elif name == "sandesh":
            return self.sandesh
        elif name == "set_val_change":
            self.value_change[val] = False
        elif name == "get_val_change":
            return self.value_change
        elif name == "reset_sandesh":
            self.sandesh = set([])
        elif name == "clear_sg_val":
            self.sg.pUrvavarNa = [("", "", -1), ""]
        elif name == "sg_status":
            return self.sg_status
        elif name == "show_sg":
            self.sg.show(val)
        elif name == "hide_sg":
            self.sg.hide(True)
        elif name == "tk":
            return self.tk_status
        elif name == "time_exceed":
            alert(
                self.r.l_data["time_exceed"],
                color="red",
                lapse=9000,
                geo=True,
                AkAra=16,
                bg="white",
            )
        elif name == "show_status":
            # Toast summarizing typing language, on/off state and helper state.
            alert(
                (
                    self.r.l_data["values"]["typing_lang_main"]
                    + " : {0}\n"
                    + self.r.l_data["anuprayog"]
                    + " : {1}"
                    + "\n{2} : {1}\n{3}"
                ).format(
                    self.r.typing_lang.get(),
                    (self.r.l_data["off"], self.r.l_data["on"])[self.ks],
                    self.r.l_data["values"]["sahayika"],
                    self.r.ajay_texts[self.r.sanskrit_mode.get()].get(),
                ),
                color="blue",
                lapse=1400,
                geo=True,
                AkAra=16,
                bg="#faf9ae",
            )
        elif name == "change_less":
            self.change(True, True)
        elif name == "msg":
            return self.msg
        elif name == "title_text":
            # Tray tooltip: on/off state, current script, helper state.
            c = self.display_data[self.darshan]
            d = [c["off"], c["on"]]
            return c["tray"]["title"].format(
                d[self.ks], c["scripts"][self.lang_mode], d[self.sg_status]
            )
        elif name == "sa":
            return self.sa_lang
        elif name == "lang":
            return self.lang_mode
        elif name == "update_sans":
            self.r.update_sans_mode(1, val)
        elif name == "null_msg":
            self.msg = set([])
        elif name == "close_from":
            self.sandesh.add("close")
            self.value_change[0] = True

    def exec_taskbar_commands(self, n, m=True):
        """Handle commands originating from the system-tray menu.

        :param n: command name ("show", "sg", "sg_on", "sg_off",
                  "close_set_false", "restart").
        :param m: unused here; kept for caller compatibility.
        """
        if n == "show":
            self.r.show()
        elif n == "sg":
            # Toggle the on-screen helper and notify both worker threads.
            self.sg_status = not self.sg_status
            self.msg.add("sg")
            self.value_change[1] = True
            self.r.sg_button.configure(image=self.r.image1[int(self.sg_status)])
            if not self.sg_status:
                self.get("hide_sg")
            self.sandesh.add("sg")
            self.sandesh.add("title")
            self.value_change[0] = True
        elif n == "sg_on":
            self.sg_status = True
            self.r.sg_button.configure(image=self.r.image1[int(self.sg_status)])
            self.msg.add("sg")
            self.value_change[1] = True
            self.sandesh.add("sg")
            self.sandesh.add("title")
            self.value_change[0] = True
            alert(
                self.r.l_data["sahayika_on"],
                color="green",
                bg="white",
            )
        elif n == "sg_off":
            self.sg_status = False
            self.r.sg_button.configure(image=self.r.image1[int(self.sg_status)])
            self.msg.add("sg")
            self.value_change[1] = True
            self.get("hide_sg")
            self.sandesh.add("sg")
            self.sandesh.add("title")
            self.value_change[0] = True
            alert(
                self.r.l_data["sahayika_off"],
                color="red",
                bg="white",
            )
        elif n == "close_set_false":
            self.close_status = True
        elif n == "restart":
            alert(
                self.r.l_data["tray"]["restarted"],
                color="purple",
            )
            self.msg.add("restart")
            self.value_change[1] = True
        # Any helper-related command also refreshes the window title text.
        if "sg" in n:
            self.r.title_ref["sahayika"].update_lekha(
                self.r.l_data["title"]["sahayika" + str(int(self.sg_status))]
            )

    def update_typ_lang(self, l, from_win=False):
        """Switch the active typing script to `l` and refresh all UI mirrors.

        :param l: script name; a display-language value when from_win is True.
        :param from_win: True when triggered from the Tk window's own widget.
        """
        if not from_win:
            self.r.typing_lang.set(lang_code[1][l])
        else:
            l = lang_code[0][l]
        self.load_typ_lng(l)
        self.msg.add("change_lang")
        self.value_change[1] = True
        t = "ajay➠" + AJAY[lang_code[0][self.r.typing_lang.get()]]
        self.r.ajay_texts[0].set(t[:-1])
        self.r.ajay_texts[1].set(t)
        alert(
            f'{self.r.l_data["menu_values"]["typing_lang"]} ➠ {self.r.l_data["scripts"][l]}',
            color="green",
            lapse=1200,
        )
        self.sa_lang = self.akSharAH[self.lang_mode]["sa"]
        self.r.sanskrit_mode.set(self.sa_lang)
        self.sandesh.add("lang")
        self.sandesh.add("title")
        # These two scripts have no "ajay" variant widget; hide the frame.
        if l in ("Urdu", "Romanized"):
            self.r.fr_ajay.grid_forget()
        else:
            self.r.fr_ajay.grid(row=0, column=2, sticky="nw", pady=(1.5, 0))
        self.value_change[0] = True

    def open_img(self):
        """Open the encoding-table image via the Tk window object."""
        self.r.open_img()

    def start_tk(self):
        """Build and run the Tk UI (runs on the dedicated daemon thread)."""
        if self.debug:
            self.r = pradarshanam(self)
            self.r = self.r.prArambh()
            self.r.init()
        else:
            # NOTE(review): bare excepts -- any UI failure silently relaunches
            # the app and closes this instance; intentional crash-restart guard.
            try:
                self.r = pradarshanam(self)
                self.r = self.r.prArambh()
                self.r.init()
            except:
                try:
                    startfile("lekhika.exe")
                except:
                    startfile("lekhika.py")
                self.sandesh.add("close_just")
                self.value_change[0] = True

    def change(self, n=False, less=False, from_win=False, o=None):
        """Toggle or sync the global on/off state (`ks`).

        :param n: True to flip/set the state; False to re-read it from the UI.
        :param less: when True, skip notifying the other threads.
        :param from_win: suppress the toast when the change came from the window.
        :param o: explicit new state (0/1) overriding the flip.
        """
        if n:
            self.ks = abs(self.ks - 1) if o == None else o
            self.r.karyAsthiti = self.ks
        else:
            self.ks = self.r.karyAsthiti
            if self.temp == self.ks:
                return
        self.r.kAryaM.configure(image=self.r.image[self.r.karyAsthiti])
        self.temp = self.ks
        if not less:
            self.msg.add("update_ks")
            self.msg.add("clear_vals")
            self.value_change[1] = True
        color = ("red", "green")[self.ks]
        self.sandesh.add("ks")
        self.r.title_ref["main"].update_lekha(
            self.r.l_data["title"]["main" + str(self.ks)]
        )
        self.sandesh.add("title")
        self.value_change[0] = True
        alert(
            {1: self.r.l_data["turned_on"], 0: self.r.l_data["turned_off"]}.get(
                self.ks
            ),
            color,
            bg="white",
        ) if not from_win else None
if __name__ == "__main__":

    class TaskBar:
        """System-tray icon wrapper around pystray.

        Mirrors a snapshot of Main's state (display strings, language, on/off,
        helper flag) and polls Main's `sandesh` flags on a background thread to
        refresh the menu/title.
        """

        def init(self, val):
            """Build the tray icon from `val` (a Main instance) and start polling."""
            self.display = val.get("display_data")
            self.lang = val.get("lang")
            self.ks = val.get("ks")
            self.sg = val.get("sg_status")
            menu_options = self.__menu_object()
            self.systray = SysTray(
                "Lipi Lekhika",
                Image.open(r"resources\img\main.webp"),
                val.get("title_text"),
                menu_options,
            )
            val.exec_taskbar_commands("close_set_false")
            self.val = val
            th = Thread(target=self.__check_value_updates)
            th.daemon = True
            th.start()

        def __menu_object(self):
            """Return the full tray menu. The lambdas deliberately close over the
            module-level `val`/`key` (self.val is not yet set on first call)."""
            global key
            return menu(
                item(
                    "🔄 " + self.display["tray"]["restart"],
                    lambda k: val.exec_taskbar_commands("restart"),
                    radio=False,
                ),
                item(
                    self.display["on"],
                    lambda k: val.change(True, o=1),
                    checked=lambda item: self.ks == 1,
                    radio=True,
                ),
                item(
                    self.display["off"],
                    lambda k: val.change(True, o=0),
                    checked=lambda item: self.ks == 0,
                    radio=True,
                ),
                menu.SEPARATOR,
                item(
                    self.display["values"]["sahayika"],
                    menu(
                        item(
                            self.display["on"],
                            lambda _: val.exec_taskbar_commands("sg_on"),
                            checked=lambda item: self.sg == True,
                            radio=True,
                        ),
                        item(
                            self.display["off"],
                            lambda _: val.exec_taskbar_commands("sg_off"),
                            checked=lambda item: self.sg == False,
                            radio=True,
                        ),
                    ),
                ),
                item(
                    self.display["values"]["typing_lang_main"],
                    menu(
                        item(
                            lang_code[1]["Brahmi"],
                            lambda _: val.update_typ_lang("Brahmi"),
                            checked=lambda item: self.lang == "Brahmi",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Granth"],
                            lambda _: val.update_typ_lang("Granth"),
                            checked=lambda item: self.lang == "Granth",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Siddham"],
                            lambda _: val.update_typ_lang("Siddham"),
                            checked=lambda item: self.lang == "Siddham",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Sharada"],
                            lambda _: val.update_typ_lang("Sharada"),
                            checked=lambda item: self.lang == "Sharada",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Modi"],
                            lambda _: val.update_typ_lang("Modi"),
                            checked=lambda item: self.lang == "Modi",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Tamil-Extended"],
                            lambda _: val.update_typ_lang("Tamil-Extended"),
                            checked=lambda item: self.lang == "Tamil-Extended",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Sinhala"],
                            lambda _: val.update_typ_lang("Sinhala"),
                            # BUG FIX: previously compared against "Sharada"
                            # (copy-paste error), so Sinhala never showed as
                            # selected and Sharada's dot appeared here too.
                            checked=lambda item: self.lang == "Sinhala",
                            radio=True,
                        ),
                        menu.SEPARATOR,
                        item(
                            lang_code[1]["Romanized"],
                            lambda _: val.update_typ_lang("Romanized"),
                            checked=lambda item: self.lang == "Romanized",
                            radio=True,
                        ),
                        menu.SEPARATOR,
                        item(
                            lang_code[1]["Urdu"],
                            lambda _: val.update_typ_lang("Urdu"),
                            checked=lambda item: self.lang == "Urdu",
                            radio=True,
                        ),
                        menu.SEPARATOR,
                        item(
                            lang_code[1]["Punjabi"],
                            lambda _: val.update_typ_lang("Punjabi"),
                            checked=lambda item: self.lang == "Punjabi",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Purna-Devanagari"],
                            lambda _: val.update_typ_lang("Purna-Devanagari"),
                            checked=lambda item: self.lang == "Purna-Devanagari",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Nepali"],
                            lambda _: val.update_typ_lang("Nepali"),
                            checked=lambda item: self.lang == "Nepali",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Sanskrit"],
                            lambda _: val.update_typ_lang("Sanskrit"),
                            checked=lambda item: self.lang == "Sanskrit",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Assamese"],
                            lambda _: val.update_typ_lang("Assamese"),
                            checked=lambda item: self.lang == "Assamese",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Konkani"],
                            lambda _: val.update_typ_lang("Konkani"),
                            checked=lambda item: self.lang == "Konkani",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Oriya"],
                            lambda _: val.update_typ_lang("Oriya"),
                            checked=lambda item: self.lang == "Oriya",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Kannada"],
                            lambda _: val.update_typ_lang("Kannada"),
                            checked=lambda item: self.lang == "Kannada",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Malayalam"],
                            lambda _: val.update_typ_lang("Malayalam"),
                            checked=lambda item: self.lang == "Malayalam",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Gujarati"],
                            lambda _: val.update_typ_lang("Gujarati"),
                            checked=lambda item: self.lang == "Gujarati",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Marathi"],
                            lambda _: val.update_typ_lang("Marathi"),
                            checked=lambda item: self.lang == "Marathi",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Tamil"],
                            lambda _: val.update_typ_lang("Tamil"),
                            checked=lambda item: self.lang == "Tamil",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Telugu"],
                            lambda _: val.update_typ_lang("Telugu"),
                            checked=lambda item: self.lang == "Telugu",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Bengali"],
                            lambda _: val.update_typ_lang("Bengali"),
                            checked=lambda item: self.lang == "Bengali",
                            radio=True,
                        ),
                        item(
                            lang_code[1]["Hindi"],
                            lambda _: val.update_typ_lang("Hindi"),
                            checked=lambda item: self.lang == "Hindi",
                            radio=True,
                        ),
                    ),
                ),
                item(
                    self.display["menu_values"]["encoding_table"],
                    lambda x: val.open_img(),
                ),
                menu.SEPARATOR,
                item(
                    "💻" + self.display["tray"]["show"],
                    lambda _: val.exec_taskbar_commands("show"),
                ),
                item("❌ " + self.display["tray"]["exit"], lambda k: self.close()),
            )

        def close(self, k=False):
            """Stop the tray icon; show the exit toast unless closing silently."""
            if not k:
                alert(
                    self.display["exit_msg"],
                    color="red",
                    lapse=500,
                    geo=True,
                    wait=True,
                )
            self.systray.visible = False
            self.systray.stop()

        def __update_values(self):
            """Apply every pending flag from Main's `sandesh` set, then refresh the menu."""
            # Consistency fix: always go through self.val (set in init) instead
            # of mixing it with the module-level `val` alias.
            sam = self.val.get("sandesh")
            for x in sam:
                if x == "sg":
                    self.sg = self.val.get("sg_status")
                elif x == "lang":
                    self.lang = self.val.get("lang")
                elif x == "ks":
                    self.ks = self.val.get("ks")
                elif x == "app_lang":
                    # Display language changed: rebuild the whole menu.
                    self.display = self.val.get("display_data")
                    self.systray.menu = self.__menu_object()
                    self.systray.title = self.val.get("title_text")
                    self.systray._update_title()
                elif x == "title":
                    self.systray.title = self.val.get("title_text")
                    self.systray._update_title()
                elif x == "close":
                    self.close()
                    return
                elif x == "close_just":
                    self.close(True)
                    return
            self.val.get("reset_sandesh")
            self.systray.update_menu()

        def __check_value_updates(self):
            """Poll Main's change flag twice a second and sync when it is set."""
            while True:
                a = self.val.get("get_val_change")[0]
                if a:
                    self.__update_values()
                    self.val.get("set_val_change", 0)
                else:
                    sleep(0.5)
    dbg = False
    # Passing "doShAnusandhAna" (= debugging) as the last CLI argument enables
    # debug mode (no crash-restart guard in Main.start_tk).
    try:
        args = argv[-1]
        if args == "doShAnusandhAna":
            dbg = True
    except:
        pass
    val = Main(dbg)
    tsk = TaskBar()
    # Keyboard hook; created before the tray so key events are handled early.
    key = kuYjikolambhikam(val)
    tsk.init(val)

    def update():
        """Check the update endpoint and offer a download dialog if newer."""
        ver1 = 0
        # Best-effort: any network failure leaves ver1 at 0 (no prompt).
        try:
            o = urlopen("https://get.lipilekhika.com/navasanskaranam")
            ver1 = float(o.read().decode("utf-8"))
        except:
            pass
        if ver1 > ver:

            def check_decision(n, tk):
                # n is True when the user clicked "yes": open the download page.
                if n:
                    import webbrowser as web

                    web.open("https://rebrand.ly/lekhika")
                tk.destroy()

            global tsk
            text = tsk.display
            # Borderless, temporarily-topmost prompt window.
            root = Tk()
            style = ttk.Style(root)
            root.title("")
            root.configure(bg="#faf9ae")
            root.wm_overrideredirect(True)
            root.eval("tk::PlaceWindow . center")
            root.attributes("-topmost", True)
            root.after(650, lambda: root.attributes("-topmost", True))
            root.after(2600, lambda: root.attributes("-topmost", False))
            style.configure(
                "A.TLabel",
                font=("Nirmala UI", 14, "bold"),
                foreground="brown",
                background="#faf9ae",
            )
            style.configure(
                "Q.TButton", font=("Nirmala UI", 10, "bold"), foreground="green"
            )
            style.configure(
                "W.TButton", font=("Nirmala UI", 10, "bold"), foreground="red"
            )
            style.configure("R.TFrame", background="#faf9ae")
            frm1 = ttk.Frame(style="R.TFrame")
            frm1.grid(row=0, column=0, sticky="nw")
            ttk.Label(
                frm1, text=text["download_msg"], justify="center", style="A.TLabel"
            ).grid(row=0, column=0, sticky="nw")
            frm = ttk.Frame(frm1)
            ttk.Button(
                frm,
                text=text["yes"],
                style="Q.TButton",
                command=lambda: check_decision(True, root),
            ).grid(row=0, column=0, sticky="n")
            ttk.Button(
                frm,
                text=text["no"],
                style="W.TButton",
                command=lambda: check_decision(False, root),
            ).grid(row=0, column=1, sticky="n")
            frm.grid(row=1, column=0, sticky="n")
            # Auto-dismiss the prompt after 30 seconds.
            root.after(30000, lambda: root.destroy())
            root.mainloop()

    # Only check for updates in normal (non-debug) runs.
    if not val.debug:
        y = Thread(target=update)
        y.daemon = True
        y.start()
    # Blocks until the tray icon is stopped (TaskBar.close).
    tsk.systray.run()
|
from random import randrange
from threading import Barrier, Thread
from time import ctime, sleep
num_runners = 3
# Every runner thread blocks at this barrier until all of them have arrived.
finish_line = Barrier(num_runners)
runners = ['Huey', 'Dewey', 'Louie']
def runner():
    """Simulate one racer: take a name, delay randomly, then rendezvous at the barrier."""
    name = runners.pop()
    delay = randrange(2, 5)
    sleep(delay)
    print('%s reached the barrier at: %s \n' % (name, ctime()))
    finish_line.wait()
def main():
    """Start one thread per runner and wait for the race to finish."""
    print('START RACE!!!!')
    workers = []
    for _ in range(num_runners):
        worker = Thread(target=runner)
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    print('Race over!')
# Script entry point.
if __name__ == "__main__":
    main()
|
import logging
from sentry_sdk import Hub
async def test_capture_exc(catch_sentry):
    """Manually captured exceptions are forwarded to Sentry.

    `catch_sentry` is a fixture that is falsy until an event has been captured.
    """
    assert not catch_sentry
    try:
        1 / 0
    except ZeroDivisionError:
        # Must run inside the except block so the active exception is attached.
        Hub.current.capture_exception()
    assert catch_sentry
async def test_logging(catch_sentry):
    """A CRITICAL record on the root logger is captured as a Sentry event."""
    assert not catch_sentry
    root_logger = logging.getLogger()
    root_logger.critical('error')
    assert catch_sentry
|
import markdown
from commons import file_name_tools
from commons.file import file_utils
import bleach
from commons.errors import PageNotFoundError
from logging import getLogger
logger = getLogger(__name__)
def md_to_html(md_text):
    """Render Markdown source to sanitized HTML.

    :param md_text: raw Markdown text of a wiki page.
    :return: HTML string sanitized by bleach against the tag/attribute whitelist.
    """
    logger.debug("md_to_html:start")
    main_contents = markdown.markdown(md_text, extensions=[
        'markdown.extensions.abbr',
        'markdown.extensions.admonition',
        'markdown.extensions.attr_list',
        'markdown.extensions.codehilite',
        'markdown.extensions.def_list',
        'markdown.extensions.extra',
        'markdown.extensions.fenced_code',
        'markdown.extensions.footnotes',
        'markdown.extensions.headerid',
        'markdown.extensions.meta',
        'markdown.extensions.nl2br',
        'markdown.extensions.sane_lists',
        'markdown.extensions.smart_strong',
        'markdown.extensions.smarty',
        'markdown.extensions.tables',
        'markdown.extensions.toc',
        # 'markdown.extensions.wikilinks',
        'MarkdownHighlight.highlight',
        'commons.markdown.extensions.mdx_del_ins',
        'commons.markdown.extensions.mdx_superscript',
        'commons.markdown.extensions.mdx_semanticwikilinks',
        'commons.markdown.extensions.mdx_semanticdata',
        'subscript',
    ])
    # Whitelists applied by bleach: anything else is escaped/stripped.
    ALLOWED_TAGS = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'p', 'b',
        'i', 'strong', 'a', 'abbr', 'acronym', 'table', 'thead', 'tbody',
        'th', 'tr', 'td', 'ul', 'li', 'br', 'pre', 'code',
        'blockquote', 'cite', 'hr', 'em', 'del', 'sup', 'sub',
        'mark', 'ins', 'span', 'img', 'ol', 'strong']
    ALLOWED_ATTRIBUTES = ['content', 'property', 'href', 'target', 'alt', 'src', 'title']
    # BUG FIX: the already-rendered HTML was previously run through
    # markdown.markdown() a second time before sanitizing, which re-wrapped
    # and could mangle the generated markup. Sanitize the rendered HTML directly.
    html = bleach.clean(main_contents, tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRIBUTES)
    logger.debug("md_to_html:end")
    return html
def get_contents(wiki_id, page_dirs):
    """Return the wiki page rendered to sanitized HTML.

    :param wiki_id: identifier of the wiki whose storage is read.
    :param page_dirs: page path components resolved to a file name.
    :raises PageNotFoundError: when the underlying markdown file is missing.
    """
    logger.debug("get_contents:start")
    # DRY: file lookup and the missing-page translation are identical to
    # get_edit_contents (same PageNotFoundError and message), so delegate.
    main_contents = md_to_html(get_edit_contents(wiki_id, page_dirs))
    logger.debug("get_contents:end")
    return main_contents
def get_edit_contents(wiki_id, page_dirs):
    """Return the raw Markdown source of a wiki page (for the edit view).

    :raises PageNotFoundError: when the underlying markdown file is missing.
    """
    logger.debug("get_edit_contents:start")
    try:
        page_name = file_name_tools.page_dirs_to_file_name(page_dirs)
        raw_markdown = file_utils.get_file(wiki_id, page_name + ".md")
    except FileNotFoundError:
        raise PageNotFoundError("ページが存在しません。")
    logger.debug("get_edit_contents:end")
    return raw_markdown
|
# dict -- dictionary -- {key: value} -- unordered
# key-value type
# keys must be an immutable type
# does not support indexing or slicing
print(type({
    1: 1,
    2: 2,
    3: 3
}))
print(type({
    '1': 1,
    1: 1,
    2: 2,
    3: 3
}))
print({
    1: 1,
    2: 2,
    3: 3
})
print({
    '1': 1,
    1: 1,
    2: 2,
    3: 3
})
# Raises: TypeError: unhashable type: 'list'
# print({
#     [1, 2]: 1,
#     2: 2,
#     3: 3
# })
# Using a tuple as a key works fine
print({
    (1, 2): 1,
    2: 2,
    3: 3
})
# How do you define an empty dict?
print(type({}))
|
# -*- coding: utf-8 -*-
"""Nobeyama Radioheliograph TimeSeries subclass definitions."""
from collections import OrderedDict
import pandas
import numpy as np
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.time import TimeDelta
import sunpy.io
from sunpy import config
from sunpy.time import parse_time
from sunpy.util.metadata import MetaDict
from sunpy.timeseries.timeseriesbase import GenericTimeSeries
import astropy.units as u
from astropy.time import TimeDelta
TIME_FORMAT = config.get("general", "time_format")
__all__ = ['NoRHTimeSeries']
class NoRHTimeSeries(GenericTimeSeries):
    """
    Nobeyama Radioheliograph Correlation Lightcurve TimeSeries.

    Nobeyama Radioheliograph (NoRH) is a radio telescope dedicated to observing
    the Sun. It consists of 84 parabolic antennas with 80 cm diameter,
    sitting on lines of 490 m long in the east/west and of 220 m long in the north/south.
    It observes the full solar disk at 17 GHz and 34 GHz with a temporal resolution
    down to 0.1 second resolution (typically 1 s). It is located in Japan at
    `35.941667, 138.475833 <https://www.google.com/maps/place/Nobeyama+radio+observatory/@35.9410098,138.470243,14z/data=!4m2!3m1!1s0x0:0xe5a3821a5f6a3c4b>`_.

    Its first observation was in April, 1992 and daily 8-hour observations are
    available starting June, 1992.

    Examples
    --------
    >>> import sunpy.data.sample  # doctest: +REMOTE_DATA
    >>> import sunpy.timeseries
    >>> norh = sunpy.timeseries.TimeSeries(sunpy.data.sample.NORH_TIMESERIES, source='NoRH')  # doctest: +REMOTE_DATA
    >>> norh.peek()  # doctest: +SKIP

    References
    ----------
    * `Nobeyama Radioheliograph Homepage <https://solar.nro.nao.ac.jp/norh/>`_
    * `Analysis Manual <https://solar.nro.nao.ac.jp/norh/doc/manuale/index.html>`_
    * `Nobeyama Correlation Plots <https://solar.nro.nao.ac.jp/norh/html/cor_plot/>`_
    """

    # Class attribute used to specify the source class of the TimeSeries.
    _source = 'norh'

    def __init__(self, data, header, units, **kwargs):
        super(NoRHTimeSeries, self).__init__(data, header, units, **kwargs)

    def peek(self, **kwargs):
        """
        Plots the NoRH lightcurve TimeSeries

        .. plot::

            import sunpy.data.sample
            import sunpy.timeseries
            norh = sunpy.timeseries.TimeSeries(sunpy.data.sample.NORH_TIMESERIES, source='NoRH')
            norh.peek()

        Parameters
        ----------
        **kwargs : `dict`
            Any additional plot arguments that should be used when plotting.
        """
        # Check we have a timeseries valid for plotting
        self._validate_data_for_ploting()
        figure = plt.figure()
        axes = plt.gca()
        data_lab = str(self.meta.get('OBS-FREQ').values()).replace('[', '').replace(
            ']', '').replace('\'', '')
        axes.plot(self.data.index, self.data, label=data_lab)
        axes.set_yscale("log")
        axes.set_ylim(1e-4, 1)
        axes.set_title('Nobeyama Radioheliograph')
        axes.set_xlabel('Start time: ' + self.data.index[0].strftime(TIME_FORMAT))
        axes.set_ylabel('Correlation')
        axes.legend()
        figure.show()

    @classmethod
    def _parse_file(cls, filepath):
        """This method parses NoRH tca and tcz correlation FITS files."""
        hdus = sunpy.io.read_file(filepath)
        return cls._parse_hdus(hdus)

    @classmethod
    def _parse_hdus(cls, hdulist):
        """This method parses NoRH tca and tcz correlation FITS files."""
        header = MetaDict(OrderedDict(hdulist[0].header))
        # For these NoRH files, the time series data is recorded in the primary
        # HDU
        data = hdulist[0].data
        # No explicit time array in FITS file, so construct the time array from
        # the FITS header
        obs_start_time = parse_time(header['DATE-OBS'] + 'T' + header['CRVAL1'])
        length = len(data)
        # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        cadence = float(header['CDELT1'])
        sec_array = np.linspace(0, length - 1, int(length / cadence))
        norh_time = obs_start_time + TimeDelta(sec_array*u.second)
        norh_time.precision = 9
        norh_time = norh_time.isot.astype('datetime64')
        # Add the units data
        units = OrderedDict([('Correlation Coefficient', u.dimensionless_unscaled)])
        # Todo: check units used.
        return pandas.DataFrame(
            data, index=norh_time, columns=('Correlation Coefficient', )), header, units

    @classmethod
    def is_datasource_for(cls, **kwargs):
        """Determines if header corresponds to a Nobeyama Radioheliograph Correlation lightcurve"""
        if 'source' in kwargs.keys():
            if kwargs.get('source', ''):
                return kwargs.get('source', '').lower().startswith(cls._source)
        if 'meta' in kwargs.keys():
            return kwargs['meta'].get('ORIGIN', '').startswith('NOBEYAMA RADIO OBS')
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
import dhtmlparser
from dhtmlparser import first
# Variables ===================================================================
# Module-level fixture shared by the tests below; test_replaceWith() and
# test_removeChild() mutate this DOM in sequence, so test order matters.
TEXT = "<div><nonpair /></div>"
dom = dhtmlparser.parseString(TEXT)
# Functions & objects =========================================================
def test_replaceWith():
    """Replacing <nonpair /> must rewrite the shared module-level DOM in place."""
    node = first(dom.find("nonpair"))
    assert node

    node.replaceWith(dhtmlparser.HTMLElement("<another />"))

    assert dom.find("another")
    assert dom.getContent() == "<div><another /></div>"
def test_removeChild():
    """Element removal, with and without the matching end tag."""
    # `dom` was mutated by test_replaceWith(), so <another /> is present here.
    dom.removeChild(dom.find("another"))
    assert dom.getContent() == "<div></div>"

    dom.removeChild(dom.find("div"), end_tag_too=False)
    assert dom.getContent() == ""
    assert len(dom.childs) == 1  # endtag wasn't removed

    fresh = dhtmlparser.parseString("<div></div>")
    fresh.removeChild(fresh.find("div"))
    assert fresh.getContent() == ""
    assert not fresh.childs
def test_params():
    """Clearing `params` must drop the attributes from the serialized tag."""
    local_dom = dhtmlparser.parseString("<xe id=1 />")
    element = first(local_dom.find("xe"))
    assert element.params["id"] == "1"

    element.params = {}
    assert str(element) == "<xe />"
|
# Definition for a binary tree node.
class TreeNode:
    """A single binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x
        # Children start unset; callers attach TreeNode instances directly.
        self.left = self.right = None
class Solution:
    """Maximum depth of a binary tree: the number of nodes on the longest root-to-leaf path."""

    def maxDepth(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        # `is None` (identity) instead of `== None`: idiomatic and immune to
        # custom __eq__ implementations on node classes.
        if root is None:
            return 0
        # calMaxDepth counts edges below root; +1 converts edges to node count.
        return self.calMaxDepth(root) + 1

    def calMaxDepth(self, node):
        """Return the number of edges on the longest downward path from `node`."""
        leftDepth = 0 if node.left is None else self.calMaxDepth(node.left) + 1
        rightDepth = 0 if node.right is None else self.calMaxDepth(node.right) + 1
        return max(leftDepth, rightDepth)
if __name__ == "__main__":
    solver = Solution()
    # Build:     1
    #           / \
    #          2   2
    #         /
    #        2
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.left.left = TreeNode(2)
    root.right = TreeNode(2)
    print(solver.maxDepth(root))
|
# -*- coding: UTF-8 -*-
from app import app

# Development server entry point.
# NOTE(review): debug=True enables the interactive Werkzeug debugger and
# host="0.0.0.0" binds on all network interfaces; together these expose the
# debugger to the network, so this must not be used in production.
app.run(debug=True, host="0.0.0.0")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-01-01 13:32
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration that drops the `createtimes` field from the `category` model."""

    # Must run after 0004 (which, by its name, introduced `createtimes`).
    dependencies = [
        ('cmdb', '0004_category_createtimes'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='category',
            name='createtimes',
        ),
    ]
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import os
import subprocess
import logging
import shutil
import glob
import uuid
from pyglidein.client_util import get_presigned_put_url, get_presigned_get_url
class Submit(object):
    """
    Base class for the submit classes

    Mostly to provide future expansion for common functions
    """
    def __init__(self, config, secrets):
        """
        Initialize

        Args:
            config: cluster config dict for cluster
            secrets: cluster secrets dict for cluster
        """
        self.config = config
        self.secrets = secrets
        # StartD cron scripts shipped alongside every glidein when enabled.
        self.startd_cron_scripts = ['clsim_gpu_test.py',
                                    'cvmfs_test.py',
                                    'gridftp_test.py',
                                    'post_cvmfs.sh',
                                    'pre_cvmfs.sh']

    def submit(self):
        """Submit a glidein job; batch-system subclasses must implement this."""
        raise NotImplementedError()

    def write_line(self, f, line):
        """
        Wrapper function so we dont have to write \n a million times

        Args:
            f: File handle
            line: Line to be written to file
        """
        f.write(line+"\n")

    def get_glidein_script(self):
        """
        Get the glidein startup script.

        Returns:
            String that is the location of the script
        """
        # If the user hasn't set ['Glidein']['glidein_script'] assume they want to use the
        # glidein_scripts provided by the python package.
        package_dir = os.path.dirname(os.path.abspath(__file__))
        glidein_script = os.path.join(package_dir, 'glidein_start.sh')
        if 'glidein_script' in self.config['Glidein']:
            glidein_script = self.config['Glidein']['glidein_script']
        return glidein_script

    def get_resource_limit_scale(self, key, sec="SubmitFile"):
        """
        Return scaling factor for job limit resources
        (e.g. memory, disk space).

        Args:
            key: key to evaluate in config file
            sec: section to evaluate in config file
                 (default: 'SubmitFile')

        Returns:
            A float that is the scaling factor for a resource
        """
        try:
            # look for entry and check type
            scale = self.config[sec][key]
            if not (isinstance(scale, int) or
                    isinstance(scale, float)):
                raise TypeError()
        except (KeyError, TypeError):
            # Fall back to a neutral scale of 1 when the entry is missing
            # (KeyError) or non-numeric (TypeError). The previous bare
            # `except:` would also have swallowed KeyboardInterrupt/SystemExit.
            scale = 1
        return scale

    def cleanup(self, cmd, direc):
        """No-op cleanup hook; subclasses override it to purge leftover job directories."""
        pass
class SubmitPBS(Submit):
    """Submit a PBS / Torque job"""
    option_tag = "#PBS"

    def write_option(self, f, line):
        """Write a single batch-system directive, prefixed with this class's option_tag."""
        self.write_line(f, self.option_tag+" "+line)

    def write_general_header(self, f, cluster_config, mem=3000, walltime_hours=14, disk=1,
                             num_nodes=1, num_cpus=1, num_gpus=0,
                             num_jobs=0):
        """
        Writing the header for a PBS submission script.
        Most of the pieces needed to tell PBS what resources
        are being requested.

        Args:
            f: python file object
            mem: requested memory
            walltime_hours: requested wall time
            num_nodes: requested number of nodes
            num_cpus: requested number of cpus
            num_gpus: requested number of gpus
        """
        self.write_line(f, "#!/bin/bash")
        # Add the necessary gpu request tag if we need gpus.
        resource_line = "-l nodes=%d:ppn=%d" % (num_nodes, num_cpus)
        node_property = cluster_config.get("node_property", False)
        if node_property:
            resource_line +=':%s' % (node_property)
        if num_gpus > 0 and cluster_config.get("set_gpu_req", True):
            resource_line += ':gpus=%d' % (num_gpus)
        self.write_option(f, resource_line)
        # Memory directives vary between PBS flavors; the config selects which
        # combination of pmem/vmem/mem the target cluster understands.
        if cluster_config.get("pmem_only", False):
            self.write_option(f, "-l pmem=%dmb" % mem)
        elif cluster_config.get("pvmem", False):
            self.write_option(f, "-l pmem=%dmb,vmem=%dmb" % (mem,mem*num_cpus))
        elif cluster_config.get("vmem_only", False):
            self.write_option(f, "-l vmem=%dmb" % mem)
        elif cluster_config.get("mem_only", False):
            self.write_option(f, "-l mem=%dmb" % mem)
        else:
            self.write_option(f, "-l pmem=%dmb,mem=%dmb" % (mem,mem*num_cpus))
        self.write_option(f, "-l walltime=%d:00:00" % walltime_hours)
        if ('Mode' in self.config and 'debug' in self.config['Mode']
            and self.config["Mode"]["debug"]):
            outdir = os.path.join(os.getcwd(),'out')
            if not os.path.isdir(outdir):
                os.mkdir(outdir)
            self.write_option(f, "-o %s/${PBS_JOBID}.out"%outdir)
            self.write_option(f, "-e %s/${PBS_JOBID}.err"%outdir)
        else:
            self.write_option(f, "-o /dev/null")
            self.write_option(f, "-e /dev/null")
        if num_jobs > 0:
            self.write_option(f, "-t 0-%d" % num_jobs)
        env_vars = '-v '
        if not self.config.get("StartdChecks", {}).get("enable_startd_checks", True):
            env_vars += 'DISABLE_STARTD_CHECKS=1'
        # Only emit the -v option if at least one variable was appended.
        if env_vars != '-v ':
            self.write_option(f, env_vars)

    def write_glidein_variables(self, f, mem=1000, walltime_hours=12,
                                num_cpus=1, num_gpus=0, disk=1):
        """
        Tell the glidein what resources it has.

        Args:
            f: python file object
            mem: memory provided for glidein
            walltime_hours: lifetime of glidein in hours
            num_cpus: number of cpus provided
            num_gpus: number of gpus provided
            disk: disk provided for glidein
        """
        self.write_line(f, "MEMORY=%d" % mem)
        # we do not want same walltime as this may lead to hitting the walltime, thus we spot the glidein 5 min earlier (300 sec)
        self.write_line(f, "WALLTIME=%d" % (walltime_hours*3600 - 300))
        self.write_line(f, "CPUS=%d" % num_cpus)
        self.write_line(f, "DISK=%d" % (disk*1024))
        if num_gpus:
            self.write_line(f, 'if [ "$CUDA_VISIBLE_DEVICES" -eq "$CUDA_VISIBLE_DEVICES" ] 2>/dev/null ; then')
            self.write_line(f, ' GPUS="CUDA${CUDA_VISIBLE_DEVICES}"')
            self.write_line(f, 'elif [ "x$CUDA_VISIBLE_DEVICES" = "x" ] ; then')
            self.write_line(f, ' GPUS=%d'%num_gpus)
            self.write_line(f, 'else')
            self.write_line(f, ' GPUS=$CUDA_VISIBLE_DEVICES')
            self.write_line(f, 'fi')
        else:
            self.write_line(f, 'GPUS=0')
        if 'site' in self.config['Glidein']:
            self.write_line(f, 'SITE="%s"' % self.config['Glidein']['site'])
        if 'resourcename' in self.config['Glidein']:
            self.write_line(f, 'ResourceName="%s"' % self.config['Glidein']['resourcename'])
        if 'cluster' in self.config['Glidein']:
            self.write_line(f, 'CLUSTER="%s"' % self.config['Glidein']['cluster'])

    def write_glidein_part(self, f, local_dir=None, glidein_tarball=None, presigned_put_url=None,
                           presigned_get_url=None):
        """
        Writing the pieces needed to execute the glidein

        Args:
            f: python file object
            local_dir: what is the local directory
            glidein_tarball: file name of tarball
        """
        self.write_line(f, 'CLEANUP=0')
        self.write_line(f, 'LOCAL_DIR=%s' % local_dir)
        self.write_line(f, 'if [ ! -d $LOCAL_DIR ]; then')
        self.write_line(f, ' mkdir -p $LOCAL_DIR')
        self.write_line(f, ' CLEANUP=1')
        self.write_line(f, 'fi')
        self.write_line(f, 'cd $LOCAL_DIR')
        if glidein_tarball:
            self.write_line(f, 'cp %s %s' % (glidein_tarball, os.path.basename(glidein_tarball)))
        glidein_script = self.get_glidein_script()
        if not os.path.isfile(glidein_script):
            raise Exception("glidein_script %s does not exist!" % glidein_script)
        self.write_line(f, 'cp %s %s' % (glidein_script, os.path.basename(glidein_script)))
        osarch_script = os.path.join(os.path.dirname(glidein_script), 'os_arch.sh')
        if not os.path.isfile(osarch_script):
            raise Exception("%s does not exist!" % osarch_script)
        self.write_line(f, 'cp %s %s' % (osarch_script, 'os_arch.sh'))
        log_shipper_script = os.path.join(os.path.dirname(glidein_script), 'log_shipper.sh')
        if not os.path.isfile(log_shipper_script):
            raise Exception("%s does not exist!" % log_shipper_script)
        self.write_line(f, 'cp %s %s' % (log_shipper_script, 'log_shipper.sh'))
        # Adding StartD Cron Scripts
        if self.config.get("StartdChecks", {}).get("enable_startd_checks", True):
            startd_cron_scripts_dir = os.path.join(os.path.dirname(glidein_script),
                                                   'startd_cron_scripts')
            if not os.path.isdir(startd_cron_scripts_dir):
                raise Exception("StartD cron scripts directory not found: "
                                "{}".format(startd_cron_scripts_dir))
            for script in self.startd_cron_scripts:
                script_path = os.path.join(startd_cron_scripts_dir, script)
                if not os.path.isfile(script_path):
                    raise Exception("StartD cron script not found: {}".format(script))
                self.write_line(f, 'cp %s %s' % (script_path, script))
        # env -i starts the glidein with a clean environment containing only
        # the variables assembled above.
        f.write('exec env -i CPUS=$CPUS GPUS=$GPUS MEMORY=$MEMORY DISK=$DISK WALLTIME=$WALLTIME '
                'DISABLE_STARTD_CHECKS=$DISABLE_STARTD_CHECKS ')
        if 'site' in self.config['Glidein']:
            f.write('SITE=$SITE ')
        if 'resourcename' in self.config['Glidein']:
            f.write('ResourceName=$ResourceName ')
        if 'cluster' in self.config['Glidein']:
            f.write('CLUSTER=$CLUSTER ')
        if self.config['SubmitFile'].get('cvmfs_job_wrapper', False):
            f.write('CVMFS_JOB_WRAPPER=1 ')
        if presigned_put_url is not None and presigned_get_url is not None:
            f.write('PRESIGNED_PUT_URL="{}" PRESIGNED_GET_URL="{}" '.format(presigned_put_url,
                                                                            presigned_get_url))
        if "CustomEnv" in self.config:
            for k, v in self.config["CustomEnv"].items():
                f.write(k + '=' + v + ' ')
        if 'executable' in self.config['SubmitFile']:
            f.write(str(self.config['SubmitFile']['executable']))
        else:
            f.write(str(os.path.basename(self.get_glidein_script())))
        #executable = os.path.basename(glidein_script)
        #self.write_line(f, './%s' % executable)
        self.write_line(f, '')
        self.write_line(f, 'if [ $CLEANUP -eq 1 ]; then')
        self.write_line(f, ' rm -rf $LOCAL_DIR')
        self.write_line(f, 'fi')

    def get_cores_for_memory(self, cluster_config, num_cpus_advertised, num_gpus_advertised, mem_advertised):
        """
        Scale number of cores to satisfy memory request, assuming fixed amount
        of memory per core.

        Args:
            num_cpus_advertised: the number of cores explicitly requested
            num_gpus_advertised: the number of GPUs explicitly requested
            mem_advertised: the total amount of memory requested

        Returns:
            num_cpus: number of cores to request
            mem_requested: amount of memory to request per core
            mem_advertised: amount of memory the Condor slot should advertise
        """
        num_cpus = num_cpus_advertised
        mem_requested = mem_advertised
        mem_per_core = cluster_config.get('mem_per_core', 2000)
        if num_gpus_advertised:
            if mem_requested > mem_per_core:
                # just ask for the max mem, and hope that's good enough
                mem_requested = mem_per_core
                mem_advertised = mem_requested
        else:
            # It is easier to request more cpus rather than more memory
            while mem_requested > mem_per_core:
                num_cpus += 1
                mem_requested = mem_advertised/num_cpus
        return num_cpus, mem_requested, mem_advertised

    def write_submit_file(self, filename, state, group_jobs, cluster_config,
                          presigned_put_url=None, presigned_get_url=None):
        """
        Writing the submit file

        Args:
            filename: name of PBS script to create
            state: what resource requirements a given glidein has
            group_jobs: if True, group jobs into arrays
            cluster_config: the Cluster config dict (or that of an alternate partition)
        """
        with open(filename, 'w') as f:
            if cluster_config['whole_node']:
                num_cpus = int(cluster_config['whole_node_cpus'])
                mem_advertised = int(cluster_config['whole_node_memory'])
                disk = int(cluster_config['whole_node_disk'])
                if 'whole_node_gpus' in cluster_config:
                    num_gpus = int(cluster_config['whole_node_gpus'])
                else:
                    num_gpus = 0
                if 'mem_per_core' not in cluster_config:
                    # by default assume we gave the correct amount in whole_node_memory
                    cluster_config['mem_per_core'] = 10000000
                _, mem_requested, mem_advertised = self.get_cores_for_memory(cluster_config, 1, num_gpus, mem_advertised)
            else:
                num_cpus = state["cpus"]
                mem_safety_margin = 1.05*self.get_resource_limit_scale("mem_safety_scale")
                mem_advertised = int(state["memory"]*mem_safety_margin)
                num_gpus = state["gpus"]
                disk = state["disk"]*1.1
                num_cpus, mem_requested, mem_advertised = self.get_cores_for_memory(cluster_config, num_cpus, num_gpus, mem_advertised)
            walltime = int(cluster_config["walltime_hrs"])
            self.write_general_header(f, cluster_config, mem=mem_requested, num_cpus=num_cpus,
                                      num_gpus=num_gpus, walltime_hours=walltime,
                                      disk=disk,
                                      num_jobs = state["count"] if group_jobs else 0)
            if "custom_header" in self.config["SubmitFile"]:
                self.write_line(f, self.config["SubmitFile"]["custom_header"])
            if "custom_middle" in self.config["SubmitFile"]:
                self.write_line(f, self.config["SubmitFile"]["custom_middle"])
            self.write_glidein_variables(f, mem=mem_advertised,
                                         num_cpus=num_cpus, num_gpus=num_gpus,
                                         walltime_hours=walltime, disk=disk)
            kwargs = {
                'local_dir': self.config["SubmitFile"]["local_dir"],
                'presigned_put_url': presigned_put_url,
                'presigned_get_url': presigned_get_url
            }
            if "tarball" in self.config["Glidein"]:
                if "loc" in self.config["Glidein"]:
                    glidein_tarball = os.path.join(self.config["Glidein"]["loc"],
                                                   self.config["Glidein"]["tarball"])
                else:
                    glidein_tarball = self.config["Glidein"]["tarball"]
                if os.path.isfile(glidein_tarball):
                    kwargs['glidein_tarball'] = glidein_tarball
                else:
                    raise Exception("The tarball you provided does not exist")
            self.write_glidein_part(f, **kwargs)
            if "custom_end" in self.config["SubmitFile"]:
                self.write_line(f, self.config["SubmitFile"]["custom_end"])

    def submit(self, state, partition="Cluster"):
        """
        Writing submit file and submitting a job for PBS-like batch managers

        Args:
            state: what resource requirements a given glidein has
        """
        submit_filename = 'submit.pbs'
        if 'filename' in self.config["SubmitFile"]:
            submit_filename = self.config["SubmitFile"]["filename"]
        cluster_config = self.config[partition]
        group_jobs = ("group_jobs" in cluster_config and
                      cluster_config["group_jobs"] and
                      "count" in state)
        self.write_submit_file(submit_filename, state, group_jobs, cluster_config)
        num_submits = 1 if group_jobs else state["count"] if "count" in state else 1
        for i in range(num_submits):
            # Use .get() so configs without a StartdLogging section don't
            # raise KeyError (matches SubmitCondor.submit).
            if self.config.get('StartdLogging', {}).get('send_startd_logs', False) is True:
                startd_logfile_name = '{}_{}.tar.gz'.format(self.config['Glidein']['site'], uuid.uuid4())
                presigned_put_url = get_presigned_put_url(startd_logfile_name, self.config, self.secrets)
                presigned_get_url = get_presigned_get_url(startd_logfile_name, self.config, self.secrets)
                self.write_submit_file(submit_filename,
                                       state,
                                       group_jobs,
                                       cluster_config,
                                       presigned_put_url,
                                       presigned_get_url)
            cmd = self.config[partition]["submit_command"] + " " + submit_filename
            if not ('Mode' in self.config and 'dryrun' in self.config['Mode'] and
                    self.config['Mode']['dryrun']):
                subprocess.check_call(cmd, shell=True)

    def cleanup(self, cmd, direc):
        """
        Cleans up temporary directories that were created on a network file system that were not
        deleted by the job itself. Checks whether the job ID used to identify a temporary directory
        is still in the queue. If it is not, the directory gets deleted.

        Args:
            cmd: Command needed to query about which jobs are running for the user
            direc: Which directory to look for the temporary directories
        """
        # NOTE(review): assumes `cmd` ends in a fixed 6-character suffix that
        # must be stripped to obtain the queue-query command - confirm against
        # the caller.
        cmd = cmd[:-6]
        # universal_newlines=True makes communicate() return str instead of
        # bytes, so the str splits below also work on Python 3.
        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, universal_newlines=True)
        d = p.communicate()[0]
        job_ids = set([job.split(" ")[0] for job in d.splitlines() if "[" not in job.split(" ")[0] ])
        dir_ids = set([dir.split("/")[-1].split(".")[0] for dir in glob.glob(os.path.join(os.path.expandvars(direc), "*"))])
        for ids in (dir_ids - job_ids):
            logging.info("Deleting %s", ids)
            shutil.rmtree(glob.glob(os.path.join(os.path.expandvars(direc), ids + "*"))[0])
class SubmitSLURM(SubmitPBS):
    """SLURM is similar to PBS, but with different headers"""
    option_tag = "#SBATCH"

    def write_general_header(self, f, cluster_config, mem=3000, walltime_hours=14, disk=1,
                             num_nodes=1, num_cpus=1, num_gpus=0,
                             num_jobs=0):
        """
        Writing the header for a SLURM submission script.
        Most of the pieces needed to tell SLURM what resources
        are being requested.

        Args:
            f: python file object
            mem: requested memory
            walltime_hours: requested wall time
            num_nodes: requested number of nodes
            num_cpus: requested number of cpus
            num_gpus: requested number of gpus
            num_jobs: requested number of jobs
        """
        if num_jobs > 1:
            raise Exception('more than one job not supported')
        self.write_line(f, "#!/bin/bash")
        self.write_option(f, '--job-name="glidein"')
        self.write_option(f, '--nodes=%d'%num_nodes)
        self.write_option(f, '--ntasks-per-node=%d'%num_cpus)
        self.write_option(f, '--mem=%d'%(mem))
        if num_gpus:
            # Allow the config to override the GRES request format.
            gpu_submit = '--gres=gpu:%d'
            if 'gpu_submit' in self.config['SubmitFile']:
                gpu_submit = self.config['SubmitFile']['gpu_submit']
            self.write_option(f, gpu_submit%num_gpus)
        if "partition" in cluster_config:
            self.write_option(f, "--partition=%s" % cluster_config["partition"])
        self.write_option(f, "--time=%d:00:00" % walltime_hours)
        # Guard the Mode lookup so configs without a Mode section do not raise
        # KeyError (matches SubmitPBS.write_general_header).
        if ('Mode' in self.config and 'debug' in self.config['Mode']
                and self.config["Mode"]["debug"]):
            log_dir = os.path.join(os.getcwd(), 'out')
            if not os.path.isdir(log_dir):
                os.mkdir(log_dir)
            self.write_option(f, "--output="+os.path.join(log_dir, "%j.out"))
            self.write_option(f, "--error="+os.path.join(log_dir, "%j.err"))
        else:
            self.write_option(f, "--output=/dev/null")
            self.write_option(f, "--error=/dev/null")
        self.write_option(f, "--export=ALL")
class SubmitUGE(SubmitPBS):
    """UGE is similar to PBS, but with different headers"""
    option_tag = "#$"

    def get_cores_for_memory(self, cluster_config, num_cpus_advertised, num_gpus_advertised, mem_advertised):
        """
        Scale number of cores to satisfy memory request.
        UGE can assign variable memory per core, so just pass the request straight through.
        """
        return num_cpus_advertised, mem_advertised, mem_advertised

    def write_general_header(self, f, cluster_config, mem=3000, walltime_hours=14, disk=1,
                             num_nodes=1, num_cpus=1, num_gpus=0,
                             num_jobs=0):
        """
        Writing the header for a UGE submission script.
        Most of the pieces needed to tell UGE what resources
        are being requested.

        Args:
            f: python file object
            mem: requested memory
            walltime_hours: requested wall time
            num_nodes: requested number of nodes
            num_cpus: requested number of cpus
            num_gpus: requested number of gpus
        """
        self.write_line(f, "#!/bin/bash")
        self.write_option(f, '-S /bin/bash')
        # h_rss is a per-slot limit, so divide the total by the cpu count.
        self.write_option(f, '-l h_rss=%dM'%(mem//num_cpus))
        self.write_option(f, '-l tmpdir_size=%dM'%(max((disk//num_cpus, 1000))))
        if num_gpus:
            self.write_option(f, "-l gpu=%d"%num_gpus)
        if num_cpus > 1:
            self.write_option(f, "-pe multicore %d"%num_cpus)
        self.write_option(f, "-l h_rt=%d:00:00" % walltime_hours)
        # Guard the Mode lookup so configs without a Mode section do not raise
        # KeyError (matches SubmitPBS.write_general_header).
        if ('Mode' in self.config and 'debug' in self.config['Mode']
                and self.config["Mode"]["debug"]):
            self.write_option(f, "-o %s/out/$JOB_ID.out"%os.getcwd())
            self.write_option(f, "-e %s/out/$JOB_ID.err"%os.getcwd())
        else:
            self.write_option(f, "-o /dev/null")
            self.write_option(f, "-e /dev/null")
        if num_jobs > 0:
            self.write_option(f, "-t 1-%d" % num_jobs)
class SubmitLSF(SubmitPBS):
    """LSF is similar to PBS, but with different headers"""
    option_tag = "#BSUB"
    def write_general_header(self, f, cluster_config, mem=3000, walltime_hours=14, disk=1,
                             num_nodes=1, num_cpus=1, num_gpus=0,
                             num_jobs=0):
        """
        Writing the header for an LSF submission script.
        Most of the pieces needed to tell LSF what resources
        are being requested.

        Args:
            f: python file object
            mem: requested memory
            walltime_hours: requested wall time
            num_nodes: requested number of nodes
            num_cpus: requested number of cpus
            num_gpus: requested number of gpus
            num_jobs: number of jobs in a job array
        """
        self.write_line(f, "#!/bin/bash")
        #if num_gpus > 0:
        #    self.write_option(f, "-R 'rusage[cuda=%d]'" % num_gpus)
        walltime_line = "-W %d:00" % walltime_hours
        # check for additional parameters in config
        if 'SubmitFile' in self.config:
            submit_conf = self.config['SubmitFile']
            if 'ref_host' in submit_conf:
                # add reference host for walltime if given
                walltime_line+="/%s" % submit_conf['ref_host']
        self.write_option(f, walltime_line)
        # default memory units are kB for LSF
        mem_scale = 1000
        # scale memory to non-default units if parameter exists
        mem_scale*=self.get_resource_limit_scale("mem_scale")
        self.write_option(f, "-M %d" % (mem*mem_scale))
        self.write_option(f, "-n %d" % num_cpus)
        # NOTE(review): the string literal below is a bare expression (inert at
        # runtime); it is disabled multi-node reservation code kept for reference.
        """
        # ignore for now
        # need to make sure to reserve the correct number of nodes
        cpus_tot = num_cpus
        if num_nodes > 1:
            cpus_tot = num_nodes*cpus_per_node
        self.write_option(f, "-n %d -R 'span[ptile=%d]'" %\
                          (cpus_tot, cpus_per_node))
        """
        if num_jobs > 0:
            # job name will be "[index]"
            self.write_option(f, "-J [1-%d]" % num_jobs)
        if ('Mode' in self.config and 'debug' in self.config['Mode']
            and self.config['Mode']['debug']):
            outdir = os.path.join(os.getcwd(), 'out')
            if not os.path.isdir(outdir):
                os.mkdir(outdir)
            # %I is job index (all jobs in an array have same id %J)
            self.write_option(f, "-o %s/%%J_%%I.out" % outdir)
            self.write_option(f, "-e %s/%%J_%%I.err" % outdir)
        else:
            self.write_option(f, "-o /dev/null")
            self.write_option(f, "-e /dev/null")
class SubmitSGE(SubmitPBS):
    """SGE is similar to PBS, but with different headers"""
    option_tag = "#$"

    def get_cores_for_memory(self, cluster_config, num_cpus_advertised, num_gpus_advertised, mem_advertised):
        """
        Scale number of cores to satisfy memory request.
        SGE can assign variable memory per core, so just pass the request straight through.
        """
        return num_cpus_advertised, mem_advertised, mem_advertised

    def write_general_header(self, f, cluster_config, mem=3000, walltime_hours=14, disk=1,
                             num_nodes=1, num_cpus=1, num_gpus=0,
                             num_jobs=0):
        """
        Writing the header for a SGE submission script.
        Most of the pieces needed to tell SGE what resources
        are being requested.

        Args:
            f: python file object
            mem: requested memory
            walltime_hours: requested wall time
            num_nodes: requested number of nodes
            num_cpus: requested number of cpus
            num_gpus: requested number of gpus
        """
        self.write_line(f, "#!/bin/bash")
        self.write_option(f, '-S /bin/bash')
        # h_rss is a per-slot limit, so divide the total by the cpu count.
        self.write_option(f, '-l h_rss=%dM'%(mem//num_cpus))
        if num_gpus:
            self.write_option(f, "-l gpu=%d"%num_gpus)
        if num_cpus > 1:
            self.write_option(f, "-pe mpi %d"%num_cpus)
        self.write_option(f, "-l h_rt=%d:00:00" % walltime_hours)
        # Guard the Mode lookup so configs without a Mode section do not raise
        # KeyError (matches SubmitPBS.write_general_header).
        if ('Mode' in self.config and 'debug' in self.config['Mode']
                and self.config["Mode"]["debug"]):
            self.write_option(f, "-o %s/out/$JOB_ID.out"%os.getcwd())
            self.write_option(f, "-e %s/out/$JOB_ID.err"%os.getcwd())
        else:
            self.write_option(f, "-o /dev/null")
            self.write_option(f, "-e /dev/null")
        if num_jobs > 0:
            self.write_option(f, "-t 1-%d" % num_jobs)
class SubmitCondor(Submit):
    """Submit an HTCondor job"""

    def make_env_wrapper(self, env_wrapper, cluster_config):
        """
        Creating wrapper execute script for
        HTCondor submit file

        Args:
            env_wrapper: name of wrapper script
        """
        with open(env_wrapper, 'w') as f:
            self.write_line(f, '#!/bin/sh')
            # Read the slot resources straight out of the machine ClassAd.
            self.write_line(f, 'CPUS=$(grep -e "^Cpus" $_CONDOR_MACHINE_AD|awk -F "= " "{print \\$2}")')
            self.write_line(f, 'MEMORY=$(grep -e "^Memory" $_CONDOR_MACHINE_AD|awk -F "= " "{print \\$2}")')
            self.write_line(f, 'DISK=$(grep -e "^Disk =" $_CONDOR_MACHINE_AD|awk -F "= " "{print \\$2}")')
            self.write_line(f, 'GPUS=$(grep -e "^AssignedGPUs" $_CONDOR_MACHINE_AD|awk -F "= " "{print \\$2}"|sed "s/\\"//g")')
            self.write_line(f, 'if ( [ -z $GPUS ] && [ ! -z $CUDA_VISIBLE_DEVICES ] ); then')
            self.write_line(f, ' GPUS=$CUDA_VISIBLE_DEVICES')
            self.write_line(f, 'fi')
            self.write_line(f, 'GPUS_NO_DIGITS=$(echo $GPUS | sed \'s/[0-9]*//g\')')
            self.write_line(f, 'if [ "${GPUS_NO_DIGITS}" = "${GPUS}" ]; then')
            self.write_line(f, ' GPUS=""')
            self.write_line(f, 'elif [ -z $GPUS_NO_DIGITS ]; then')
            self.write_line(f, ' GPUS="CUDA${GPUS}"')
            self.write_line(f, 'fi')
            self.write_line(f, 'if ( [ -z $GPUS ] || [ "$GPUS" = "10000" ] || [ "$GPUS" = "CUDA10000" ] ); then')
            self.write_line(f, ' GPUS=0')
            self.write_line(f, 'fi')
            if 'site' in self.config['Glidein']:
                self.write_line(f, 'SITE="%s"' % self.config['Glidein']['site'])
            if 'resourcename' in self.config['Glidein']:
                self.write_line(f, 'ResourceName="%s"' % self.config['Glidein']['resourcename'])
            else:
                self.write_line(f, 'ResourceName=$(grep -e "^GLIDEIN_ResourceName" $_CONDOR_MACHINE_AD|awk -F "= " "{print \\$2}"|sed "s/\\"//g")')
            if 'cluster' in self.config['Glidein']:
                self.write_line(f, 'CLUSTER="%s"' % self.config['Glidein']['cluster'])
            # env -i starts the glidein with a clean environment containing
            # only the variables assembled above.
            f.write('exec env -i CPUS=$CPUS GPUS=$GPUS MEMORY=$MEMORY DISK=$DISK '
                    'PRESIGNED_PUT_URL=$PRESIGNED_PUT_URL PRESIGNED_GET_URL=$PRESIGNED_GET_URL ')
            if 'site' in self.config['Glidein']:
                f.write('SITE=$SITE ')
            if 'resourcename' in self.config['Glidein']:
                f.write('ResourceName=$ResourceName ')
            if 'cluster' in self.config['Glidein']:
                f.write('CLUSTER=$CLUSTER ')
            # reduce walltime for internal job by 300 sec (5min) to make sure it finished before the outer job hits the walltime
            walltime = int(cluster_config["walltime_hrs"])*3600-300
            f.write('WALLTIME=%d '%walltime)
            if self.config['SubmitFile'].get('cvmfs_job_wrapper', False):
                f.write('CVMFS_JOB_WRAPPER=1 ')
            if "CustomEnv" in self.config:
                for k, v in self.config["CustomEnv"].items():
                    f.write(k + '=' + v + ' ')
            if 'executable' in self.config['SubmitFile']:
                f.write(str(self.config['SubmitFile']['executable']))
            else:
                f.write(str(os.path.basename(self.get_glidein_script())))
            # Make the wrapper executable (add the execute bits).
            mode = os.fstat(f.fileno()).st_mode
            mode |= 0o111
            os.fchmod(f.fileno(), mode & 0o7777)

    def make_submit_file(self, filename, env_wrapper, state, group_jobs, cluster_config,
                         presigned_put_url=None, presigned_get_url=None):
        """
        Creating HTCondor submit file

        Args:
            filename: name of HTCondor submit file
            env_wrapper: name of wrapper script
            state: what resource requirements a given glidein has
        """
        with open(filename, 'w') as f:
            if "custom_header" in self.config["SubmitFile"]:
                self.write_line(f, self.config["SubmitFile"]["custom_header"])
            if ('Mode' in self.config and 'debug' in self.config['Mode']
                and self.config["Mode"]["debug"]):
                outdir = os.path.join(os.getcwd(),'out')
                if not os.path.isdir(outdir):
                    os.mkdir(outdir)
                self.write_line(f, "output = %s/$(Cluster).out"%outdir)
                self.write_line(f, "error = %s/$(Cluster).out"%outdir)
            else:
                self.write_line(f, "output = /dev/null")
                self.write_line(f, "error = /dev/null")
            if 'log' in self.config['SubmitFile']:
                self.write_line(f, "log = "+self.config['SubmitFile']['log'])
            else:
                self.write_line(f, "log = log")
            self.write_line(f, "notification = never")
            self.write_line(f, "should_transfer_files = YES")
            self.write_line(f, "when_to_transfer_output = ON_EXIT")
            self.write_line(f, "want_graceful_removal = True")
            self.write_line(f, "executable = %s" % env_wrapper)
            self.write_line(f, "+TransferOutput=\"\"")
            # get input files
            infiles = []
            glidein_script = self.get_glidein_script()
            if not os.path.isfile(glidein_script):
                raise Exception("no glidein_script provided")
            infiles.append(glidein_script)
            osarch_script = os.path.join(os.path.dirname(glidein_script),'os_arch.sh')
            if not os.path.isfile(osarch_script):
                raise Exception("os_arch.sh not found")
            infiles.append(osarch_script)
            log_shipper_script = os.path.join(os.path.dirname(glidein_script),'log_shipper.sh')
            if not os.path.isfile(log_shipper_script):
                raise Exception("log_shipper.sh not found")
            infiles.append(log_shipper_script)
            if "tarball" in self.config["Glidein"]:
                if not os.path.isfile(self.config["Glidein"]["tarball"]):
                    raise Exception("provided tarball does not exist")
                infiles.append(self.config["Glidein"]["tarball"])
            # Adding StartD Cron Scripts
            if self.config.get("StartdChecks", {}).get("enable_startd_checks", True):
                startd_cron_scripts_dir = os.path.join(os.path.dirname(glidein_script),
                                                       'startd_cron_scripts')
                if not os.path.isdir(startd_cron_scripts_dir):
                    raise Exception("StartD cron scripts directory not found: "
                                    "{}".format(startd_cron_scripts_dir))
                for script in self.startd_cron_scripts:
                    script_path = os.path.join(startd_cron_scripts_dir, script)
                    if not os.path.isfile(script_path):
                        raise Exception("StartD cron script not found: {}".format(script))
                    infiles.append(os.path.join(startd_cron_scripts_dir, script))
            self.write_line(f, "transfer_input_files = %s"%(','.join(infiles)))
            if "custom_middle" in self.config["SubmitFile"]:
                self.write_line(f, self.config["SubmitFile"]["custom_middle"])
            if cluster_config['whole_node']:
                num_cpus = int(cluster_config['whole_node_cpus'])
                mem = int(cluster_config['whole_node_memory'])
                disk = int(cluster_config['whole_node_disk'])*1000
                if 'whole_node_gpus' in cluster_config:
                    num_gpus = int(cluster_config['whole_node_gpus'])
                else:
                    num_gpus = 0
                self.write_line(f, 'request_cpus=%d' % num_cpus)
                self.write_line(f, 'request_memory=%d' % mem)
                self.write_line(f, 'request_disk=%d' % disk)
                if state["gpus"] != 0 and num_gpus:
                    self.write_line(f, 'request_gpus=%d' % num_gpus)
            else:
                if state["cpus"] != 0:
                    self.write_line(f, 'request_cpus=%d' % state["cpus"])
                if state["memory"] != 0:
                    mem_safety_margin = 1.1*self.get_resource_limit_scale("mem_safety_scale")
                    self.write_line(f, 'request_memory=%d' % int(state["memory"]*mem_safety_margin))
                if state["disk"] != 0:
                    self.write_line(f, 'request_disk=%d' % int(state["disk"]*1024*1.1))
                if state["gpus"] != 0:
                    self.write_line(f, 'request_gpus=%d' % int(state["gpus"]))
            # Creating environment variables
            environment_variables = ''
            if presigned_put_url is not None and presigned_get_url is not None:
                environment_variables += ('PRESIGNED_PUT_URL={} '
                                          'PRESIGNED_GET_URL={} ').format(presigned_put_url,
                                                                          presigned_get_url)
            if not self.config.get("StartdChecks", {}).get("enable_startd_checks", True):
                environment_variables += 'DISABLE_STARTD_CHECKS=1'
            if environment_variables != '':
                self.write_line(f, 'environment = "%s"' % environment_variables)
            if "custom_footer" in self.config["SubmitFile"]:
                self.write_line(f, self.config["SubmitFile"]["custom_footer"])
            if group_jobs:
                self.write_line(f, 'queue %d' % state["count"])
            else:
                self.write_line(f, 'queue')

    def submit(self, state, partition="Cluster"):
        """
        Writing submit file and submitting a HTCondor job

        Args:
            state: what resource requirements a given glidein has
        """
        submit_filename = 'submit.condor'
        if 'filename' in self.config["SubmitFile"]:
            submit_filename = self.config["SubmitFile"]["filename"]
        env_filename = 'env_wrapper.sh'
        if 'env_wrapper_name' in self.config['SubmitFile']:
            env_filename = self.config["SubmitFile"]["env_wrapper_name"]
        cluster_config = self.config[partition]
        group_jobs = ("group_jobs" in cluster_config and
                      cluster_config["group_jobs"] and
                      "count" in state)
        self.make_env_wrapper(env_filename, cluster_config)
        num_submits = 1 if group_jobs else state["count"] if "count" in state else 1
        for i in range(num_submits):
            if self.config.get('StartdLogging', {}).get('send_startd_logs', False) is True:
                startd_logfile_name = '{}_{}.tar.gz'.format(self.config['Glidein']['site'],
                                                            uuid.uuid4())
                presigned_put_url = get_presigned_put_url(startd_logfile_name, self.config,
                                                          self.secrets)
                presigned_get_url = get_presigned_get_url(startd_logfile_name, self.config,
                                                          self.secrets)
                self.make_submit_file(submit_filename,
                                      env_filename,
                                      state,
                                      group_jobs,
                                      cluster_config,
                                      presigned_put_url,
                                      presigned_get_url)
            else:
                self.make_submit_file(submit_filename,
                                      env_filename,
                                      state,
                                      group_jobs,
                                      cluster_config)
            cmd = cluster_config["submit_command"] + " " + submit_filename
            # Honor dryrun mode like SubmitPBS.submit instead of always
            # calling the submit command.
            if not ('Mode' in self.config and 'dryrun' in self.config['Mode'] and
                    self.config['Mode']['dryrun']):
                subprocess.check_call(cmd, shell=True)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 22 13:19:47 2018
This script loads (pre-processed) CMF and PCR output as well as observed discharge.
It subsequently alignes the lengths of the time series in case they differ.
Time series are plotted for both Hardinge Bridge and Bahadurabad.
For further analysis of for instance the KGE, the aligned time series are saved to file.
@author: J.M. Hoch
@mail: j.m.hoch@uu.nl
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import datetime
# DEFINE PLOTTING PERIOD
# since it does not need to correspond with entire modelling period
def date_range(start, end):
    """Return the list of consecutive dates from *start* through *end*, inclusive."""
    one_day = datetime.timedelta(days=1)
    dates = []
    current = start
    while current <= end:
        dates.append(current)
        current = current + one_day
    return dates
# Modelling period.
# Fix: leading-zero integer literals (e.g. 01) are octal in Python 2 and a
# SyntaxError in Python 3; plain decimal integers are equivalent and portable.
start = datetime.date(2000, 1, 1)
end = datetime.date(2009, 12, 31)
end_plot = datetime.date(2010, 1, 1)  # NOTE(review): unused below -- kept for reference
date = date_range(start, end)
# LOAD FILES
# observed discharge
obs_hab_fo = r'path/to/hab_iwm.txt' # at Hardinge Bridge
obs_bah_fo = r'path/to/bah_iwm.txt' # at Bahadurabad
obs_hab = np.loadtxt(obs_hab_fo)
obs_bah = np.loadtxt(obs_bah_fo)
# CMF discharge
# from script '0_preprocess_and_sample_CMF_output.py'
CMF_hab_fo = r'path/to/sampled_CMF_output/QCMF_at_HardingeBridge.txt'
CMF_bah_fo = r'path/to/sampled_CMF_output/QCMF_at_Bahadurabad.txt'
CMF_hab = np.loadtxt(CMF_hab_fo)
CMF_bah = np.loadtxt(CMF_bah_fo)
# PCR discharge
# using dump-files here obtained from ncview (4 header rows, 2nd column holds Q)
PCR_hab_fo = r'path/to/sampled_PCR_output/QPCR_at_HardingeBridge.dump'
PCR_bah_fo = r'path/to/sampled_PCR_output/QPCR_at_Bahadurabad.dump'
PCR_hab = np.loadtxt(PCR_hab_fo, skiprows=4)[:,1]
PCR_bah = np.loadtxt(PCR_bah_fo, skiprows=4)[:,1]
# FIND COMMON TIME PERIOD
# in case time lengths differ
minLength = min(len(PCR_bah), len(CMF_bah))
lag1 = len(CMF_bah) - minLength  # CMF series may have a lead-in; skip it
# LIMIT ARRAYS TO COMMON TIME PERIOD
# NOTE(review): minLength is derived from the simulated series only; confirm
# the observed series are at least this long.
obs_hab_mL = obs_hab[:minLength]
obs_bah_mL = obs_bah[:minLength]
CMF_hab_mL = CMF_hab[lag1:minLength+lag1]
CMF_bah_mL = CMF_bah[lag1:minLength+lag1]
PCR_hab_mL = PCR_hab[:minLength]
PCR_bah_mL = PCR_bah[:minLength]
date = date[:minLength]
# DEFINE TIME PERIOD FOR PLOT AND TIMESERIES OUTPUT
d1 = datetime.date(2004, 1, 1)
d2 = datetime.date(2009, 1, 1)
delta_d1 = (d1 - start).days
delta_d2 = (d2 - start).days
# PLOT OBSERVED AND SIMULATED DISCHARGE
# at Hardinge Bridge (discharge plotted in 10^3 m3/s, hence the /1000)
fig = plt.figure(1, figsize=(40,20))
ax1 = fig.add_subplot(211)
l_obs, = ax1.plot_date(x=date, y=obs_hab_mL/1000, fmt=':', c='k', linewidth=8)
l_pcr, = ax1.plot_date(x=date, y=PCR_hab_mL/1000, fmt='-', c='r', linewidth=8)
l_cmf, = ax1.plot_date(x=date, y=CMF_hab_mL/1000, fmt='-', c='b', linewidth=8)
ax1.legend((l_obs, l_pcr, l_cmf, ), ('OBS', 'PCR-DynRout', 'PCR->CMF', ), loc=1, ncol=3, frameon=True)
ax1.set_yticks(np.arange(20,60.1,20))
ax1.set_ylim(0,60)
ax1.set_xlim(d1, d2)
ax1.xaxis.labelpad = 1
plt.setp(ax1.get_yticklabels())
plt.setp(ax1.get_xticklabels(), visible=False)
# at Bahadurabad
ax2 = fig.add_subplot(212)
ax2.plot_date(x=date, y=obs_bah_mL/1000, fmt=':', c='k', linewidth=8)
ax2.plot_date(x=date, y=PCR_bah_mL/1000, fmt='-', c='r', linewidth=8)
ax2.plot_date(x=date, y=CMF_bah_mL/1000, fmt='-', c='b', linewidth=8)
ax2.set_yticks(np.arange(30,90.1,30))
ax2.set_ylim(0,90)
ax2.set_xlim(d1, d2)
ax2.xaxis.labelpad = 1
plt.setp(ax2.get_yticklabels())
plt.setp(ax2.get_xticklabels(), visible=True)
plt.text(0.05,0.9,'b)',fontweight='bold')
pos1 = ax1.get_position()
posX0leg = pos1.x0 - 0.125
# NOTE(review): fontweight=22 looks like it was meant to be fontsize=22 -- confirm.
fig.text(posX0leg, 0.5, 'discharge Q [$10^3 m^3 s^{-1}$]', va='center', rotation='vertical', fontweight=22)
plt.tight_layout()
# SAVE ALIGNED TXT FILES FOR ANALYSIS IN R-STUDIO
# observed discharge
np.savetxt(r'path/to/validationFolder/hab_iwm_mL.txt', obs_hab_mL[delta_d1:delta_d2])
np.savetxt(r'path/to/validationFolder/bah_iwm_mL.txt', obs_bah_mL[delta_d1:delta_d2])
# CMF discharge
np.savetxt(r'path/to/validationFolder/CMF_HardingeBridge_aligned.txt', CMF_hab_mL[delta_d1:delta_d2])
np.savetxt(r'path/to/validationFolder/CMF_Bahadurabad_aligned.txt', CMF_bah_mL[delta_d1:delta_d2])
# PCR discharge
np.savetxt(r'path/to/validationFolder/PCR_HardingeBridge_aligned.txt', PCR_hab_mL[delta_d1:delta_d2])
np.savetxt(r'path/to/validationFolder/PCR_Bahadurabad_aligned.txt', PCR_bah_mL[delta_d1:delta_d2])
|
#!/usr/bin/python
import sys
import os
import pickle
from optparse import OptionParser
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(parent_dir)
from tp_utils import pipe
def write_debug_pipe(obj):
    """Pickle *obj* and write the serialized bytes to the debug pipe."""
    with open(pipe.DEBUG_PIPE_PATH, 'wb') as out_file:
        pickle.dump(obj, out_file)
if __name__ == "__main__":
    # CLI entry point: forward an (operation type, message) pair to the
    # debug pipe for another process to consume.
    usage = """usage: python %prog [options]
    -t, --type operation type
    -m, --message operation message"""
    parser = OptionParser(usage)
    parser.add_option("-t", "--type", dest = "cmd_type",
                      action = "store", type = "string",
                      default = "",
                      help = "operation type, type candidate: query_clause,query_entity")
    parser.add_option("-m", "--message", dest = "msg",
                      action = "store", type = "string",
                      default = "",
                      help = "operation msg")
    (options, args) = parser.parse_args()
    # Both options are required; exit non-zero when either is missing.
    if options.cmd_type == "" or options.msg == "":
        print('type and message are invalid, type:%s, message:%s'
              %(options.cmd_type, options.msg))
        sys.exit(-1)
    print('type:%s, message:%s'%(options.cmd_type, options.msg))
    write_debug_pipe((options.cmd_type, options.msg))
|
from __future__ import division
import caffe
import numpy as np
from sklearn.externals import joblib
BN_EPS = 1e-8
def get_caffenet(model_filename):
    """Instantiate the Caffe network described by *model_filename* in TEST phase."""
    net = caffe.Net(model_filename, caffe.TEST)
    return net
def load_weights(weights_filename):
    """Load and return the joblib-pickled weights stored in *weights_filename*."""
    loaded = joblib.load(weights_filename)
    return loaded
def check_caffe_weights(weights):
    """
    Validate the parameter blobs of one Caffe layer and label them.

    A Caffe layer carries either one blob (the filters/weights) or two
    (filters/weights plus a bias). The bias, when present, must be a
    vector whose length equals the leading dimension of the weights.

    Args:
        weights: sequence of 1 or 2 array-likes (each exposing `.shape`).

    Returns:
        dict with key 'weight' (one-element slice of *weights*) and, when a
        bias is present, key 'shift' (the bias as a one-element slice).

    Raises:
        AssertionError: if the blob count or shapes are inconsistent.
    """
    shapes = [tuple(w.shape) for w in weights]
    # each caffe layer has 1 or 2 blobs (the filters/weights and maybe a bias)
    assert len(shapes) in (1, 2)
    # weights are 2-D (inner product) up to 4-D (convolution)
    assert 2 <= len(shapes[0]) <= 4
    dim = shapes[0][0]
    has_bias = (len(shapes) == 2)
    group = {}
    group['weight'] = weights[:1]
    # Fix: use has_bias instead of re-testing len(shapes) == 2 -- the flag
    # was previously computed but never read.
    if has_bias:
        bias = shapes[1]
        assert len(bias) == 1 and bias[0] == dim
        group['shift'] = weights[1:]
    return group
def check_theano_weights(weights):
    """
    Validate one Theano layer's parameter arrays and label each of them.

    Returns a dict mapping 'weight', and optionally 'bn' (batch-norm
    count/mean/var), 'scale' (gains) and 'shift' (biases), to slices of
    *weights*. For 2-D (dense) weights the filter matrix is transposed
    IN PLACE inside the *weights* list, to match Caffe's (out, in) layout.
    """
    # theano weights may have any of these structures:
    # len(shapes) ==
    # 1: (filters/weights)
    # 2: (filters/weights, biases)
    # 3: (filters/weights, gains, biases)
    # 4: (filters/weights, BN count, BN mean, BN var)
    # 5: (filters/weights, BN count, BN mean, BN var, biases)
    # 6: (filters/weights, BN count, BN mean, BN var, gains, biases)
    shapes = [w.shape for w in weights]
    assert 1 <= len(shapes) <= 6
    assert 2 <= len(shapes[0]) <= 4
    if len(shapes[0]) == 4:
        # convolution: leading axis is the output-channel count
        dim = shapes[0][0]
    elif len(shapes[0]) == 2:
        # dense layer: Theano stores (in, out); transpose to (out, in)
        dim = shapes[0][1]
        weights[0] = weights[0].T
    else:
        raise ValueError('Unknown ndims: %d' % len(shapes[0]))
    group = {}
    group['weight'] = weights[:1]
    offset = 1
    if len(shapes) >= 4:
        # has BN
        count, mean, var = shapes[1:4]
        assert len(count) == 0  # BN count is a scalar
        assert len(mean) == 1 and mean[0] == dim
        assert len(var) == 1 and var[0] == dim
        group['bn'] = weights[1:4]
        offset = 4
    if len(shapes) - offset >= 1:
        # whatever trails the weight/BN arrays ends with the bias vector
        bias = shapes[-1]
        assert len(bias) == 1 and bias[0] == dim
        group['shift'] = weights[-1:]
    if len(shapes) - offset >= 2:
        # second-to-last array is the gain (scale) vector
        gain = shapes[-2]
        assert len(gain) == 1 and gain[0] == dim
        group['scale'] = weights[-2:-1]
    return group
def transplant_weights(weights, caffenet, flip_filters=True, reverse_3ch=True):
    """
    Copy a flat list of Theano-saved parameter arrays into *caffenet*,
    folding batch-norm / scale / shift parameters into the preceding
    convolution weights where present. Python 2 only (print statements).

    Stops early (with a warning) at the first layer whose shapes do not
    match; earlier layers remain transplanted.
    """
    # Split the flat array list into per-layer groups: each group starts at
    # an array with ndim >= 2 (the filters/weights).
    weight_inds = [i for i, w in enumerate(weights) if len(w.shape) >= 2]
    weights = [weights[start:end]
               for start, end in zip([0] + weight_inds, weight_inds + [None])
               if (end is None or end > start)]
    # NOTE(review): weights_index is never read below -- appears vestigial.
    weights_index = 0
    mismatched = None
    num_layers = 0
    for (name, caffe_weights), theano_weights in \
            zip(caffenet.params.items(), weights):
        caffe_weights = check_caffe_weights(caffe_weights)
        group = theano_weights = check_theano_weights(theano_weights)
        if len(theano_weights) > 1 and len(caffe_weights) == 1:
            print ('Layer "%s" did not match: '
                   'Theano had bias; Caffe layer had only weights') % name
            mismatched = name
            break
        # caffe blob for the filters (rebinds the name "weights" locally)
        weights = caffe_weights['weight'][0]
        source_weights = group['weight'][0]
        if tuple(weights.shape) != source_weights.shape:
            print ('Layer "%s" did not match: '
                   'weight.shape = %s != %s = source_weight.shape') \
                % (name, tuple(weights.shape), source_weights.shape)
            mismatched = name
            break
        # NOTE(review): source_params is never used below.
        source_params = caffenet.params[name]
        # scale/shift accumulate the affine correction folded into this layer
        scale = 1
        if 'shift' in caffe_weights:
            assert len(caffe_weights['shift']) == 1
            shift = caffe_weights['shift'][0].data.copy()
        else:
            shift = 0
        if 'bn' in group:
            bn_params = group['bn']
            assert len(bn_params) == 3
            # stored mean/var are pre-scaled by the BN count; undo that first
            inv_scale_factor, mean, var = [p.copy() for p in bn_params]
            mean, var = [p / inv_scale_factor for p in (mean, var)]
            stdev = (var + BN_EPS) ** 0.5
            scale /= stdev
            shift -= mean
            shift /= stdev
            print "Merging BN into conv:", name
        if 'scale' in group:
            assert len(group['scale']) == 1
            scale_param = group['scale'][0].copy()
            scale *= scale_param
            shift *= scale_param
            print "Merging scale into conv:", name
        if 'shift' in group:
            assert len(group['shift']) == 1
            shift += group['shift'][0].copy()
            print "Merging shift into conv:", name
        if isinstance(scale, np.ndarray):
            # per-output-channel scaling: transpose so broadcasting hits axis 0
            weights.data[...] = (source_weights.T * scale).T
        else:
            print "Directly transplanting weights: %s" % name
            assert scale == 1
            weights.data[...] = source_weights[...]
        if flip_filters and len(weights.shape) == 4:
            # correlation vs convolution convention: flip spatial axes
            weights.data[...] = weights.data[:, :, ::-1, ::-1]
        if reverse_3ch and weights.shape[1] == 3:
            # RGB <-> BGR input-channel order
            print 'Reversing 3 channel inputs for weights:', name
            weights.data[...] = weights.data[:, ::-1]
        if isinstance(shift, np.ndarray):
            assert 'shift' in caffe_weights, 'need bias'
            bias = caffe_weights['shift'][0]
            assert shift.shape == tuple(bias.shape)
            bias.data[...] = shift[...]
            if reverse_3ch and bias.data.shape[0] == 3:
                print 'Reversing 3 channel output biases:', name
                bias.data[...] = bias.data[::-1]
        elif 'shift' in caffe_weights:
            print "Zero initializing biases: %s" % name
            caffe_weights['shift'][0].data[...] = 0
        num_layers += 1
    print 'Transplanted weights of %d layers' % num_layers
    if mismatched is not None:
        print 'Warning: mismatch starting at layer:', mismatched
if __name__ == '__main__':
    # CLI: load train_gan.py weights, transplant them into a Caffe net,
    # and serialize the populated network as a .caffemodel.
    import argparse
    parser = argparse.ArgumentParser(
        description='Convert train_gan.py output to caffemodel')
    parser.add_argument('model', help='(*.prototxt) Caffe model specification')
    parser.add_argument('weights',
                        help='(*.jl) weights file saved by train_gan.py')
    parser.add_argument('output', help='(*.caffemodel) output Caffe model file')
    args = parser.parse_args()
    weights = load_weights(args.weights)
    caffenet = get_caffenet(args.model)
    # transplant_weights mutates caffenet in place
    transplant_weights(weights, caffenet)
    print 'Saving transplanted caffenet to:', args.output
    caffenet.save(args.output)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import linear_model
from itertools import cycle
class Lasso:
    """Convenience wrapper around scikit-learn lasso fitting and plotting.

    Attributes:
        res: last fitted estimator (or the coefficient path for Lasso_path)
        type: which method produced `res` ('LassoCV' or 'Lasso_path')
        alpha: penalty chosen by cross-validation (set by Lasso_cv only)
    """
    def __init__(self):
        self.res = None
        self.type = None
        self.alpha = None
    # lasso with parameter selected using cross validation
    # tail_num is the first and last number of coefficients to plot
    def Lasso_cv(self, X, y, n_alps=100, alps=None, plot=True, tail_num = 5):
        """Fit LassoCV on X/y, report selected variables and best alpha,
        and optionally plot top/bottom coefficients and residuals.

        NOTE(review): mutates the caller's DataFrame by adding an
        'intercept' column -- confirm this side effect is intended.
        """
        X['intercept'] = 1
        lasso = linear_model.LassoCV(n_alphas = n_alps, alphas = alps)
        self.type = 'LassoCV'
        res = lasso.fit(X,y)
        self.res = res
        coefs = pd.Series(res.coef_, index = X.columns)
        print("Lasso picked " + str(sum(coefs != 0)) + " variables and eliminated the other " + str(sum(coefs == 0)) + " variables")
        # get residuals
        preds = pd.DataFrame({"preds":self.res.predict(X), "true":y})
        preds["residuals"] = preds["true"] - preds["preds"]
        # get best alpha
        self.alpha = res.alpha_
        print("Lasso picked the best penalty parameter",self.alpha)
        # plot results if plot==True
        if plot:
            # largest-magnitude coefficients at both ends of the sorted order
            imp_coef = pd.concat([coefs.sort_values().head(tail_num),
                                  coefs.sort_values().tail(tail_num)])
            plt.figure(figsize=(8, 6), dpi=100)
            imp_coef.plot(kind = "barh")
            plt.title("Coefficients in the Lasso Model")
            plt.show()
            # plot residuals
            plt.figure(figsize=(8, 6), dpi=100)
            preds.plot(x = "true", y = "residuals",kind = "scatter")
            plt.title("True values versus residuals")
            plt.show()
    # compute lasso path and plot coefficients along the path
    def Lasso_path(self, X, y, alphs=None):
        """Compute the lasso coefficient path and plot each coefficient
        against -log(alpha). Stores the raw path in self.res.

        NOTE(review): also adds an 'intercept' column to the caller's X.
        """
        X['intercept'] = 1
        alphs, path, _ = linear_model.lasso_path(X, y, alphas=alphs)
        self.type = 'Lasso_path'
        self.res = path
        coefs = path
        plt.figure(figsize=(8, 6), dpi=100)
        colors = cycle(['b', 'r', 'g', 'c', 'k'])
        for coef_l, c in zip(coefs, colors):
            plt.plot(-np.log(alphs), coef_l, c=c)
        plt.xlabel('-Log(alpha)')
        plt.ylabel('coefficients')
        plt.title('Coefficients along the path of Lasso')
        plt.axis('tight')
        plt.show()
    # plain lasso
    def plain_Lasso(self, X, y, alp, plot=True, tail_num=5):
        """Fit a Lasso with a fixed penalty *alp*; report selected variables
        and optionally plot coefficients and residuals (same side-effect
        note as Lasso_cv: X gains an 'intercept' column)."""
        X['intercept'] = 1
        lasso = linear_model.Lasso(alpha=alp)
        self.res = lasso.fit(X, y)
        # get coefficients
        coefs = pd.Series(self.res.coef_, index = X.columns)
        print("Lasso picked " + str(sum(coefs != 0)) + " variables and eliminated the other " + str(sum(coefs == 0)) + " variables")
        # get residuals
        preds = pd.DataFrame({"preds":self.res.predict(X), "true":y})
        preds["residuals"] = preds["true"] - preds["preds"]
        # plot results if plot==True
        if plot:
            imp_coef = pd.concat([coefs.sort_values().head(tail_num),
                                  coefs.sort_values().tail(tail_num)])
            plt.figure(figsize=(8, 6), dpi=100)
            imp_coef.plot(kind = "barh")
            plt.title("Coefficients in the Lasso Model")
            plt.show()
            # plot residuals
            plt.figure(figsize=(8, 6), dpi=100)
            preds.plot(x = "true", y = "residuals",kind = "scatter")
            plt.title("True values versus residuals")
            plt.show()
import decimal
from django.test.client import RequestFactory
from mock import patch, sentinel
from hypothesis import given
from hypothesis.strategies import characters, text, integers, booleans, datetimes, dates, decimals, uuids, binary, dictionaries
from perma.utils import *
from .utils import PermaTestCase, SentinelException
# Fixtures
def spoof_perma_payments_post():
    """Return a fake POST body shaped like a perma-payments transmission."""
    encrypted = {"timestamp": 1504884268.560902, "desired_field": "desired_field"}
    data = {'encrypted_data': encrypted}
    # sanity-check the fixture shape the tests rely on
    assert 'encrypted_data' in data
    assert 'timestamp' in data['encrypted_data']
    assert 'desired_field' in data['encrypted_data']
    return data
def one_two_three_dict():
    """Return a small fixture dict mapping 'one'/'two'/'three' to themselves."""
    data = {key: key for key in ('one', 'two', 'three')}
    # sanity-check the fixture shape the tests rely on
    assert 'one' in data
    assert 'two' in data
    assert 'three' in data
    assert 'four' not in data
    return data
# Tests
class UtilsTestCase(PermaTestCase):
    """Tests for perma.utils: client-IP extraction and the perma-payments
    encrypt/stringify/timestamp transmission round-trip."""
    def setUp(self):
        self.factory = RequestFactory()
    def test_get_client_ip(self):
        request = self.factory.get('/some/route', REMOTE_ADDR="1.2.3.4")
        self.assertEqual(get_client_ip(request), "1.2.3.4")
    # communicate with perma payments
    @patch('perma.utils.encrypt_for_perma_payments', autospec=True)
    @patch('perma.utils.stringify_data', autospec=True)
    def test_prep_for_perma_payments(self, stringify, encrypt):
        stringify.return_value = sentinel.stringified
        encrypt.return_value = sentinel.encrypted
        assert prep_for_perma_payments({}) == sentinel.encrypted
        stringify.assert_called_once_with({})
        encrypt.assert_called_once_with(sentinel.stringified)
    def test_process_perma_payments_transmission_encrypted_data_not_in_post(self):
        with self.assertRaises(InvalidTransmissionException) as excinfo:
            assert process_perma_payments_transmission({}, [])
        assert 'No encrypted_data in POST.' in str(excinfo.exception)
    def test_process_perma_payments_transmission_encrypted_data_none(self):
        with self.assertRaises(InvalidTransmissionException) as excinfo:
            assert process_perma_payments_transmission({'encrypted_data': None}, [])
        assert 'No encrypted_data in POST.' in str(excinfo.exception)
    def test_process_perma_payments_transmission_encrypted_data_empty(self):
        with self.assertRaises(InvalidTransmissionException) as excinfo:
            assert process_perma_payments_transmission({'encrypted_data': ''}, [])
        assert 'No encrypted_data in POST.' in str(excinfo.exception)
    @patch('perma.utils.decrypt_from_perma_payments', autospec=True)
    def test_process_perma_payments_transmission_encryption_problem(self, decrypt):
        decrypt.side_effect = SentinelException
        with self.assertRaises(InvalidTransmissionException) as excinfo:
            process_perma_payments_transmission(spoof_perma_payments_post(), [])
        assert 'SentinelException' in str(excinfo.exception)
        assert decrypt.call_count == 1
    @patch('perma.utils.unstringify_data', autospec=True)
    @patch('perma.utils.decrypt_from_perma_payments', autospec=True)
    def test_process_perma_payments_transmission_not_valid_json(self, decrypt, unstringify):
        unstringify.side_effect = SentinelException
        with self.assertRaises(InvalidTransmissionException) as excinfo:
            process_perma_payments_transmission(spoof_perma_payments_post(), [])
        assert 'SentinelException' in str(excinfo.exception)
        assert unstringify.call_count == 1
    @patch('perma.utils.unstringify_data', autospec=True)
    @patch('perma.utils.decrypt_from_perma_payments', autospec=True)
    def test_process_perma_payments_transmission_missing_timestamp(self, decrypt, unstringify):
        post = spoof_perma_payments_post()
        del post['encrypted_data']['timestamp']
        unstringify.return_value = post['encrypted_data']
        with self.assertRaises(InvalidTransmissionException) as excinfo:
            process_perma_payments_transmission(post, [])
        assert 'Missing timestamp in data.' in str(excinfo.exception)
    @patch('perma.utils.is_valid_timestamp', autospec=True)
    @patch('perma.utils.unstringify_data', autospec=True)
    @patch('perma.utils.decrypt_from_perma_payments', autospec=True)
    def test_process_perma_payments_transmission_expired_timestamp(self, decrypt, unstringify, timestamp):
        post = spoof_perma_payments_post()
        # Fix: configure the injected mock ("unstringify"), not the
        # star-imported real function "unstringify_data" -- setting
        # .return_value on the real function had no effect on the mock.
        unstringify.return_value = post['encrypted_data']
        timestamp.return_value = False
        with self.assertRaises(InvalidTransmissionException) as excinfo:
            process_perma_payments_transmission(post, [])
        assert 'Expired timestamp in data.' in str(excinfo.exception)
    @patch('perma.utils.is_valid_timestamp', autospec=True)
    @patch('perma.utils.unstringify_data', autospec=True)
    @patch('perma.utils.decrypt_from_perma_payments', autospec=True)
    def test_process_perma_payments_transmission_happy_path(self, decrypt, unstringify, timestamp):
        post = spoof_perma_payments_post()
        decrypt.return_value = sentinel.decrypted
        unstringify.return_value = post['encrypted_data']
        timestamp.return_value = True
        assert process_perma_payments_transmission(post, ['desired_field']) == {'desired_field': 'desired_field'}
        decrypt.assert_called_once_with(post['encrypted_data'])
        unstringify.assert_called_once_with(sentinel.decrypted)
        timestamp.assert_called_once_with(post['encrypted_data']['timestamp'], settings.PERMA_PAYMENTS_TIMESTAMP_MAX_AGE_SECONDS)
    # perma-payments helpers
    def test_retrieve_fields_returns_only_specified_fields(self):
        one_two_three = one_two_three_dict()
        assert retrieve_fields(one_two_three, ['one']) == {'one': 'one'}
        assert retrieve_fields(one_two_three, ['two']) == {'two': 'two'}
        assert retrieve_fields(one_two_three, ['one', 'three']) == {'one': 'one', 'three': 'three'}
    def test_retrieve_fields_raises_if_field_absent(self):
        one_two_three = one_two_three_dict()
        with self.assertRaises(InvalidTransmissionException):
            retrieve_fields(one_two_three, ['four'])
    def test_is_valid_timestamp(self):
        max_age = 60
        now = to_timestamp(datetime.utcnow())
        still_valid = to_timestamp(datetime.utcnow() + timedelta(seconds=max_age))
        invalid = to_timestamp(datetime.utcnow() + timedelta(seconds=max_age * 2))
        self.assertTrue(is_valid_timestamp(now, max_age))
        self.assertTrue(is_valid_timestamp(still_valid, max_age))
        self.assertFalse(is_valid_timestamp(invalid, max_age))
    # hypothesis strategies: types json round-trips losslessly
    preserved = text(alphabet=characters(min_codepoint=1, blacklist_categories=('Cc', 'Cs'))) | integers() | booleans()
    @given(preserved | dictionaries(keys=text(alphabet=characters(min_codepoint=1, blacklist_categories=('Cc', 'Cs'))), values=preserved))
    def test_stringify_and_unstringify_data_types_preserved(self, data):
        assert unstringify_data(stringify_data(data)) == data
    # hypothesis strategies: types serializable one-way only
    oneway = decimals(places=2, min_value=decimal.Decimal(0.00), allow_nan=False, allow_infinity=False) | datetimes() | dates() | uuids()
    @given(oneway | dictionaries(keys=text(alphabet=characters(min_codepoint=1, blacklist_categories=('Cc', 'Cs'))), values=oneway))
    def test_stringify_types_lost(self, data):
        # Some types can be serialized, but not recovered from strings by json.loads.
        # Instead, you have to manually attempt to convert, by field, if you are expecting one of these types.
        #
        # If something can't be serialized, or unserialized,
        # this test will raise an Exception, rather than failing with an assertion error.
        unstringify_data(stringify_data(data))
    @given(binary())
    def test_perma_payments_encrypt_and_decrypt(self, b):
        ci = encrypt_for_perma_payments(b)
        assert decrypt_from_perma_payments(ci) == b
|
'''
█████████████████████████████████████████████████████████████████████████████████████████████████████████████
█▄─▄▄─█▄─▄███▄─▄▄─█─▄▄▄─█─▄─▄─█▄─▄▄▀█─▄▄─█▄─▀█▄─▄███▄─█─▄█▄─▄█─▄▄▄▄█▄─██─▄██▀▄─██▄─▄███▄─▄█░▄▄░▄█▄─▄▄─█▄─▄▄▀█
██─▄█▀██─██▀██─▄█▀█─███▀███─████─▄─▄█─██─██─█▄▀─█████▄▀▄███─██▄▄▄▄─██─██─███─▀─███─██▀██─███▀▄█▀██─▄█▀██─██─█
▀▄▄▄▄▄▀▄▄▄▄▄▀▄▄▄▄▄▀▄▄▄▄▄▀▀▄▄▄▀▀▄▄▀▄▄▀▄▄▄▄▀▄▄▄▀▀▄▄▀▀▀▀▀▄▀▀▀▄▄▄▀▄▄▄▄▄▀▀▄▄▄▄▀▀▄▄▀▄▄▀▄▄▄▄▄▀▄▄▄▀▄▄▄▄▄▀▄▄▄▄▄▀▄▄▄▄▀▀
----------------------------------------------------------------------------
Purpose: An interactive module that visualizes the electron configurations of different elements.
Displayed in an orbital format, based upon the internal calculations performed that involve the famous Dirac equation,
which describes the shape and structure of the electrons in terms of the wavefunction.
Author: John Seong
Created: 23-Mar-2022
Updated: 31-May-2022
-----------------------------------------------------------------------------
I think this project deserves a level 4+ because...
1. A real-time electron movement simulation that no other program offers in a non-scientific field of software engineering
- derived by the electrons' angular momentum formula
2. Each dot represents a probable location where the electron might reside, based upon the wavefunction
— according to the Density Functional Theory (DFT)
3. Used industry-standard Quantum Mechanics simulation libraries such as ASE and GPAW for numerically computing the electron density
-----------------------------------------------------------------------------
'''
from server import create_app
import os
# WSGI application instance used by both the dev server and the platform runner.
app = create_app()

if __name__ == '__main__':
    # Hosting platforms (e.g. Heroku) inject the listen port via $PORT.
    listen_port = int(os.environ.get("PORT", 5000))
    app.run(host='0.0.0.0', port=listen_port)
'''
TIPS & TRICKS
PIP FREEZE LOCALLY: pip3 freeze -l > requirements.txt
DEPLOYING FLASK + REACT APP ON HEROKU: https://www.realpythonproject.com/how-to-setup-automated-deployment-for-multiple-apps-under-a-single-github-repository-in-heroku/
HOW TO INSTALL ASE AND GPAW ON LINUX (SERVER): http://dtu.cnwiki.dk/10302/page/2699/optional-install-ase-and-gpaw-on-your-laptop
ATOMIC ORBITAL MLAB: https://dpotoyan.github.io/Chem324/H-atom-wavef.html
SHIFT+ALT+CLICK BELOW = MULTIPLE CURSORS ON VSCODE
''' |
from .action import Action


class Create(Action):
    """Action implementing the 'create' sub-command for supported objects."""
    # Sub-command keyword; presumably used by Action's registration machinery
    # -- confirm against the Action base class.
    name = 'create'
    # Attributes passed to the sub-command's argument-parser group.
    group_attrs = {
        'help': 'Create supported objects'
    }
|
# Generated by Django 3.2.4 on 2021-06-21 23:32
from django.db import migrations


class Migration(migrations.Migration):
    """Renames Profile.twitter_bio to Profile.twitter (schema-only change)."""

    dependencies = [
        ('blog', '0013_auto_20210622_0126'),
    ]

    operations = [
        migrations.RenameField(
            model_name='profile',
            old_name='twitter_bio',
            new_name='twitter',
        ),
    ]
|
from kfilter import kalman
from kfilter import simplify
# NOTE(review): version banner printed as an import-time side effect --
# confirm this is intentional rather than leftover debugging output.
print("kalman filter version 1.1.0")
|
import os
import pytest
@pytest.mark.skipif(
    not os.getenv("FAST_BITRIX24_TEST_WEBHOOK"),
    reason="Нет аккаунта, на котором можно проверить",
)
class TestsWithLiveServer:
    """Integration tests against a live Bitrix24 account.

    The whole class is skipped unless FAST_BITRIX24_TEST_WEBHOOK points at
    a test webhook. Fixtures (get_test, create_100_leads, ...) come from
    the test suite's conftest.
    """
    class TestBasic:
        # Smoke tests for call / get_all / get_by_ID / call_batch.
        def test_simple_add_lead(self, get_test):
            b = get_test
            lead_no = b.call(
                "crm.lead.add", {"fields": {"NAME": "Bob", "COMMENTS": "x" * 100}}
            )
            b.get_by_ID("crm.lead.delete", [lead_no])
        def test_simple_get_all(self, create_100_leads):
            b = create_100_leads
            result = b.get_all("crm.lead.list")
            assert isinstance(result, list)
            assert len(result) >= 100
        def test_get_all_single_page(self, get_test):
            b = get_test
            result = b.get_all("crm.lead.fields")
            assert isinstance(result, dict)
        def test_get_all_params(self, create_100_leads):
            b = create_100_leads
            fields = ["ID", "NAME"]
            leads = b.get_all("crm.lead.list", {"select": fields})
            assert len(fields) == len(leads[0])
        def test_issue_96(self, get_test):
            from datetime import datetime
            b = get_test
            b.call(
                "telephony.externalcall.register",
                {
                    # 'USER_PHONE_INNER': 'OLD_LINE',
                    "USER_ID": 1,
                    "PHONE_NUMBER": "+79163345641",
                    "CALL_START_DATE": f"{datetime.now()}",
                    "CRM_CREATE": 0,
                    "CRM_ENTITY_TYPE": "LEAD",
                    "CRM_ENTITY_ID": 43707,
                    "SHOW": 0,
                    "TYPE": 1,
                },
            )
        def test_call_batch(self, create_100_leads):
            b = create_100_leads
            with pytest.raises(ValueError):
                b.call_batch({})
            result = b.call_batch({"halt": 0, "cmd": {1: "crm.lead.list"}})
            assert len(result) == 1
            # a single crm.lead.list page holds 50 records
            assert len(result["1"]) == 50
        def test_get_by_ID_results(self, create_100_leads):
            b = create_100_leads
            leads = b.get_all("crm.lead.list")
            lead_IDs = [lead["ID"] for lead in leads[:10]]
            leads = b.get_by_ID("crm.lead.get", lead_IDs)
            assert isinstance(leads, dict)
        def test_call(self, get_test):
            b = get_test
            delete_IDs = b.call(
                "crm.lead.add",
                [
                    {"fields": {"NAME": "Bob"}},
                    {"fields": {"NAME": "Jake"}},
                ],
            )
            b.call("crm.lead.delete", [{"ID": ID} for ID in delete_IDs])
            b.call("crm.lead.delete", [])
        def test_call_single_param(self, get_test):
            b = get_test
            delete_ID = b.call("crm.lead.add", {"fields": {"NAME": "Bob"}})
            b.call("crm.lead.delete", {"ID": delete_ID})
        def test_issue_129(self, create_a_lead):
            b, lead = create_a_lead
            # iterator with the progress bar enabled - raises an error
            with pytest.raises(TypeError):
                b.get_by_ID("crm.lead.get", iter([lead]))
            with pytest.raises(TypeError):
                b.call("crm.lead.get", iter([{"ID": lead}]))
            # iterator without the progress bar - no error
            b.verbose = False
            b.get_by_ID("crm.lead.get", iter([lead]))
            b.call("crm.lead.get", iter([{"ID": lead}]))
            # sequence - no error
            b.verbose = True
            b.get_by_ID("crm.lead.get", [lead])
            result = b.call("crm.lead.get", [{"ID": lead}])
            assert isinstance(result, tuple)
            result = b.call("crm.lead.get", {"ID": lead})
            assert isinstance(result, dict)
            # empty iterator without the progress bar - no error
            b.verbose = False
            b.get_by_ID("crm.lead.get", iter([]))
            b.call("crm.lead.get", iter([]))
        def test_issue_132(self, create_100_tasks):
            b = create_100_tasks
            result = b.get_all("tasks.task.list")
            assert result
            with pytest.raises(ValueError):
                result = b.list_and_get("tasks.task", "taskId")
        def test_case(self, get_test):
            # parameter-name case sensitivity in server error messages
            b = get_test
            with pytest.raises(
                RuntimeError, match="Could not find value for parameter"
            ):
                b.call("disk.file.get", [{"ID": 1}])
            with pytest.raises(RuntimeError, match="Could not find entity with id"):
                b.call("disk.file.get", [{"id": 1}])
        def test_long_task_description(self, get_test):
            b = get_test
            lead_no = b.call(
                "crm.lead.add", {"fields": {"NAME": "Bob", "COMMENTS": "x" * 10000}}
            )
            b.get_by_ID("crm.lead.delete", [lead_no])
    class TestParamsEncoding:
        # Encoding of nested / operator-prefixed request parameters.
        def test_mobile_phone(self, get_test):
            b = get_test
            lead_no = b.call(
                "crm.lead.add",
                {
                    "fields": {
                        "NAME": "Bob",
                        "PHONE": [{"VALUE": "+7123456789", "VALUE_TYPE": "MOBILE"}],
                    }
                },
            )
            lead = b.get_by_ID("crm.lead.get", [lead_no])[str(lead_no)]
            try:
                assert lead["PHONE"][0]["VALUE_TYPE"] == "MOBILE"
            finally:
                b.get_by_ID("crm.lead.delete", [lead_no])
        def test_filter_not_equal(self, create_100_leads):
            b = create_100_leads
            result = b.get_all("crm.lead.list", {"FILTER": {"!STATUS_ID": "NEW"}})
            assert not result
            result = b.get_all("crm.lead.list", {"FILTER": {"!STATUS_ID": "CLOSED"}})
            assert result
            result = b.get_all("crm.lead.list", {"FILTER": {"<>STATUS_ID": "NEW"}})
            assert result
            result = b.get_all("crm.lead.list", {"FILTER": {"<>STATUS_ID": "CLOSED"}})
            assert result
        def test_product_rows(self, create_a_lead):
            b, lead_no = create_a_lead
            product_rows = [
                {
                    "PRODUCT_NAME": "ssssdsd",
                    "PRICE": 5555,
                    "QUANTITY": 2,
                    "CURRENCY": "USD",
                },
                {"PRODUCT_ID": 2809, "PRICE": 100, "QUANTITY": 2},
            ]
            b.call("crm.lead.productrows.set", {"ID": lead_no, "rows": product_rows})
            result_rows = b.call("crm.lead.productrows.get", {"ID": lead_no})
            assert len(product_rows) == len(result_rows)
    class TestErrors:
        # Input validation errors; these do not hit the live server.
        def test_get_all(self, bx_dummy):
            b = bx_dummy
            with pytest.raises(Exception):
                b.get_all("")
            with pytest.raises(Exception):
                b.get_all(123)
            with pytest.raises(Exception):
                b.get_all("some_method", {"select": None})
            with pytest.raises(Exception):
                b.get_all("some_method", {"filter": 3})
        def test_get_by_ID(self, bx_dummy):
            b = bx_dummy
            with pytest.raises(Exception):
                b.get_by_ID("_", 123)
            with pytest.raises(Exception):
                b.get_by_ID("_", [["a"]])
        def test_call(self, bx_dummy, monkeypatch):
            b = bx_dummy
            async def stub(*args, **kwargs):
                return {"result": {"result": {"ok"}}}
            monkeypatch.setattr(b.srh, "request_attempt", stub)
            assert b.srh.request_attempt is stub
            b.call("_", raw=True)
            with pytest.raises(Exception):
                b.call("_", {})
            b.call("_", [1, {"a": 2}], raw=True)
|
# Read three integers; presumably a competitive-programming task where S must
# reach T by repeated doubling (left shifts). N appears unused beyond parsing.
N, S, T = map(int, input().split())
sb, tb = S.bit_length(), T.bit_length()
# If shifting T right by the bit-length gap recovers S exactly, the gap is the
# number of doublings needed; otherwise T is unreachable and we print -1.
print(tb - sb if (T >> max(0, tb - sb)) == S else -1)
|
"""
Author: Jianyou (Andre) Wang
Date: Sep 2020
"""
import tensorflow as tf
import numpy as np
import nltk
from nltk.corpus import wordnet as wn
from gensim.parsing.preprocessing import remove_stopwords
from collections import defaultdict, Counter
import os
import re
import random
import requests
import pickle
import heapq
import copy
from functools import reduce
import math
import multiprocessing as mp
import time
import pdb
import spacy
from gpt2.src.score import score_model
from gpt2.src.encoder import get_encoder
from .saved_objects.templates import get_first_line_templates
from .utils import utils
from .saved_objects.Finer_POS import get_finer_pos_words
class LimGen(utils):
    def __init__(self, gender,prompt, search_space, retain_space,
                 stress=True, prob_threshold=-10, mode="multi",
                 relax_story_line=False, beam_search=None, multiprocessing=True, storyline="story"):
        """
        Generate poems with multiple templates given a seed word (prompt) and GPT2
        search space.
        Parameters
        ----------
        prompt: str
            A seed word used to kickstart poetry generation.
        search_space : int
            Search space of the sentence finding algorithm.
            The larger the search space, the more sentences the network runs
            in parallel to find the best one with the highest score.
        retain_space : int
            How many sentences per template to keep.
        stress: bool
            Whether we enforce stress.
        prob_threshold: float
            If the probability of a word is lower than this threshold we will not consider
            this word. Set it to None to get rid of it.
        """
        print("=================== Initializing ==================================")
        super(LimGen,self).__init__()
        self.storyline=storyline
        # madlib (template word substitution) only applies to the "story" storyline
        if self.storyline=="story":
            self.madlib_flag=True
        else:
            self.madlib_flag=False
        self.multiprocessing=multiprocessing
        self.pdb_flag=True
        # maps 0-based body-line index to the limerick line name
        self.which_line_dict={0:"second", 1:"third",2:"fourth",3:"fifth"}
        self.beam_search=beam_search
        self.mode=mode
        self.relax_story_line=relax_story_line
        self.prob_threshold = prob_threshold
        self.enforce_stress = stress
        # non-"multi" modes drop the storyline and probability constraints
        if self.mode!="multi":
            self.relax_story_line=True
            self.prob_threshold = None
        self.finer_pos_category()
        self.pos_weight()
        # NOTE(review): a gender other than "male"/"female" leaves
        # self.temp_name unset, which would break gen_poem -- confirm inputs.
        if gender=="male":
            self.temp_name="Robert"
        if gender=="female":
            self.temp_name="Sarah"
        self.different_gender(gender)
        self.create_w1s_rhyme_dict(prompt)
        self.n_w25_threshold=10
        #print("=============================== helper ==============================================")
        self.helper(prompt)
        #print("=============================== end helper ==============================================")
        # candidate verbs (by POS tag) for madlib substitution
        self.madlib_verbs = self.get_madlib_verbs(prompt,["VBD", "VBN", "VB", "VBZ", "VBP", "VBG"])
        # get rid of common words
        if "was" in self.madlib_verbs["VBD"]:
            self.madlib_verbs["VBD"].remove("was")
        #print("remove was \n")
        # self.madlib_verbs = self.get_madlib_verbs(prompt,["NN","NNS"])
        #print("------- Madlib Verbs ------")
        #print(self.madlib_verbs)
        self.last_word_dict=self.create_last_word_dict(self.w1s_rhyme_dict,self.w3s_rhyme_dict)
        self.prompt=prompt
        self.search_space=search_space
        self.retain_space=retain_space
        print("=================== Finished Initializing ==================================")
    def gen_poem(self):
        """
        Generate limericks line by line.

        Builds first-line candidates from the gendered template name, then
        generates the second..fifth lines (9/6/6/9 syllables) with
        gen_line_flexible, and finally sorts the finished poems by mean
        per-token score when a beam_search mode is set.

        Returns
        -------
        tuple
            (previous_data, self.template_to_line, self.words_to_names_rhyme_dict)
        """
        previous_data=[]
        candidates=self.gen_first_line_new(self.temp_name.lower(),search_space=5,strict=True,seed=self.prompt)
        assert len(candidates)>0, "no first line"
        #print(candidates)
        for text in candidates:
            first_line_encodes = self.enc.encode(" ".join(text))
            # Each entry is (encodes, scores, text, template, (w1,w3), wema);
            # see gen_line_flexible's docstring for the tuple layout.
            previous_data.append((tuple(first_line_encodes),(0,),tuple(text)+("\n",), (text[-1],"\n"),("",""),(0,)))
        for which_line, num_sylls in zip(["second","third","fourth","fifth"],[9,6,6,9]):
            print("====================================================================================")
            print("====================================================================================")
            print("======================= starting {} line generation =============================".format(which_line))
            print("====================================================================================")
            print("====================================================================================")
            last_word_set=self.last_word_dict[which_line]
            possible=self.get_all_templates(num_sylls,which_line,last_word_set)
            previous_data=self.gen_line_flexible(previous_data=previous_data, possible=possible,num_sylls=num_sylls, search_space=self.search_space,retain_space=self.retain_space, which_line=which_line)
        if self.beam_search=="candidate_rank":
            previous_data, _ = self.diversity_sort(data=previous_data,last=True, diversity=False)
        if self.beam_search=="MTBS":
            previous_data, _ = self.diversity_sort_MTBS(data=previous_data,last=True, which_line=which_line)
        return previous_data, self.template_to_line, self.words_to_names_rhyme_dict
def pos_weight(self):
'''
calculate the weight for each pos for each specific line
'''
def softmax(data1):
data=[d/sum(data1) for d in data1]
return data
temp=[math.exp(d) for d in data]
sum_temp=sum(temp)
return [t/sum(temp) for t in temp]
self.pos_weight_dict={}
for key in self.templates.keys():
temp=[]
for k in self.templates[key].keys():
for item in self.templates[key][k]:
temp+=list(item[0])
occur=Counter(temp)
key_list=list(occur.keys())
temp2=[]
for k in key_list:
temp2.append(len(temp)/occur[k])
ret=softmax(temp2)
occur2={}
for ii, k in enumerate(key_list):
occur2[k]=ret[ii]
self.pos_weight_dict[key]=occur2
def diversity_sort_MTBS(self,search_space=None, retain_space=None,data=None, finished=None, last=False, which_line=None):
def discount_mean(data_list):
ret=0
gamma=0.9
count=0
for i in data_list[::-1]:
ret+=i*(gamma**count)
count+=1
return ret/len(data_list)
def lined_template(temp):
lines_template=[]
line_template=[]
for i in temp:
if i!="\n":
line_template.append(i)
else:
lines_template.append(line_template)
line_template=[]
return lines_template
def total_weighted_Hamming_distance(temp1,temp2):
dist=[]
diversity_factor=1
lines_template_1=lined_template(temp1)[1:]
lines_template_2=lined_template(temp2)[1:]
'''
if len(lines_template_2)==4 and self.pdb_flag:
pdb.set_trace()
self.pdb_flag=False
'''
for i in range(len(lines_template_1)):
dist.append(weighted_Hamming_distance(lines_template_1[i],lines_template_2[i],self.which_line_dict[i]))
dist=dist[::-1] # reverse dist
ret=0
for i in range(len(dist)):
ret+=dist[i]*(diversity_factor**i)
return dist
def weighted_Hamming_distance(template1, template2, which_line):
dist=0
for i in range(max(len(template1),len(template2))):
if i<len(template1) and i<len(template2):
if template1[i]!=template2[i]:
if template1[i] in self.pos_weight_dict[which_line].keys() and template2[i] in self.pos_weight_dict[which_line].keys():
dist+=max(self.pos_weight_dict[which_line][template1[i]],self.pos_weight_dict[which_line][template2[i]])
if i>=len(template1):
if template2[i] in self.pos_weight_dict[which_line].keys():
dist+=self.pos_weight_dict[which_line][template2[i]]
if i>=len(template2):
if template1[i] in self.pos_weight_dict[which_line].keys():
dist+=self.pos_weight_dict[which_line][template1[i]]
return dist
def find_best_template(temp_data,already_selected_templates, which_line):
ret={}
if len(already_selected_templates)==0:
for t in temp_data.keys():
#ret[t]=np.mean([np.mean(item[1][-len(item[4]):]) for item in temp_data[t]])
ret[t]=np.mean([np.mean(item[1]) for item in temp_data[t]])
#ret[t]=np.mean([discount_mean(item[1]) for item in temp_data[t]])
else:
for t in temp_data.keys():
dist=np.sum([total_weighted_Hamming_distance(t,tt) for tt in already_selected_templates])
#ret[t]=np.mean([np.mean(item[1][-len(item[4]):]) for item in temp_data[t]])*dist
ret[t]=np.mean([np.mean(item[1]) for item in temp_data[t]])*dist
#ret[t]=np.mean([discount_mean(item[1]) for item in temp_data[t]])*dist
return sorted(ret.items(),key=lambda item: item[1],reverse=True)[0][0]
if last:
data_new=heapq.nlargest(len(data), data, key=lambda x: np.mean(x[1]))
return data_new,0
temp_data=defaultdict(set)
# Key is "template; current_line_template". For each key we only keep retain_space sentences
for n in data:
if not finished:
key=n[3]+n[4]
else:
key=n[3] # because the curr is already merged.
temp_data[key].add(n)
pre_select_data={}
selected_data=[]
already_selected_templates=[]
for k in temp_data.keys():
#temp=heapq.nlargest(min(len(temp_data[k]),retain_space), temp_data[k], key=lambda x: np.mean(x[1][-len(x[4]):]))
temp=heapq.nlargest(min(len(temp_data[k]),retain_space), temp_data[k], key=lambda x: np.mean(x[1]))
#temp=heapq.nlargest(min(len(temp_data[k]),retain_space), temp_data[k], key=lambda x: discount_mean(x[1]))
pre_select_data[k]=temp
count=0
while count<search_space and len(pre_select_data.keys())>0:
template_chosen=find_best_template(pre_select_data,already_selected_templates,which_line)
already_selected_templates.append(template_chosen)
selected_data+=pre_select_data[template_chosen]
del pre_select_data[template_chosen]
count+=1
return selected_data, len(temp_data.keys())
def diversity_sort(self,search_space=None, retain_space=None,data=None, finished=None, last=False, diversity=True):
"""
Given a list of sentences, put them in bins according to their templates, get
retain_space sentences from each bin and form a list, and get top search_space sentences from
the list.
Parameters
----------
search_space: int
Number of sentences returned
data: list
Input sentences
finished: bool
Whether the current sentence is completed
"""
if last:
data_new=heapq.nlargest(len(data), data, key=lambda x: np.mean(x[1]))
return data_new,0
if diversity:
temp_data=defaultdict(set)
# Key is "template; current_line_template". For each key we only keep retain_space sentences
for n in data:
if not finished:
key=n[3]+n[4]
else:
key=n[3] # because the curr is already merged.
temp_data[key].add(n)
data=[]
list_of_keys=list(temp_data.keys())
x=random.sample(list_of_keys, len(list_of_keys))
for k in x:
if not finished:
temp=heapq.nlargest(min(len(temp_data[k]),retain_space), temp_data[k], key=lambda x: np.mean(x[1]))
data.append((temp,np.max([np.mean(m[1]) for m in temp])))
else:
temp=heapq.nlargest(min(len(temp_data[k]),retain_space), temp_data[k], key=lambda x: np.mean(x[1]))
data.append((temp,np.max([np.mean(m[1]) for m in temp])))
data=heapq.nlargest(min(len(data),search_space),data, key = lambda x: x[1])
data_new=[]
for k in data:
data_new+=k[0]
data=data_new
else:
# this is normal beam search
if not finished:
data_new=heapq.nlargest(min(len(data),search_space*retain_space), data, key=lambda x: np.mean(x[1]))
else:
data_new=heapq.nlargest(min(len(data),search_space*retain_space), data, key=lambda x: np.mean(x[1]))
return data_new, 0
return data_new, len(temp_data.keys())
    def gen_line_flexible(self, previous_data, possible, num_sylls, search_space, retain_space,which_line):
        '''
        Generate a line using multiple templates.

        Parameters
        ----------
        previous_data: list of tuples
            Each element has a tuple structure (encodes, score, text, template, (w1,w3), wema).
            encodes: tuple of int
                encodes are gpt-indices for words
            score: tuple of float
                per-token log-probabilities of the line so far
            text: tuple of str
                the text corresponding to encodes
            template: tuple of POS
                template is all existing templates, e.g. if we are generating the third line right now,
                template is ["somename","second line templates"].
            (w1,w3): tuple
                It records the rhyme words in this sense: second line and fifth line last words have to be
                in w1s_rhyme_dict[w1], the fourth line last word has to be in w3s_rhyme_dict[w3]. Note if we are only at line 2,
                then w3 is '', because it hasn't happened yet.
            wema: tuple
                moving average / word similarity bookkeeping (currently placeholder zeros).
        possible: list
            Possible templates for current line.
        search_space: int
            We generate search_space lines and sort them by probability to find out the best line.
        num_sylls: int
            Number of syllables of current line.
        which_line: str
            Name of the line being generated ("second".."fifth").

        Returns
        -------
        list of tuples
            Same layout as previous_data, each extended with the finished
            line, its punctuation and a trailing "\n".
        '''
        previous_data=self.encodes_align(previous_data)
        sentences=[]
        for i in previous_data:
            template_curr=()
            num_sylls_curr=0
            sentences.append([i[0],i[1],i[2],i[3],template_curr,num_sylls_curr,i[4], i[5]])
        # sentences is a tuple, each element looks like (encodes, score, text, template, current_line_template, how_many_syllabus_used_in_current_line, (w1,w3), moving average/word similarity list)
        # curren_line_template is a partial template of the currently developing line. template is all the POS of the developing poem, with lines separated by "\n".
        finished_sentences=[]
        iteration=0
        new_sentences=[1]  # dummy non-empty value so the loop body runs at least once
        while(len(new_sentences)>0):
            iteration+=1
            context_token=[s[0] for s in sentences]
            m=len(context_token)
            context_token=np.array(context_token).reshape(m,-1)
            print("******************************** gpt2 Starts Processing Next Word **********************************")
            logits = score_model(model_name=self.model_name, context_token = context_token)
            print("******************************** gpt2 Finished Processing Next Word **********************************")
            if self.multiprocessing:
                # Fan the candidates out over worker processes; each worker
                # puts (new_sentences, quasi_finished_sentences) on the queue.
                logits_list= self.split_chunks(logits)
                sentences_list=self.split_chunks(sentences)
                manager = mp.Manager()
                output=manager.Queue()
                processes = [mp.Process(target=self.batch_process_word, args=(which_line, possible,num_sylls,logits_list[mp_index], sentences_list[mp_index], output, True,retain_space)) for mp_index in range(len(logits_list)) ]
                print("******************************** multiprocessing starts with {} processes *************************************".format(len(processes)))
                for p in processes:
                    p.start()
                for p in processes:
                    p.join()
                print("********************************** multiprocessing ends *****************************************************")
                results = [output.get() for p in processes]
                new_sentences, quasi_finished_sentences = [], []
                for result in results:
                    new_sentences += result[0]
                    quasi_finished_sentences += result[1]
            else:
                # NOTE(review): retain_space is not forwarded here, so inside
                # batch_process_word it defaults to None; with
                # beam_search=="candidate_rank" the per-beam length comparison
                # against None would raise - confirm this path is never used
                # with candidate_rank.
                new_sentences, quasi_finished_sentences= self.batch_process_word(which_line, possible, num_sylls, logits, sentences)
            if self.punctuation[which_line]:
                # This line ends with punctuation: complete each quasi-finished
                # sentence with the most probable matching punctuation token.
                if len(quasi_finished_sentences)>0:
                    if self.beam_search=="candidate_rank":
                        quasi_finished_sentences, diversity=self.diversity_sort(search_space,retain_space,quasi_finished_sentences, finished=True, diversity=False)
                    if self.beam_search=="MTBS":
                        quasi_finished_sentences, diversity=self.diversity_sort_MTBS(search_space,retain_space,quasi_finished_sentences, finished=True, which_line=which_line)
                    context_token=[s[0] for s in quasi_finished_sentences]
                    m=len(context_token)
                    context_token=np.array(context_token).reshape(m,-1)
                    print("################################## gpt2 Starts Adding Punctuation #############################")
                    logits = score_model(model_name=self.model_name, context_token = context_token)
                    print("################################## gpt2 Finished Adding Punctuation #############################")
                    for i,j in enumerate(logits):
                        sorted_index=np.argsort(-1*j)
                        for index in sorted_index:
                            word = self.enc.decode([index]).lower().strip()
                            if word==self.sentence_to_punctuation[which_line]:
                                finished_sentences.append((quasi_finished_sentences[i][0] + (index,),
                                                quasi_finished_sentences[i][1] + (np.log(j[index]),),
                                                quasi_finished_sentences[i][2]+(word,),
                                                quasi_finished_sentences[i][3]+(word,),
                                                quasi_finished_sentences[i][4],
                                                quasi_finished_sentences[i][5]))
                                break
            else:
                for q in quasi_finished_sentences:
                    finished_sentences.append(q)
            print("========================= iteration {} ends =============================".format(iteration))
            if self.beam_search=="candidate_rank":
                sentences, diversity=self.diversity_sort(search_space,retain_space,new_sentences, finished=False, diversity=False)
            if self.beam_search=="MTBS":
                sentences, diversity=self.diversity_sort_MTBS(search_space,retain_space,new_sentences, finished=False, which_line=which_line)
            print("{} sentences before diversity_sort, {} sentences afterwards, diversity {}, this iteration has {} quasi_finished_sentences, now {} finished_sentences \n".format(len(new_sentences),len(sentences), diversity, len(quasi_finished_sentences),len(finished_sentences)))
        # The loop only exits once no partial sentence can be extended, i.e.
        # sentences is the sort of an empty new_sentences list.
        assert len(sentences)==0, "something wrong"
        if self.beam_search=="candidate_rank":
            previous_data_temp, _=self.diversity_sort(search_space,retain_space,finished_sentences, finished=True,diversity=False)
        if self.beam_search=="MTBS":
            previous_data_temp, _=self.diversity_sort_MTBS(search_space,retain_space,finished_sentences, finished=True, which_line=which_line)
        previous_data=[(i[0],i[1],i[2]+("\n",),i[3]+("\n",),i[4],i[5]+(0,)) for i in previous_data_temp]
        return previous_data
    def batch_process_word(self, which_line, possible, num_sylls, logits, sentences, output=None, madlib_flag=None, retain_space=None):
        '''
        Batch process the next possible word of a group of incomplete sentences.

        For every sentence, walk the GPT2 token distribution from most to
        least probable and keep the tokens that are real vocabulary words,
        fit some still-possible template and syllable budget, and satisfy the
        stress / rhyme / duplication constraints.

        Parameters
        ----------
        which_line: str
            Name of the line being generated ("second".."fifth").
        possible: list
            List of possible templates for the current line.
        num_sylls: int
            Number of syllables of the current line.
        logits: list
            Per-sentence next-token probability vectors from the GPT model.
        sentences: list
            Sentences being generated; each element is (encodes, score, text,
            template, current_line_template, syllables_used_in_current_line,
            (w1,w3), moving-average tuple).
        output: multiprocessing queue, optional
            When self.multiprocessing is set, results are put on this queue
            instead of being returned.
        madlib_flag: bool, optional
            Unused within this method; the instance attribute
            self.madlib_flag is consulted instead.
        retain_space: int, optional
            Per-beam cap on kept candidates; only used when
            beam_search == "candidate_rank".

        Returns
        -------
        (new_sentences, quasi_finished_sentences) when not multiprocessing;
        otherwise None (the pair is put on `output`).
        '''
        new_sentences = []
        quasi_finished_sentences = []
        for i,j in enumerate(logits):
            if self.beam_search=="candidate_rank":
                # Per-beam buffers so each source sentence contributes at most
                # retain_space continuations.
                new_sentences_per_beam=[]
                quasi_finished_sentences_per_beam=[]
            sorted_index=np.argsort(-1*j)
            word_list_against_duplication=[]
            # sentences is a tuple, each element looks like (encodes, score, text, template, current_line_template, how_many_syllabus_used_in_current_line, (w1,w3), moving average)
            # curren_line_template is a partial template of the currently developing line.
            #template is all the POS of the developing poem, with lines separated by "\n".
            template_curr=sentences[i][4]
            num_sylls_curr=sentences[i][5]
            moving_avg_curr=sentences[i][7][-1]
            # Rhyme constraint: second/third lines open a rhyme group, while
            # fifth/fourth lines must rhyme with w1/w3 respectively.
            rhyme_set_curr = set()
            if which_line=="second":
                rhyme_set_curr = self.w1s_rhyme_dict.keys()
                rhyme_word="second_line_special_case"
            if which_line=="fifth":
                rhyme_set_curr=self.w1s_rhyme_dict[sentences[i][6][0]]
                rhyme_word=sentences[i][6][0]
            if which_line=="third":
                rhyme_set_curr = self.w3s_rhyme_dict.keys()
                rhyme_word="third_line_special_case"
            if which_line=="fourth":
                rhyme_set_curr = self.w3s_rhyme_dict[sentences[i][6][1]]
                rhyme_word=sentences[i][6][1]
            assert len(rhyme_set_curr)>0
            # If it is the fifth line, the current template has to corresponds to the fourth line template
            # because they are usually one sentence
            for ii,index in enumerate(sorted_index):
                # Get current line's template, word embedding average, word, rhyme set, etc.
                relax_story_line=False
                word = self.enc.decode([index]).lower().strip()
                if word not in self.total_vocab:
                    continue
                if self.prob_threshold is not None and np.log(j[index]) < self.prob_threshold:
                    relax_story_line=True # when we are considering low probability words, we relax the last word pos constraints.
                if len(new_sentences)>=10 or len(quasi_finished_sentences)>0:
                    break
                if word in word_list_against_duplication:
                    continue
                elif len(word)==0:
                    continue
                # note that both , and . are in these keys()
                elif word not in self.words_to_pos.keys() or word not in self.dict_meters.keys():
                    continue
                else:
                    if index in self.blacklist_index:
                        continue
                    pos_set=self.get_word_pos(word)
                    sylls_set=set([len(m) for m in self.dict_meters[word]])
                    if len(pos_set)==0 or len(sylls_set)==0:
                        continue
                    # If the word is a noun or adjective and has appeared
                    # previously, we discard the sentence.
                    if self.is_duplicate_in_previous_words(word, sentences[i][2]):
                        continue
                    # If stress is incorrect, continue
                    if self.enforce_stress:
                        possible_syllables = self.dict_meters[word]
                        word_length = min(sylls_set)
                        # Stressed syllable positions differ for the 6-syllable
                        # (third/fourth) vs 9-syllable lines.
                        stress = [1, 4] if (which_line == "third" or which_line == "fourth") else [1, 4, 7]
                        correct_stress = True
                        # There is a stress on current word
                        for stress_position in stress:
                            if num_sylls_curr <= stress_position and num_sylls_curr + word_length > stress_position:
                                stress_syllable_pos = stress_position - num_sylls_curr
                                if all(s[stress_syllable_pos] != '1' for s in possible_syllables):
                                    correct_stress = False
                                    break
                        if not correct_stress:
                            continue
                    # end_flag is the (POS, Sylls) of word if word can be the last_word for a template, False if not
                    # continue_flag is (POS,Sylls) if word can be in a template and is not the last word. False if not
                    continue_flag=self.template_sylls_checking(pos_set=pos_set,sylls_set=sylls_set,template_curr=template_curr,num_sylls_curr=num_sylls_curr,possible=possible, num_sylls=num_sylls, rhyme_set_curr=rhyme_set_curr)
                    end_flag=self.end_template_checking(pos_set=pos_set,sylls_set=sylls_set,template_curr=template_curr,num_sylls_curr=num_sylls_curr,possible=possible, num_sylls=num_sylls, relax_story_line=relax_story_line)
                    # placeholder code, no effect, only to resolve compatibility issue.
                    tuple_of_wema=tuple([m for m in sentences[i][7][:-1]])+(0,)
                    if continue_flag:
                        word_list_against_duplication.append(word)
                        for continue_sub_flag in continue_flag:
                            # If current word POS is VB, current line is second line and word is not in our
                            # precomputed list, throw away the sentence
                            if self.madlib_flag:
                                curr_vb_pos = continue_sub_flag[0]
                                if 'VB' in curr_vb_pos and which_line == 'second' \
                                and not any('VB' in pos_tag for pos_tag in template_curr):
                                    if word not in self.madlib_verbs[curr_vb_pos]:
                                        continue
                            word_tuple = (sentences[i][0] + (index,),
                                          sentences[i][1] + (np.log(j[index]),),
                                          sentences[i][2]+(word,),
                                          sentences[i][3],
                                          sentences[i][4]+(continue_sub_flag[0],),
                                          sentences[i][5]+continue_sub_flag[1],
                                          sentences[i][6],
                                          tuple_of_wema)
                            if self.beam_search=="candidate_rank":
                                if len(new_sentences_per_beam)<retain_space:
                                    new_sentences_per_beam.append(word_tuple)
                            else:
                                new_sentences.append(word_tuple)
                    if end_flag:
                        # Quasi-finished tuples merge the current-line template
                        # into the poem template and drop the per-line
                        # bookkeeping: (encodes, scores, text, template,
                        # (w1,w3), wema).  The rhyme pair is opened (second,
                        # third) or preserved (fourth, fifth).
                        for end_sub_flag in end_flag:
                            if which_line=="second":
                                if word in rhyme_set_curr:
                                    word_list_against_duplication.append(word)
                                    word_tuple=(sentences[i][0] + (index,),
                                                sentences[i][1] + (np.log(j[index]),),
                                                sentences[i][2]+(word,),
                                                sentences[i][3]+sentences[i][4]+(end_sub_flag[0],),
                                                (word,""),
                                                tuple_of_wema)
                                    if self.beam_search=="candidate_rank":
                                        if len(quasi_finished_sentences_per_beam)<retain_space:
                                            quasi_finished_sentences_per_beam.append(word_tuple)
                                    else:
                                        quasi_finished_sentences.append(word_tuple)
                            if which_line=="third":
                                if word in rhyme_set_curr:
                                    word_list_against_duplication.append(word)
                                    word_tuple=(sentences[i][0] + (index,),
                                                sentences[i][1] + (np.log(j[index]),),
                                                sentences[i][2]+(word,),
                                                sentences[i][3]+sentences[i][4]+(end_sub_flag[0],),
                                                (sentences[i][6][0],word),
                                                tuple_of_wema)
                                    if self.beam_search=="candidate_rank":
                                        if len(quasi_finished_sentences_per_beam)<retain_space:
                                            quasi_finished_sentences_per_beam.append(word_tuple)
                                    else:
                                        quasi_finished_sentences.append(word_tuple)
                            if which_line=="fourth" or which_line=="fifth":
                                if word in rhyme_set_curr:
                                    word_list_against_duplication.append(word)
                                    word_tuple=(sentences[i][0] + (index,),
                                                sentences[i][1] + (np.log(j[index]),),
                                                sentences[i][2]+(word,),
                                                sentences[i][3]+sentences[i][4]+(end_sub_flag[0],),
                                                sentences[i][6],
                                                tuple_of_wema)
                                    if self.beam_search=="candidate_rank":
                                        if len(quasi_finished_sentences_per_beam)<retain_space:
                                            quasi_finished_sentences_per_beam.append(word_tuple)
                                    else:
                                        quasi_finished_sentences.append(word_tuple)
            if self.beam_search=="candidate_rank":
                new_sentences+=new_sentences_per_beam
                quasi_finished_sentences+=quasi_finished_sentences_per_beam
        if self.multiprocessing:
            output.put((new_sentences, quasi_finished_sentences))
        else:
            return new_sentences, quasi_finished_sentences
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Helper function to generate Figure-of-Merit (FoM) plots.
Called by 'runToolkit.py' and 'runToolkitExtended.py'.
For the full procedure, see "README.md".
For method details, please see "A toolkit for data-driven discovery of governing equations in
high-noise regimes" (2022) by C.B. Delahunt and J.N. Kutz.
For these FoM plots to appear in separate windows, first run in the python console:
%matplotlib qt
Copyright (c) 2021 Charles B. Delahunt. delahunt@uw.edu
MIT License
"""
import numpy as np
from matplotlib import pyplot as plt
def plotFoMSubplot_fn(y, x, ax, titleStr, yLims, varNames, c, thisMark, legendLoc='',
                      idealLineVal=None, offset=0):
    """
    Render one Figure-of-Merit subplot: scatter each variable's FoM values
    against iteration number, optionally with a dashed line at the ideal value.

    Parameters
    ----------
    y : np.array of floats (maybe of ints). The FoM's values
    x : np.array (vector) of ints (the iteration numbers)
    ax: matplotlib axis object
    titleStr : str
    yLims : list of floats, length = 2
    varNames : list of str
    c : list of str (color names)
    thisMark : str (marker type)
    legendLoc : str, default = '' -> no legend drawn
    idealLineVal : float, default = None-> don't draw a line at the ideal value.
    offset : float (offsets marker locations on x-axis)
    Returns
    -------
    None. Renders a subplot
    """
    numVars = len(varNames)
    if np.ndim(y) == 1:  # case: a dummy value was put in because evolutions were skipped.
        temp = np.zeros((len(y), numVars))
        for i in range(len(y)):
            temp[i, :] = y[i]
        y = temp
    if np.ndim(y) == 3:  # collapse a stray middle axis (weird ju-ju)
        y = y[:, 0, :]
    # 'is not None' (not '!= None'): identity check is the idiomatic test, and
    # it keeps idealLineVal == 0 (falsy but valid) drawing the line.
    if idealLineVal is not None:
        ax.plot(x, idealLineVal * np.ones(x.shape), 'k--')
    for i in range(numVars):
        # Small per-variable x-shift so markers for different variables don't overlap.
        ax.scatter(x + i * 0.05 + offset, y[:, i], marker=thisMark, color=c[i], s=12,
                   label=varNames[i])
    ax.set_title(titleStr, fontsize=12, fontweight='bold')
    if len(legendLoc) > 0:
        ax.legend(fontsize=12, loc=legendLoc)
    ax.set_ylim(yLims)
    ax.xaxis.set_tick_params(labelbottom=True)
    # Pass the visibility flag positionally: Matplotlib renamed the 'b'
    # keyword to 'visible' in 3.5 and later removed 'b', so 'b=True' raises
    # TypeError on current versions while a positional True works everywhere.
    ax.grid(True, which='both', axis='x')
# --------- End of plotFoMSubplot_fn---------------------
def plotFiguresOfMeritMosaics_fn(d):
"""
Function to create Figure-of-Merit (FoM) mosaics after a set of runs. 'toolkitRunWrapper'
generates, for each training trajectory, two things: a sequence of progressively sparser models
    created by linear fits to that trajectory's derivatives with progressively culled functionals;
and sequences of FoMs for these models, related to how well predicted trajectories evolved by
these models match the training trajectory and the validation trajectories.
This function plots these FoM sequences in a set of subplots. This enables the user to see
which models best balance good predicted trajectories and sparse discovered libraries. It also
allows the user to spot potentially valuable functionals (eg by a sudden drop in FoMs when that
functional was culled); for this latter purpose, the user must also consult the text history
    outputted by 'toolkitRunWrapper', which records which functionals were culled at each iteration.
This function is typically used as follows.
1. It is called at the end of 'toolkitRunWrapper'. The user then examines the mosaics
    to select a 'best' model (noted by iteration number) for each training trajectory.
2. 'plotSelectedIterations' is run with these best model numbers as input. This (a) plots
predicted trajectories of the 'best' models; and (b) writes to console an
'initialFunctionsToUseArray', which is the union of the 'best' models' discovered libraries.
3. This array is entered as a 'user entry' in 'toolkitRunWrapper', which is then run again
with the constrained union library.
4. Repeat steps 1 and 2, to decide final best models.
    Mosaic format: For each train trajectory, plot one mosaic of subplots (6 rows x 3 columns):
    roughly, the first two columns show home-trajectory FoM sequences and the last column shows
    results on each val trajectory, using the home trajectory model.
Parameters
----------
d : dict of FoM sequences
Returns
-------
None. Generates mosaics of plots, one for each training trajectory.
"""
# Unpack d, the dataDict:
seed = d['seed']
numTrajTrain = d['numTrajTrain']
variableNames = d['variableNames']
functionList = d['functionList']
indsValAll = d['indsValAll']
historyWhichIterAll = d['historyWhichIterAll']
historyCoeffArrayAll = d['historyCoeffArrayAll']
historyMinHistCorrelationForEvolutionsAll = d['historyMinHistCorrelationForEvolutionsAll']
historyInBoundsFoMAll = d['historyInBoundsFoMAll']
historyInEnvelopeFoMAll = d['historyInEnvelopeFoMAll']
historyStdDevFoMAll = d['historyStdDevFoMAll']
historyHistogramCorrelationFoMAll = d['historyHistogramCorrelationFoMAll']
historyXDotDiffEnvelopeAll = d['historyXDotDiffEnvelopeAll']
historyMinHistCorrelationForEvolutionsValAll = \
d['historyMinHistCorrelationForEvolutionsValAll']
historyInBoundsFoMValAll = d['historyInBoundsFoMValAll']
historyInEnvelopeFoMValAll = d['historyInEnvelopeFoMValAll']
historyFftCorrelationFoMAll = d['historyFftCorrelationFoMAll']
historyXDotHistogramCorrelationFoMAll = d['historyXDotHistogramCorrelationFoMAll']
historyStdDevFoMValAll = d['historyStdDevFoMValAll']
historyHistogramCorrelationFoMValAll = d['historyHistogramCorrelationFoMValAll']
historyFftCorrelationFoMValAll = d['historyFftCorrelationFoMValAll']
historyXDotHistogramCorrelationFoMValAll = d['historyXDotHistogramCorrelationFoMValAll']
historyXDotDiffEnvelopeValAll = d['historyXDotDiffEnvelopeValAll']
markerList = ['o', 'v', 's', 'P', 'd']
for traj in range(numTrajTrain):
indsVal = indsValAll[traj] # to determine which symbol to use
# 1. FoM for results on xTrain:
tag = 'Train ' + str(traj) + '. '
thisMark = 'o'
colorList = ['r', 'b', 'g', 'm', 'c']
fig, axs = plt.subplots(nrows=6, ncols=3, sharex=True, figsize=(9, 12))
xVals = np.array(historyWhichIterAll[traj]) # same for all subplots. Val runs only
# (confirm): xVals may be one too long for salvaged crashed runs (since last iteration not
# recorded due to the crash). This is checked in the Val section below.
# Make the various subplots. For each, define the y-values (FoM values) and some params,
# then call the subplot function.
# [0, 0]
r = 0 # row in mosaic
c = 0 # col in mosaic
historyCoeffArray = historyCoeffArrayAll[traj]
this = np.array(historyCoeffArray) # numCulls x numVariables x numFunctionals
this = np.sum(np.abs(this) > 1e-5, axis=2) # numCulls x numVariables
titleStr = tag + '(seed ' + str(seed) + '). Number of functionals per variable'
yLims = [-0.1, len(functionList) + 0.1]
legendLoc = 'upper right'
idealLineVal = None
plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
thisMark, legendLoc, idealLineVal)
# FoMs for derivative estimates:
# [1, 0]:
r = 1
c = 0
historyXDotDiffEnvelope = historyXDotDiffEnvelopeAll[traj]
this = np.array(historyXDotDiffEnvelope)
titleStr = tag + 'xDots 80th percentile of errors, ideal = 0'
yLims = [-0.1, 1.5] # This plot did not have a defined yLim range
legendLoc = ''
idealLineVal = 0
plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
thisMark, legendLoc, idealLineVal)
# [2, 0]:
r = 2
c = 0
historyXDotHistogramCorrelationFoM = historyXDotHistogramCorrelationFoMAll[traj]
this = np.array(historyXDotHistogramCorrelationFoM)
titleStr = tag + 'xDot histogram correlation, ideal = 1'
yLims = [-0.1, 1.5]
legendLoc = ''
idealLineVal = 1
plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
thisMark, legendLoc, idealLineVal)
# [3, 0]:
r = 3
c = 0
tag2 = 'Val '
thisValList = np.array(historyXDotDiffEnvelopeValAll[traj])
for v in range(numTrajTrain - 1):
thisMark = markerList[indsVal[v]]
this = thisValList[:, v,]
titleStr = tag2 + 'xDots 80th percentile of errors, ideal = 0'
yLims = [-0.1, 1.5]
legendLoc = ''
idealLineVal = 0
offset = -0.1 + v * 0.2 # to offset the different validation run results
plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
thisMark, legendLoc, idealLineVal, offset)
# [4, 0]:
r = 4
c = 0
tag2 = 'Val '
thisValList = np.array(historyXDotHistogramCorrelationFoMValAll[traj])
for v in range(numTrajTrain - 1):
thisMark = markerList[indsVal[v]]
this = thisValList[:, v,]
titleStr = tag2 + 'xDot histogram correlation, ideal = 1'
yLims = [-0.1, 1.5]
legendLoc = ''
idealLineVal = 1
offset = -0.1 + v * 0.2 # to offset the different validation run results
plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
thisMark, legendLoc, idealLineVal, offset)
#%% FoMs for the predicted Training trajectory:
# [0, 1]:
r = 0
c = 1
thisMark = 'o'
historyMinHistCorrelationForEvolutions = historyMinHistCorrelationForEvolutionsAll[traj]
this = np.array(historyMinHistCorrelationForEvolutions)
titleStr = tag + 'Histogram correlation between evolutions, ideal = 1'
yLims = [-0.1, 1.5]
legendLoc = ''
idealLineVal = 1
plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
thisMark, legendLoc, idealLineVal)
# [1, 1]:
r = 1
c = 1
historyInBoundsFoM = historyInBoundsFoMAll[traj]
this = np.array(historyInBoundsFoM)
titleStr = tag + 'In-bounds fraction, ideal = 1'
yLims = [-0.1, 1.1]
legendLoc = ''
idealLineVal = 1
plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
thisMark, legendLoc, idealLineVal)
# [2, 1]:
r = 2
c = 1
# Tail of the [2, 1] subplot: fraction of predicted points inside the envelope.
# (r, c and thisMark are carried over from the section preceding this chunk.)
historyInEnvelopeFoM = historyInEnvelopeFoMAll[traj]
this = np.array(historyInEnvelopeFoM)
titleStr = tag + 'In-envelope fraction, ideal = 1'
yLims = [-0.1, 1.1]
legendLoc = ''
idealLineVal = 1
plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
                  thisMark, legendLoc, idealLineVal)
# [3, 1]: relative error of the std dev (train trajectory).
r = 3
c = 1
historyStdDevFoM = historyStdDevFoMAll[traj]
this = np.array(historyStdDevFoM)
# Clip extreme outliers so a single bad iteration does not dominate the y-axis.
this[this > 2] = 1
this[this < -2] = -1  # not necessary
titleStr = tag + 'Relative error of std dev, ideal = 0'
yLims = [-0.1, 2.1]
legendLoc = ''
idealLineVal = 0
plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
                  thisMark, legendLoc, idealLineVal)
# [4, 1]: histogram correlation (train trajectory).
r = 4
c = 1
historyHistogramCorrelationFoM = historyHistogramCorrelationFoMAll[traj]
this = np.array(historyHistogramCorrelationFoM)
titleStr = tag + 'Histogram correlation, ideal = 1'
yLims = [-0.1, 1.5]
legendLoc = ''
idealLineVal = 1
plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
                  thisMark, legendLoc, idealLineVal)
# [5, 1]: FFT correlation (train trajectory).
r = 5
c = 1
historyFftCorrelationFoM = historyFftCorrelationFoMAll[traj]
this = np.array(historyFftCorrelationFoM)
titleStr = tag + 'FFT correlation, ideal = 1'
yLims = [-0.1, 1.5]
legendLoc = ''
idealLineVal = 1
plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
                  thisMark, legendLoc, idealLineVal)
#%% FoMs for the predicted Validation trajectories:
# Plot all val trajectories in one plot, using shapes to distinguish them (colors stand
# for variables, as with train trajectory results).
tag = 'Val '
# When a run crashes and we wish to salvage it, some histories will be shorter, so reduce
# xVals by 1 in this case:
if len(np.array(historyMinHistCorrelationForEvolutionsValAll[traj])[:,0,0]) < len(xVals):
    xVals = xVals[0:-1]
# skip the first box (col 3, row 0) since the functional count is same as for home traj.
# [0, 2]: histogram correlation between evolutions, one marker per val trajectory.
r = 0
c = 2
thisValList = np.array(historyMinHistCorrelationForEvolutionsValAll[traj])
# numIters x numValTrajectories x numVars array
for v in range(numTrajTrain - 1):
    thisMark = markerList[indsVal[v] + 1]
    this = np.array(thisValList[:, v, ])
    titleStr = tag + 'Histogram correlation between evolutions, ideal = 1'
    yLims = [-0.1, 1.5]
    legendLoc = ''
    idealLineVal = 1
    offset = -0.1 + v * 0.2  # to offset the different validation run results
    plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
                      thisMark, legendLoc, idealLineVal, offset)
# [1, 2]: in-bounds fraction per validation trajectory.
r = 1
c = 2
thisValList = np.array(historyInBoundsFoMValAll[traj])
for v in range(numTrajTrain - 1):
    thisMark = markerList[indsVal[v] + 1]
    this = thisValList[:, v,]
    titleStr = tag + 'In-bounds fraction, ideal = 1'
    yLims = [-0.1, 1.1]
    legendLoc = ''
    idealLineVal = 1
    offset = -0.1 + v * 0.2  # to offset the different validation run results
    plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
                      thisMark, legendLoc, idealLineVal, offset)
# [2, 2]: in-envelope fraction per validation trajectory.
r = 2
c = 2
thisValList = np.array(historyInEnvelopeFoMValAll[traj])
for v in range(numTrajTrain - 1):
    thisMark = markerList[indsVal[v] + 1]
    this = thisValList[:, v,]
    titleStr = tag + 'In-envelope fraction, ideal = 1'
    yLims = [-0.1, 1.1]
    legendLoc = ''
    idealLineVal = 1
    offset = -0.1 + v * 0.2  # to offset the different validation run results
    plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
                      thisMark, legendLoc, idealLineVal, offset)
# [3, 2]: std dev relative error per validation trajectory.
r = 3
c = 2
thisValList = np.array(historyStdDevFoMValAll[traj])
for v in range(numTrajTrain - 1):
    thisMark = markerList[indsVal[v] + 1]
    this = thisValList[:, v,]
    this[this > 2] = 1
    this[this < -2] = -1  # not necessary
    titleStr = tag + 'Relative error of std dev, ideal = 0'
    # NOTE(review): the train version of this panel uses yLims [-0.1, 2.1];
    # confirm whether [-0.1, 1.1] here is intentional.
    yLims = [-0.1, 1.1]
    legendLoc = ''
    idealLineVal = 0
    offset = -0.1 + v * 0.2  # to offset the different validation run results
    plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
                      thisMark, legendLoc, idealLineVal, offset)
# [4, 2]: histogram correlation per validation trajectory.
r = 4
c = 2
thisValList = np.array(historyHistogramCorrelationFoMValAll[traj])
for v in range(numTrajTrain - 1):
    thisMark = markerList[indsVal[v] + 1]
    this = thisValList[:, v,]
    titleStr = tag + 'Histogram correlation, ideal = 1'
    yLims = [-0.1, 1.5]
    legendLoc = ''
    idealLineVal = 1
    offset = -0.1 + v * 0.2  # to offset the different validation run results
    plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
                      thisMark, legendLoc, idealLineVal, offset)
# [5, 2]: FFT correlation per validation trajectory.
r = 5
c = 2
thisValList = np.array(historyFftCorrelationFoMValAll[traj])
for v in range(numTrajTrain - 1):
    thisMark = markerList[indsVal[v] + 1]
    this = thisValList[:, v,]
    titleStr = tag + 'FFT correlation, ideal = 1'
    yLims = [-0.1, 1.5]
    legendLoc = ''
    idealLineVal = 1
    offset = -0.1 + v * 0.2  # to offset the different validation run results
    plotFoMSubplot_fn(this, xVals, axs[r, c], titleStr, yLims, variableNames, colorList,
                      thisMark, legendLoc, idealLineVal, offset)
plt.tight_layout(pad=0.5)
fig.show()  # Show mosaic for this home trajectory.
#%%
# MIT license:
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
import os.path as path
import pytest
from pymongo_inmemory import downloader
def test_env_folders_overwrite_default_downloadfolder(monkeypatch):
    """The PYMONGOIM__DOWNLOAD_FOLDER env var takes precedence over the default."""
    override = "test_folder"
    monkeypatch.setenv("PYMONGOIM__DOWNLOAD_FOLDER", override)
    assert downloader._download_folder() == override
def test_env_folders_overwrite_default_extractfolder(monkeypatch):
    """The PYMONGOIM__EXTRACT_FOLDER env var takes precedence over the default."""
    override = "test_folder"
    monkeypatch.setenv("PYMONGOIM__EXTRACT_FOLDER", override)
    assert downloader._extract_folder() == override
def test_default_dl_folder(monkeypatch, tmpdir):
    """Without an env override, the download folder lives under CACHE_FOLDER."""
    monkeypatch.setattr(downloader, "CACHE_FOLDER", tmpdir)
    expected = path.join(tmpdir, "download")
    result = downloader._download_folder()
    assert path.exists(expected)
    assert path.samefile(result, expected)
def test_default_extract_folder(monkeypatch, tmpdir):
    """Without an env override, the extract folder lives under CACHE_FOLDER."""
    monkeypatch.setattr(downloader, "CACHE_FOLDER", tmpdir)
    expected = path.join(tmpdir, "extract")
    result = downloader._extract_folder()
    assert path.exists(expected)
    assert path.samefile(result, expected)
def test_extracted_folder(monkeypatch, tmpdir):
    """Archive file names map to per-archive folders under the extract folder
    (the extension separator becomes a dash)."""
    monkeypatch.setattr(downloader, "CACHE_FOLDER", tmpdir)
    cases = [
        ("mongodb-amazon2-x86_64-1.1.1.tar", "mongodb-amazon2-x86_64-1.1.1-tar"),
        ("mongodb-windows-x86_64-1.1.1.zip", "mongodb-windows-x86_64-1.1.1-zip"),
    ]
    for archive_name, folder_name in cases:
        assert path.samefile(
            downloader._extracted_folder(archive_name),
            path.join(tmpdir, "extract", folder_name),
        )
def test_make_folder(monkeypatch, tmpdir):
    """_mkdir_ifnot_exist creates (possibly nested) folders and returns the path."""
    single = downloader._mkdir_ifnot_exist(tmpdir, "test")
    assert path.samefile(single, path.join(tmpdir, "test"))
    assert path.exists(path.join(tmpdir, "test"))

    nested = downloader._mkdir_ifnot_exist(tmpdir, "test2", "nested")
    assert path.samefile(nested, path.join(tmpdir, "test2", "nested"))
    assert path.exists(path.join(tmpdir, "test2", "nested"))
def test_fails_if_os_unknown(monkeypatch):
    """An unrecognized platform.system() value must raise OperatingSystemNotFound."""
    monkeypatch.setattr(downloader.platform, "system", lambda: "Unknown")
    monkeypatch.setattr(downloader, "conf", lambda *args, **kwargs: None)
    with pytest.raises(downloader.OperatingSystemNotFound):
        downloader.download()
|
from collections import Counter
import copy
from Item import ItemInfo
from ItemPool import triforce_blitz_items
from Region import Region, TimeOfDay
class State(object):
    """Logic-search state: tracks which progression items have been collected
    in one world, and evaluates goal/win conditions against them."""

    def __init__(self, parent):
        # Multiset of collected progression items: item name -> count.
        self.prog_items = Counter()
        self.world = parent
        self.search = None
        # Bind the win predicate once, based on this world's goal settings,
        # so won() never has to re-check the settings.
        if self.world.settings.triforce_hunt:
            self._won = self.won_triforce_hunt
        elif self.world.settings.triforce_blitz:
            self._won = self.won_triforce_blitz
        else:
            self._won = self.won_normal

    # Kept as a property so it always reflects the current world settings.
    @property
    def is_glitched(self):
        return self.world.settings.logic_rules != 'glitchless'

    def copy(self, new_world=None):
        """Return a copy of this state, optionally re-bound to new_world."""
        if not new_world:
            new_world = self.world
        new_state = State(new_world)
        # Shallow Counter copy is enough: keys are strings, values are ints.
        new_state.prog_items = copy.copy(self.prog_items)
        return new_state

    def item_name(self, location):
        """Name of the item placed at `location`, or None if the location is empty."""
        location = self.world.get_location(location)
        if location.item is None:
            return None
        return location.item.name

    def won(self):
        """True when the goal condition bound in __init__ is satisfied."""
        return self._won()

    def won_triforce_hunt(self):
        return self.has('Triforce Piece', self.world.settings.triforce_goal_per_world)

    def won_triforce_blitz(self):
        return self.has_all_of(triforce_blitz_items)

    def won_normal(self):
        return self.has('Triforce')

    def has(self, item, count=1):
        """True if at least `count` copies of `item` have been collected."""
        return self.prog_items[item] >= count

    def has_any_of(self, items):
        """True if at least one item from `items` has been collected."""
        return any(map(self.prog_items.__contains__, items))

    def has_all_of(self, items):
        """True if every item in `items` has been collected."""
        return all(map(self.prog_items.__contains__, items))

    def count_of(self, items):
        """Number of distinct items from `items` that have been collected."""
        return len(list(filter(self.prog_items.__contains__, items)))

    def item_count(self, item):
        """Collected count for a single item (0 if never collected)."""
        return self.prog_items[item]

    def has_bottle(self, **kwargs):
        # Extra Ruto's Letter are automatically emptied, so a second letter
        # counts as a usable bottle.
        # (kwargs are accepted but unused — presumably for rule-parser call
        # compatibility; confirm before removing.)
        return self.has_any_of(ItemInfo.bottles) or self.has('Rutos Letter', 2)

    def has_hearts(self, count):
        # Warning: This only considers items that are marked as advancement items
        return self.heart_count() >= count

    def heart_count(self):
        # Warning: This only considers items that are marked as advancement items
        return (
            self.item_count('Heart Container')
            + self.item_count('Piece of Heart') // 4
            + 3  # starting hearts
        )

    def has_medallions(self, count):
        return self.count_of(ItemInfo.medallions) >= count

    def has_stones(self, count):
        return self.count_of(ItemInfo.stones) >= count

    def has_dungeon_rewards(self, count):
        return (self.count_of(ItemInfo.medallions) + self.count_of(ItemInfo.stones)) >= count

    def has_item_goal(self, item_goal):
        """True once the minimum quantity for one goal item is reached."""
        return self.prog_items[item_goal['name']] >= item_goal['minimum']

    def has_full_item_goal(self, category, goal, item_goal):
        """True once the per-world maximum quantity for one goal item is reached."""
        local_goal = self.world.goal_categories[category.name].get_goal(goal.name)
        per_world_max_quantity = local_goal.get_item(item_goal['name'])['quantity']
        return self.prog_items[item_goal['name']] >= per_world_max_quantity

    def has_all_item_goals(self):
        """True only if every item of every goal in every category is fully met."""
        for category in self.world.goal_categories.values():
            for goal in category.goals:
                if not all(map(lambda i: self.has_full_item_goal(category, goal, i), goal.items)):
                    return False
        return True

    def had_night_start(self):
        """True when the configured starting time of day is at night."""
        stod = self.world.settings.starting_tod
        # These are all not between 6:30 and 18:00
        if (stod == 'sunset' or  # 18
                stod == 'evening' or  # 21
                stod == 'midnight' or  # 00
                stod == 'witching-hour'):  # 03
            return True
        else:
            return False

    # Used for fall damage and other situations where damage is unavoidable
    def can_live_dmg(self, hearts):
        # `hearts` is the damage expressed in hearts; *4 converts to quarter-hearts.
        mult = self.world.settings.damage_multiplier
        if hearts*4 >= 3:
            return mult != 'ohko' and mult != 'quadruple'
        elif hearts*4 < 3:
            return mult != 'ohko'
        else:
            return True

    # Use the guarantee_hint rule defined in json.
    def guarantee_hint(self):
        return self.world.parser.parse_rule('guarantee_hint')(self)

    # Be careful using this function. It will not collect any
    # items that may be locked behind the item, only the item itself.
    def collect(self, item):
        if item.alias:
            self.prog_items[item.alias[0]] += item.alias[1]
        if item.advancement:
            self.prog_items[item.name] += 1

    # Be careful using this function. It will not uncollect any
    # items that may be locked behind the item, only the item itself.
    def remove(self, item):
        if item.alias and self.prog_items[item.alias[0]] > 0:
            self.prog_items[item.alias[0]] -= item.alias[1]
            # Drop the key entirely so __contains__ checks stay accurate.
            if self.prog_items[item.alias[0]] <= 0:
                del self.prog_items[item.alias[0]]
        if self.prog_items[item.name] > 0:
            self.prog_items[item.name] -= 1
            if self.prog_items[item.name] <= 0:
                del self.prog_items[item.name]

    def region_has_shortcuts(self, region_name, fallback_dungeon):
        """Whether the region's dungeon (or `fallback_dungeon` if the region has
        no dungeon) has shortcuts enabled in the settings."""
        region = self.world.get_region(region_name)
        dungeon_name = (region.dungeon and region.dungeon.name) or fallback_dungeon
        if not dungeon_name:
            return False
        return dungeon_name in self.world.settings.dungeon_shortcuts

    def __getstate__(self):
        # Pickle support: plain shallow copy of the instance dict.
        return self.__dict__.copy()

    def __setstate__(self, state):
        self.__dict__.update(state)
|
from gym_cooking.environment.game.game import Game
from gym_cooking.environment import cooking_zoo
# Experiment configuration.
n_agents = 2
num_humans = 1
max_steps = 100  # step budget for the interactive Game loop
render = False  # NOTE(review): unused below — confirm before removing
level = 'open_room_salad'
seed = 1  # NOTE(review): unused below — confirm before removing
record = False
max_num_timesteps = 1000  # step budget for the underlying environment
recipes = ["TomatoLettuceOnionSalad", 'TomatoLettuceOnionSalad']  # one recipe per agent
parallel_env = cooking_zoo.parallel_env(level=level, num_agents=n_agents, record=record,
                                        max_steps=max_num_timesteps, recipes=recipes)
action_spaces = parallel_env.action_spaces
# Action space for the second (non-human) player.
player_2_action_space = action_spaces["player_1"]
class CookingAgent:
    """Baseline agent that ignores its observation and acts uniformly at random."""

    def __init__(self, action_space):
        self.action_space = action_space

    def get_action(self, observation) -> int:
        # The observation is deliberately unused: this is a random policy.
        return self.action_space.sample()
cooking_agent = CookingAgent(player_2_action_space)
# One human player plus the random agent; run the interactive game loop.
game = Game(parallel_env, num_humans, [cooking_agent], max_steps)
store = game.on_execute()
print("done")
|
from sphinx.directives.code import CodeBlock
class PylitFile(CodeBlock):
    """A Sphinx code-block directive whose caption is rendered in noweb
    chunk style: ``<<caption>>==``."""

    def run(self):
        # Treat a missing caption as empty so the markers are always emitted.
        caption = self.options.get('caption') or ""
        self.options['caption'] = '<<' + caption + '>>=='
        # format the block and return
        return super(PylitFile, self).run()
|
"""Test 2D autorefocusing of legacy minimizer"""
import numpy as np
import nrefocus
import pytest
from test_helper import load_cell
@pytest.mark.filterwarnings('ignore::nrefocus.minimizers.mz_legacy.'
                            'LegacyDeprecationWarning')
def test_2d_autofocus_cell_helmholtz_average_gradient():
    """Autofocus a real cell hologram with the legacy minimizer (Helmholtz kernel)."""
    rf = nrefocus.iface.RefocusNumpy(field=load_cell("HL60_field.zip"),
                                     wavelength=647e-9,
                                     pixel_size=0.139e-6,
                                     kernel="helmholtz",
                                     )
    # attempt to autofocus with standard arguments
    d = rf.autofocus(metric="average gradient",
                     minimizer="legacy",
                     interval=(-5e-6, 5e-6))
    # Regression values pinned for the legacy minimizer implementation.
    assert np.allclose(d, -8.69809203142537e-07, atol=0)
    nfield = rf.propagate(d)
    assert np.allclose(nfield[10, 10],
                       1.045874817165857-0.020467790949516538j,
                       atol=0)
@pytest.mark.filterwarnings('ignore::nrefocus.minimizers.mz_legacy.'
                            'LegacyDeprecationWarning')
def test_2d_autofocus_helmholtz_average_gradient():
    """Propagate a synthetic phase field, then recover the distance via autofocus."""
    field = 1*np.exp(1j*np.linspace(.1, .5, 256)).reshape(16, 16)
    d = 5
    nm = 1.533
    res = 8.25
    method = "helmholtz"
    # first propagate the field
    rfield = nrefocus.refocus(field=field,
                              d=d,
                              nm=nm,
                              res=res,
                              method=method)
    # then try to refocus it
    d, nfield = nrefocus.autofocus(
        field=rfield,
        nm=nm,
        res=res,
        ival=(-1.5*d, -0.5*d),
        roi=None,
        metric="average gradient",
        minimizer="legacy",
        padding=True,
        num_cpus=1,
    )
    # Pinned regression distance; padding makes refocusing slightly inexact,
    # hence the loose amplitude/phase tolerances below.
    assert np.allclose(d, -3.263187429854096)
    assert np.allclose(0, np.angle(nfield/rfield), atol=.047)
    assert np.allclose(1, np.abs(nfield/rfield), atol=.081)
@pytest.mark.filterwarnings('ignore::nrefocus.minimizers.mz_legacy.'
                            'LegacyDeprecationWarning')
def test_2d_autofocus_helmholtz_average_gradient_zero():
    """With d=0 and no padding, autofocusing must reproduce the field exactly."""
    field = 1*np.exp(1j*np.linspace(.1, .5, 256)).reshape(16, 16)
    d = 0
    nm = 1.533
    res = 8.25
    method = "helmholtz"
    # first propagate the field
    rfield = nrefocus.refocus(field=field,
                              d=d,
                              nm=nm,
                              res=res,
                              method=method,
                              padding=False
                              )
    # then try to refocus it
    _, nfield = nrefocus.autofocus(
        field=rfield,
        nm=nm,
        res=res,
        ival=(-1.5*d, -0.5*d),
        roi=None,
        metric="average gradient",
        minimizer="legacy",
        padding=False,  # without padding, result must be exact
        num_cpus=1,
    )
    # Compare real/imag components via the float view of the complex arrays.
    assert np.allclose(nfield.flatten().view(float),
                       rfield.flatten().view(float))
@pytest.mark.filterwarnings('ignore::nrefocus.minimizers.mz_legacy.'
                            'LegacyDeprecationWarning')
def test_2d_autofocus_fresnel_average_gradient():
    """Same round-trip as the Helmholtz test, but with the Fresnel kernel."""
    field = 1*np.exp(1j*np.linspace(.1, .5, 256)).reshape(16, 16)
    d = 5
    nm = 1.533
    res = 8.25
    method = "fresnel"
    # first propagate the field
    rfield = nrefocus.refocus(field=field,
                              d=d,
                              nm=nm,
                              res=res,
                              method=method)
    # then try to refocus it
    _, nfield = nrefocus.autofocus(
        field=rfield,
        nm=nm,
        res=res,
        ival=(-1.5*d, -0.5*d),
        roi=None,
        metric="average gradient",
        minimizer="legacy",
        padding=True,
        num_cpus=1)
    # Fresnel is an approximation, so only amplitude/phase closeness is checked.
    assert np.allclose(0, np.angle(nfield/rfield), atol=.125)
    assert np.allclose(1, np.abs(nfield/rfield), atol=.147)
@pytest.mark.filterwarnings('ignore::nrefocus.minimizers.mz_legacy.'
                            'LegacyDeprecationWarning')
def test_2d_autofocus_return_grid_field():
    """With ret_grid/ret_field, the metric minimum must sit at the returned distance."""
    rf = nrefocus.iface.RefocusNumpy(field=load_cell("HL60_field.zip"),
                                     wavelength=647e-9,
                                     pixel_size=0.139e-6,
                                     kernel="helmholtz",
                                     )
    # attempt to autofocus with standard arguments
    d, (dgrid, mgrid), nfield = rf.autofocus(
        metric="average gradient",
        minimizer="legacy",
        interval=(-5e-6, 5e-6),
        ret_grid=True,
        ret_field=True,
    )
    # The grid index of the metric minimum must match the index closest to d.
    idx_metric_min = np.argmin(mgrid)
    idx_distance = np.argmin(np.abs(dgrid - d))
    assert idx_metric_min == idx_distance
@pytest.mark.filterwarnings('ignore::nrefocus.minimizers.mz_legacy.'
                            'LegacyDeprecationWarning')
def test_2d_autofocus_stack_same_dist_nopadding():
    """same_dist=True must match per-slice autofocus when every slice of the
    stack focuses at the same distance (no padding)."""
    d = 5.5
    nm = 1.5133
    res = 6.25
    method = "helmholtz"
    size = 10
    metric = "average gradient"
    stack = 1*np.exp(1j*np.linspace(.1, .5, size**3)).reshape(size, size, size)
    rfield = nrefocus.refocus_stack(fieldstack=stack,
                                    d=d,
                                    nm=nm,
                                    res=res,
                                    method=method)
    ds, nfield = nrefocus.autofocus_stack(
        fieldstack=rfield.copy(),
        nm=nm,
        res=res,
        ival=(-1.5*d, -0.5*d),
        roi=None,
        metric=metric,
        minimizer="legacy",
        padding=False,
        same_dist=False,
        num_cpus=1,
        copy=True)
    # reconstruction distance is same in above case
    ds_same, nfield_same = nrefocus.autofocus_stack(
        fieldstack=rfield.copy(),
        nm=nm,
        res=res,
        ival=(-1.5*d, -0.5*d),
        roi=None,
        metric=metric,
        minimizer="legacy",
        padding=False,
        same_dist=True,
        num_cpus=1,
        copy=True)
    # Pinned regression distance; all slices must agree with each other and
    # with the same_dist run.
    assert np.allclose(np.mean(ds), -4.867283950617284)
    assert np.all(np.array(ds) == ds_same)
    assert np.all(np.array(ds) == np.mean(ds))
    assert np.allclose(nfield.flatten().view(float),
                       nfield_same.flatten().view(float),
                       atol=.000524)
@pytest.mark.filterwarnings('ignore::nrefocus.minimizers.mz_legacy.'
                            'LegacyDeprecationWarning')
def test_2d_autofocus_stack_same_dist():
    """Stack autofocus round-trip with padding: refocused stack approximates the
    propagated one, and same_dist agrees with the per-slice distances."""
    d = 5.5
    nm = 1.5133
    res = 6.25
    method = "helmholtz"
    size = 10
    metric = "average gradient"
    stack = 1*np.exp(1j*np.linspace(.1, .5, size**3)).reshape(size, size, size)
    rfield = nrefocus.refocus_stack(fieldstack=stack,
                                    d=d,
                                    nm=nm,
                                    res=res,
                                    method=method,
                                    padding=True)
    ds, nfield = nrefocus.autofocus_stack(
        fieldstack=1*rfield,
        nm=nm,
        res=res,
        ival=(-1.5*d, -0.5*d),
        roi=None,
        metric=metric,
        minimizer="legacy",
        padding=True,
        same_dist=False,
        num_cpus=1,
        copy=True)
    assert np.allclose(np.array(rfield).flatten().view(float),
                       np.array(nfield).flatten().view(float),
                       atol=.013)
    # reconstruction distance is same in above case
    ds_same, nfield_same = nrefocus.autofocus_stack(
        fieldstack=1*rfield,
        nm=nm,
        res=res,
        ival=(-1.5*d, -0.5*d),
        roi=None,
        metric=metric,
        minimizer="legacy",
        padding=True,
        same_dist=True,
        num_cpus=1,
        copy=True)
    # Spot-check two pixels, then verify all slices share one distance and that
    # the same_dist run reproduces both distances and fields.
    assert np.allclose(nfield[0][8][8],
                       0.9900406072155992+0.1341183159587472j)
    assert np.allclose(nfield[0][2][8],
                       0.9947454248517085+0.11020637810883656j)
    assert np.allclose(np.mean(ds), -4.8240740740740735)
    assert np.allclose(np.array(ds), np.mean(ds))
    assert np.allclose(np.array(ds), ds_same)
    assert np.allclose(nfield, nfield_same)
if __name__ == "__main__":
    # Run every test function defined in this module.
    for name, obj in list(locals().items()):
        if name.startswith("test_") and callable(obj):
            obj()
|
# crossCorrelationPIV.py
# ======================
#
# Particle image velocimetry (PIV) based on cross correlation. CLIJ does the PIV in all three dimensions individually.
# This allows fast analysis of optical flow with the drawback of not being super precised. It is assumed that CLIJ
# over estimates the flow a bit because of the dimension reduction.
#
# Author: Robert Haase, rhaase@mpi-cbg.de
# April 2019
#########################################
from ij import IJ
from ij.gui import NewImage
from net.haesleinhuepf.clij2 import CLIJ2
from net.haesleinhuepf.clijx import CLIJx

# load example image
imp = IJ.openImage("c:/structure/code/clij-docs/src/main/resources/blobs.tif")
imp.show()
IJ.run(imp, "32-bit", "")

# Build a synthetic x-shift field: a 20x20 pixel patch with a shift of 2 px.
vfXImp = NewImage.createFloatImage("vf", imp.getWidth(), imp.getHeight(), 1, NewImage.FILL_BLACK)
for x in range(100, 120):
    for y in range(100, 120):
        vfXImp.getProcessor().setf(x, y, 2)

clij2 = CLIJ2.getInstance()
clijx = CLIJx.getInstance()

# push images to GPU and create memory for vector field
# (renamed from `input` to avoid shadowing the Python builtin)
inputImage = clij2.push(imp)
vfraw = clij2.push(vfXImp)
vf = clij2.create(inputImage)
shifted = clij2.create(inputImage)

# make a smoothly changing warp
clij2.blur(vfraw, vf, 5, 5)

# apply vector field (same field used for both x and y displacement)
clij2.applyVectorfield(inputImage, vf, vf, shifted)

# analyse shift with PIV; the last argument is the correlation window radius
vfXAnalysed = clij2.create(inputImage)
vfYAnalysed = clij2.create(inputImage)
clijx.particleImageVelocimetry2D(inputImage, shifted, vfXAnalysed, vfYAnalysed, 5)

# show analysed vector field
clij2.show(vfXAnalysed, "vfXAnalysed")
clij2.show(vfYAnalysed, "vfYAnalysed")

# clean up GPU memory
clij2.clear()
|
import math
import numpy as np
import string
import random
import json
import argparse
import torch as T
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import os
from transformers import *
import sys
sys.path.append('../')
from DataLoader.bucket_and_batch import bucket_and_batch
from model.BERT_NL import Encoder as BERT_NL_Encoder
from model.BERT_BiLSTM import Encoder as BERT_BiLSTM_Encoder
from model.BERT_BiLSTM_attn import Encoder as BERT_BiLSTM_attn_Encoder
from model.BERT_attn_BiLSTM import Encoder as BERT_attn_BiLSTM_Encoder
from model.BERT_attn_BiLSTM_attn import Encoder as BERT_attn_BiLSTM_attn_Encoder
from model.BERT_capsule_BiLSTM_attn import Encoder as BERT_capsule_BiLSTM_attn_Encoder
from model.BERT_capsule_BiLSTM_capsule import Encoder as BERT_capsule_BiLSTM_capsule_Encoder
from model.BERT_capsule import Encoder as BERT_capsule_Encoder
import modules.utils as utils
import modules.eval as eval
parser = argparse.ArgumentParser(description='Model Name')
parser.add_argument('--model', type=str, default="BERT_BiLSTM")
flags = parser.parse_args()
model_name = flags.model
print("\n\nTesting Model: {}\n\n".format(model_name))
# Map CLI model names to encoder classes.
model_dict = {'BERT_NL': BERT_NL_Encoder,
              'BERT_BiLSTM': BERT_BiLSTM_Encoder,
              'BERT_BiLSTM_attn': BERT_BiLSTM_attn_Encoder,
              'BERT_attn_BiLSTM': BERT_attn_BiLSTM_Encoder,
              'BERT_attn_BiLSTM_attn': BERT_attn_BiLSTM_attn_Encoder,
              'BERT_capsule_BiLSTM_attn': BERT_capsule_BiLSTM_attn_Encoder,
              'BERT_capsule_BiLSTM_capsule': BERT_capsule_BiLSTM_capsule_Encoder,
              'BERT_capsule': BERT_capsule_Encoder}
# Unknown model names silently fall back to BERT_BiLSTM.
Encoder = model_dict.get(model_name, BERT_BiLSTM_Encoder)
device = T.device('cuda' if T.cuda.is_available() else 'cpu')
print(device)
if device == T.device('cuda'):
    T.set_default_tensor_type(T.cuda.FloatTensor)
else:
    T.set_default_tensor_type(T.FloatTensor)
random.seed(101)  # fixed seed so the displayed examples are reproducible
bnb = bucket_and_batch()
test_batch_size = 32
accu_step = 64/test_batch_size  # NOTE(review): training-time constant; unused here
max_grad_norm = 2  # NOTE(review): training-time constant; unused here
with open('../Processed_Data/test_data.json') as file:
    data = json.load(file)
# print(len(data["tweets"]))
test_texts = data["tweets"]
test_labels = data["labels"]
test_binary_labels = data["binary_labels"]
with open('../Processed_Data/label_info.json') as file:
    data = json.load(file)
labels2idx = data["labels2idx"]
binary_labels2idx = data["binary_labels2idx"]
label_weights = data["label_weights"]
binary_label_weights = data["binary_label_weights"]
# Inverse mappings for pretty-printing predictions.
idx2labels = {v: k for k, v in labels2idx.items()}
binary_idx2labels = {v: k for k, v in binary_labels2idx.items()}
# Re-order the class weights by label index and move them to the device.
label_weights_idx = [i for i in range(len(labels2idx))]
label_weights = [label_weights[idx2labels[id]] for id in label_weights_idx]
label_weights = T.tensor(label_weights).to(device)
binary_label_weights_idx = [0, 1]
binary_label_weights = [binary_label_weights[binary_idx2labels[id]]
                        for id in binary_label_weights_idx]
binary_label_weights = T.tensor(binary_label_weights).to(device)
model = Encoder(classes_num=len(labels2idx))
model = model.to(device)
parameter_count = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Parameter Count: ", parameter_count)
# Split parameters into non-BERT layers and the allowed top BERT layers
# (mirrors the grouping used during fine-tuning).
parameters = []
BERT_parameters = []
allowed_layers = [11, 10, 9, 8, 7, 6]
for name, param in model.named_parameters():
    if "BERT" not in name:
        parameters.append(param)
        print(name)
        print(param.size())
    else:
        for layer_num in allowed_layers:
            layer_num = str(layer_num)
            if ".{}.".format(layer_num) in name:
                BERT_parameters.append(param)
                print(name)
                print(param.size())
                break
def display(texts, predictions, labels, binary_predictions, binary_labels, label_masks):
    """Print one randomly chosen example with its predicted and gold labels."""
    global idx2labels
    global binary_idx2labels
    sample_idx = random.choice(np.arange(len(texts)).tolist())
    shown_prediction = idx2labels[predictions[sample_idx]]
    shown_gold = idx2labels[labels[sample_idx]]
    if label_masks[sample_idx] == 0:
        # Unlabeled example: flag the prediction and blank out the gold label.
        shown_prediction = shown_prediction + " (N\A)"
        shown_gold = "(unlabeled)"
    shown_binary_prediction = binary_idx2labels[binary_predictions[sample_idx]]
    shown_binary_gold = binary_idx2labels[binary_labels[sample_idx]]
    print("\n\nExample Prediction\n")
    print("Text: {}\n".format(texts[sample_idx]))
    print("Prediction: {}, Gold: {}, Binary Prediction: {}, Binary Gold: {}\n".format(
        shown_prediction, shown_gold, shown_binary_prediction, shown_binary_gold))
def predict(text_ids, labels, binary_labels, input_mask, label_mask, train=True):
    """Run one forward pass (no gradients) and return predictions and the loss."""
    global model
    global label_weights
    global binary_label_weights
    with T.no_grad():
        # Move the batch to the compute device.
        text_ids = T.tensor(text_ids).to(device)
        labels = T.tensor(labels).to(device)
        binary_labels = T.tensor(binary_labels).to(device)
        input_mask = T.tensor(input_mask).float().to(device)
        label_mask = T.tensor(label_mask).float().to(device)
        # Select train/eval mode (affects dropout etc.), then run the encoder.
        model = model.train() if train else model.eval()
        binary_logits, logits = model(text_ids, input_mask)
        # Binary head: threshold at 0.5; multi-class head: argmax over classes.
        binary_predictions = np.where(binary_logits.view(-1).detach().cpu().numpy() > 0.5, 1, 0)
        predictions = T.argmax(logits, dim=-1).detach().cpu().numpy()
        loss = utils.cross_entropy(model, logits, labels, binary_logits, binary_labels,
                                   label_weights, binary_label_weights, label_mask)
    T.cuda.empty_cache()
    return predictions, binary_predictions, loss
test_batches_texts, test_batches_text_ids, \
    test_batches_labels, test_batches_binary_labels, \
    test_batches_mask, test_batches_label_masks = bnb.bucket_and_batch(
        test_texts, test_labels, test_binary_labels, labels2idx, binary_labels2idx, test_batch_size)
print("Test batches loaded")
display_step = 100
example_display_step = 1  # 500
patience = 5  # NOTE(review): early-stopping constant; unused during testing
print('Loading pre-trained weights for the model...')
checkpoint = T.load("../Model_Backup/{}.pt".format(model_name))
print(checkpoint["past epoch"])
model.load_state_dict(checkpoint['model_state_dict'])
print('\nRESTORATION COMPLETE\n')
batches_indices = [i for i in range(0, len(test_batches_texts))]
# NOTE(review): shuffled but never used — the loop below iterates in order.
random.shuffle(batches_indices)
total_test_cost = 0
# Accumulators over all batches for whole-test-set metrics.
batch_labels = []
batch_binary_labels = []
batch_predictions = []
batch_binary_predictions = []
batch_label_masks = []
for i in range(0, len(test_batches_texts)):
    # if i % display_step == 0:
    #     print("Testing Batch {}".format(i+1))
    with T.no_grad():
        predictions, binary_predictions, loss = predict(text_ids=test_batches_text_ids[i],
                                                        labels=test_batches_labels[i],
                                                        binary_labels=test_batches_binary_labels[i],
                                                        input_mask=test_batches_mask[i],
                                                        label_mask=test_batches_label_masks[i],
                                                        train=False)
        cost = loss.item()
    total_test_cost += cost
    predictions = predictions.tolist()
    binary_predictions = binary_predictions.tolist()
    labels = test_batches_labels[i].tolist()
    binary_labels = test_batches_binary_labels[i].tolist()
    label_masks = test_batches_label_masks[i].tolist()
    batch_labels += labels
    batch_binary_labels += binary_labels
    batch_predictions += predictions
    batch_binary_predictions += binary_predictions
    batch_label_masks += label_masks
    if i % example_display_step == 0:
        display(test_batches_texts[i],
                predictions, labels,
                binary_predictions, binary_labels,
                label_masks)
# Whole-test-set metrics (binary and multi-class precision/recall/F1/accuracy).
binary_prec, binary_rec, binary_acc = eval.binary_metrics(batch_binary_predictions,
                                                          batch_binary_labels)
prec, rec, acc = eval.multi_metrics(
    batch_predictions, batch_labels, batch_label_masks, idx2labels)
binary_test_F1 = eval.compute_F1(binary_prec, binary_rec)
test_F1 = eval.compute_F1(prec, rec)
test_len = len(test_batches_texts)
avg_test_cost = total_test_cost/test_len
print("\n\nTESTING\n\n")
print("Cost = " +
      "{:.3f}".format(avg_test_cost)+", Binary Precision = " +
      "{:.3f}".format(binary_prec)+", Binary Recall = " +
      "{:.3f}".format(binary_rec)+", Binary F1 = " +
      "{:.3f}".format(binary_test_F1)+", Binary Accuracy = " +
      "{:.3f}".format(binary_acc)+", Multi-Precision = " +
      "{:.3f}".format(prec)+", Multi-Recall = " +
      "{:.3f}".format(rec)+", Multi-F1 = " +
      "{:.3f}".format(test_F1)+", Multi-Accuracy = " +
      "{:.3f}".format(acc))
|
'''Returns a fact to indicate if this is a physical or virtual machine'''
# sysctl function by Michael Lynn
# https://gist.github.com/pudquick/581a71425439f2cf8f09
from __future__ import absolute_import, print_function
import plistlib
import subprocess
from ctypes import CDLL, c_uint, byref, create_string_buffer
from ctypes.util import find_library
libc = CDLL(find_library('c'))
def sysctl(name, is_string=True):
    '''Wrapper for sysctl so we don't have to use subprocess.

    Returns the sysctl value as text (when is_string) or raw bytes.
    '''
    # BUG FIX (Python 3): ctypes c_char_p arguments must be bytes, and the
    # returned buffer is bytes; encode the name and decode string results so
    # callers can keep comparing against str. Safe on Python 2 for ASCII names.
    if isinstance(name, str):
        name = name.encode('utf-8')
    size = c_uint(0)
    # Find out how big our buffer will be
    libc.sysctlbyname(name, None, byref(size), None, 0)
    # Make the buffer
    buf = create_string_buffer(size.value)
    # Re-run, but provide the buffer
    libc.sysctlbyname(name, buf, byref(size), None, 0)
    if is_string:
        value = buf.value
        if isinstance(value, bytes):
            value = value.decode('utf-8', 'replace')
        return value
    return buf.raw
def is_virtual_machine():
    '''Returns True if this is a VM, False otherwise'''
    features = sysctl('machdep.cpu.features')
    # BUG FIX (Python 3): sysctl may return bytes, in which case
    # `'VMM' in bytes_list` is always False; normalize to str first.
    if isinstance(features, bytes):
        features = features.decode('utf-8', 'replace')
    # The hypervisor-present CPU feature flag is 'VMM'.
    return 'VMM' in features.split()
def get_machine_type():
    '''Return the machine type: physical, vmware, virtualbox, parallels or
    unknown_virtual'''
    if not is_virtual_machine():
        return 'physical'
    # this is a virtual machine; see if we can tell which vendor
    try:
        proc = subprocess.Popen(['/usr/sbin/system_profiler', '-xml',
                                 'SPEthernetDataType', 'SPHardwareDataType'],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output = proc.communicate()[0]
        # BUG FIX: plistlib.readPlistFromString was removed in Python 3.9;
        # use plistlib.loads when available, keeping the old call for Python 2.
        if hasattr(plistlib, 'loads'):
            plist = plistlib.loads(output)
        else:
            plist = plistlib.readPlistFromString(output)
        br_version = plist[1]['_items'][0]['boot_rom_version']
        if 'VMW' in br_version:
            return 'vmware'
        elif 'VirtualBox' in br_version:
            return 'virtualbox'
        else:
            ethernet_vid = plist[0]['_items'][0]['spethernet_vendor-id']
            if '0x1ab8' in ethernet_vid:
                return 'parallels'
    except (IOError, KeyError, OSError, IndexError, ValueError):
        # Missing keys, empty _items lists, or malformed plist output:
        # fall through to the generic label below.
        pass
    return 'unknown_virtual'
def fact():
    '''Return our physical_or_virtual fact'''
    return {'physical_or_virtual': get_machine_type()}


if __name__ == '__main__':
    # Allow running this module directly to inspect the fact.
    print(fact())
|
"""
Feature preprocessing of data, such as expanding
categorical features to numerical ones.
"""
from sklearn.base import ClassifierMixin, BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
import numpy as np
class ColumnSelector(BaseEstimator, TransformerMixin):
    """Selects a single column with index `key` from some matrix X."""

    def __init__(self, key, row_space=True, as_matrix=True):
        self.key = key
        self.row_space = row_space  # True: numpy-style [:, key]; False: pandas-style [key]
        self.as_matrix = as_matrix  # True: return an (n, 1) matrix; False: a flat vector

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn during fitting.
        return self

    def transform(self, data_matrix):
        """Return the selected column as a numpy array (matrix or vector)."""
        if self.row_space:
            selected = data_matrix[:, [self.key]]  # eg numpy array
        else:
            selected = data_matrix[[self.key]]  # eg pandas dataframe
        selected = np.array(selected)
        return selected if self.as_matrix else selected[:, 0]
class OneHotEncoder(BaseEstimator, TransformerMixin):
    """Wrapper around LabelBinarizer. Assumes that input X to fit and transform is a single
    column matrix of categorical values."""

    def fit(self, X, y=None):
        """Learn the categories present in the single column of X."""
        # CONSISTENCY FIX: fit() accepted any sequence of rows while transform()
        # required an ndarray; np.asarray makes both accept the same inputs.
        self.encoder = LabelBinarizer()
        self.encoder.fit(np.asarray(X)[:, 0])
        return self

    def transform(self, X, y=None):
        """One-hot encode the single column of X."""
        return self.encoder.transform(np.asarray(X)[:, 0])
class IntegerEncoder(BaseEstimator, TransformerMixin):
    """Wrapper around LabelEncoder. Assumes that input X to fit and transform is a single
    column matrix of categorical values."""
    # DOC FIX: the original docstring said "LabelBinarizer" (copy-paste error);
    # this class wraps LabelEncoder, producing integer codes, not one-hot rows.

    def fit(self, X, y=None):
        """Learn the categories present in the single column of X."""
        # np.asarray lets fit() accept lists of rows as well as ndarrays,
        # matching OneHotEncoder above.
        self.encoder = LabelEncoder()
        self.encoder.fit(np.asarray(X)[:, 0])
        return self

    def transform(self, X, y=None):
        """Integer-encode the single column of X, returned as a column vector."""
        return self.encoder.transform(np.asarray(X)[:, 0])[:, np.newaxis]
from sqlalchemy import Column, String, Integer, Boolean
from honeypot_detection.database.base import Base
from honeypot_detection.database.dictionary import Dictionary
class HoneyBadgerLabel(Dictionary):
    """Dictionary (lookup) table of HoneyBadger label names."""
    __tablename__ = "honey_badger_labels"
class HoneyBadgerNormalizedContractLabel(Base):
    """HoneyBadger label assigned to a normalized contract address."""
    __tablename__ = "honey_badger_normalized_contract_labels"
    # 0x-prefixed Ethereum address.
    address = Column(String(length=42), primary_key=True, autoincrement=False)  # fixed size
    # Presumably references honey_badger_labels; no FK constraint declared — confirm.
    honey_badger_label_id = Column(Integer())
    evaluation_positive = Column(Boolean())
class HoneyBadgerContractLabel(Base):
    """HoneyBadger label assigned to a (non-normalized) contract address."""
    __tablename__ = "honey_badger_contract_labels"
    # 0x-prefixed Ethereum address.
    address = Column(String(length=42), primary_key=True, autoincrement=False)  # fixed size
    # Presumably references honey_badger_labels; no FK constraint declared — confirm.
    honey_badger_label_id = Column(Integer())
    evaluation_positive = Column(Boolean())
|
from django.test.testcases import TestCase
from .utils import create_group_community
class GroupModelTest(TestCase):
    """Model-level tests for community groups."""

    def test_grouprules_created(self):
        # Creating a community group must also attach a rules object.
        community = create_group_community()
        self.assertIsNotNone(community.rules, msg='Rules not created')
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 15 16:04:10 2019
@author: danpo
This script compares the CFF assay's waveform alignment when I use two strategies
for finding on/off windows: with markers for on, and without markers for on.
"""
import importlib
import erg
import erg.plotting as plotting
# Reload so that in-session edits to the erg package are picked up
# (useful in interactive / Spyder workflows).
importlib.reload(erg)
importlib.reload(erg.plotting)
import numpy as np
import matplotlib.pyplot as plt
wf_w_markers = r'C:\Users\danpo\Documents\BYB\BYB_Recording_2019-12-09_23.50.47.wav'
# NOTE(review): the events file below is never used in this script —
# presumably intended for the marker-based comparison; confirm.
text_w_markers = r'C:\Users\danpo\Documents\BYB\BYB_Recording_2019-12-09_23.50.47-events.txt'
ergram = erg.ERG(wf_w_markers)
p = plotting.Plotting(ergram)
|
from jnpr.junos.op.phyport import *
from jnpr.junos import Device

# Open a NETCONF session to the target device.
dev = Device(user='netconf', host='172.16.0.1', password='test123')
dev.open()

# Fetch the physical-port operational table and dump it as CSV.
# print() calls (instead of the Python-2 print statement used before) keep the
# script consistent with the existing print("%s,...") call and Python-3 compatible.
ports = PhyPortTable(dev).get()
print("Port,Status,Flapped")  # Print Header for CSV
for port in ports:
    print("%s,%s,%s" % (port.key, port.oper, port.flapped))
print("\n\n")

# Release the NETCONF session (previously left open on exit).
dev.close()
|
"""
DB Model for Likes and
relevant junction tables
"""
import datetime
from sqlalchemy.sql import and_, select
from app.main import db, login_manager
class Reaction(db.Model):
    """
    Reaction (e.g. like) left by a user on a content item.

    Columns
    -----------
    :id: int [pk]
    :value: int
    :user_id: int [Foreign Key -> User.id]
    :base_id: int [Foreign Key -> Base.id]
    :creation_time: DateTime
    """
    # Columns
    id = db.Column(db.Integer, primary_key=True)
    value = db.Column(db.Integer)
    user_id = db.Column(db.Integer, db.ForeignKey("user.id"))
    base_id = db.Column(db.Integer, db.ForeignKey("base.id"))
    # Pass the callable, NOT its result: `datetime.datetime.now()` would be
    # evaluated once at import time and freeze that timestamp as the default
    # for every row.
    creation_time = db.Column(db.DateTime, default=datetime.datetime.now)

    def __init__(self, value, user_id, post_id):
        """Create a reaction of `value` by `user_id` on the item `post_id`."""
        self.value = value
        self.user_id = user_id
        # NOTE(review): this used to assign `self.post_id`, which is not a
        # mapped column, so the value was silently dropped and `base_id`
        # stayed NULL. It is mapped onto `base_id` here — confirm callers
        # pass the Base row id.
        self.base_id = post_id

    def update_col(self, key, value):
        """Set attribute `key` to `value` and commit the session."""
        setattr(self, key, value)
        db.session.commit()
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
class RMSNorm(nn.Module):
    """Root Mean Square Layer Normalization (https://arxiv.org/abs/1910.07467 style)."""

    def __init__(self, d, p=-1., eps=1e-8, bias=False):
        """
        Root Mean Square Layer Normalization
        :param d: model size (size of the last dimension of the input)
        :param p: partial RMSNorm, valid value [0, 1], default -1.0 (disabled)
        :param eps: epsilon value, default 1e-8
        :param bias: whether use bias term for RMSNorm, disabled by
            default because RMSNorm doesn't enforce re-centering invariance.
        """
        super(RMSNorm, self).__init__()
        self.eps = eps
        self.d = d
        self.p = p
        self.bias = bias

        # Assigning an nn.Parameter attribute registers it automatically;
        # the explicit register_parameter() calls previously here were redundant.
        self.scale = nn.Parameter(torch.ones(d))
        if self.bias:
            self.offset = nn.Parameter(torch.zeros(d))

    def forward(self, x):
        """Scale x by the inverse of its (partial) RMS over the last dimension."""
        if self.p < 0. or self.p > 1.:
            # Full RMSNorm: use the entire last dimension.
            norm_x = x.norm(2, dim=-1, keepdim=True)
            d_x = self.d
        else:
            # Partial RMSNorm: estimate the RMS from the first p*d features only.
            partial_size = int(self.d * self.p)
            partial_x, _ = torch.split(x, [partial_size, self.d - partial_size], dim=-1)
            norm_x = partial_x.norm(2, dim=-1, keepdim=True)
            d_x = partial_size

        # rms = ||x||_2 / sqrt(d)
        rms_x = norm_x * d_x ** (-1. / 2)
        x_normed = x / (rms_x + self.eps)

        if self.bias:
            return self.scale * x_normed + self.offset
        return self.scale * x_normed
|
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
def generate_dataset(D, N, K, mu0, sigma0, sigma):
    """Sample N points in R^D from a K-component Gaussian mixture.

    Component means are drawn from N(mu0, sigma0); each point i is then drawn
    from N(mu[z[i]], sigma), where z[i] is a uniformly random component label.

    :param D: dimensionality of each sample
    :param N: number of samples
    :param K: number of mixture components
    :param mu0: prior mean for the component means, shape (D,)
    :param sigma0: prior covariance for the component means, shape (D, D)
    :param sigma: shared within-component covariance, shape (D, D)
    :return: (X, z) with X of shape (N, D) and integer labels z of shape (N,)
    """
    z = np.random.randint(K, size=N)
    mu = np.random.multivariate_normal(mean=mu0, cov=sigma0, size=K)
    X = np.zeros((N, D))
    # range() replaces the Python-2-only xrange(), which raises NameError on Python 3.
    for i in range(N):
        X[i] = np.random.multivariate_normal(mean=mu[z[i]], cov=sigma)
    return X, z
def plot_dataset(X, z, K):
    """Scatter-plot 2-D points X, coloring each by its component label z in [0, K).

    :param X: data matrix of shape (N, 2)
    :param z: integer component labels, shape (N,)
    :param K: number of components / colors
    """
    colors = sns.color_palette(n_colors=K)
    # range() replaces the Python-2-only xrange(), which raises NameError on Python 3.
    for k in range(K):
        x, y = X[z == k].T
        plt.scatter(x, y, color=colors[k])
|
# --------------------------------------------------------
# ImageNet-21K Pretraining for The Masses
# Copyright 2021 Alibaba MIIL (c)
# Licensed under MIT License [see the LICENSE file for details]
# Written by Tal Ridnik
# --------------------------------------------------------
import os
# Import the submodule explicitly: a bare `import urllib` does not guarantee
# that `urllib.request` is loaded, so `urllib.request.urlretrieve` could fail.
import urllib.request
from argparse import Namespace

import torch
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from src_files.semantic.semantics import ImageNet21kSemanticSoftmax
import timm

############### Downloading metadata ##############
print("downloading metadata...")
url, filename = (
    "https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/resources/imagenet21k_miil_tree.pth",
    "imagenet21k_miil_tree.pth")
if not os.path.isfile(filename):
    urllib.request.urlretrieve(url, filename)
args = Namespace()
args.tree_path = filename
semantic_softmax_processor = ImageNet21kSemanticSoftmax(args)
print("done")

############### Loading (ViT) model from timm package ##############
print("initializing model...")
model = timm.create_model('vit_base_patch16_224_miil_in21k', pretrained=True)
model.eval()
config = resolve_data_config({}, model=model)
transform = create_transform(**config)
print("done")

############## Loading sample image ##############
print("downloading sample image...")
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
if not os.path.isfile(filename):
    urllib.request.urlretrieve(url, filename)
img = Image.open(filename).convert('RGB')
tensor = transform(img).unsqueeze(0)  # transform and add batch dimension
print("done")

############## Doing semantic inference ##############
print("doing semantic inference...")
labels = []
with torch.no_grad():
    logits = model(tensor)
    semantic_logit_list = semantic_softmax_processor.split_logits_to_semantic_logits(logits)
    # scanning hierarchy levels; keep a label whenever the level's top-1
    # probability is confident enough (> 0.5)
    for i in range(len(semantic_logit_list)):
        logits_i = semantic_logit_list[i]
        # generate probs
        probabilities = torch.nn.functional.softmax(logits_i[0], dim=0)
        top1_prob, top1_id = torch.topk(probabilities, 1)
        if top1_prob > 0.5:
            top_class_number = semantic_softmax_processor.hierarchy_indices_list[i][top1_id[0]]
            top_class_name = semantic_softmax_processor.tree['class_list'][top_class_number]
            top_class_description = semantic_softmax_processor.tree['class_description'][top_class_name]
            labels.append(top_class_description)
print("labels found {}.".format(labels))

############## Visualization ##############
import matplotlib
import os
import numpy as np

# Use an interactive backend on Windows, headless Agg elsewhere.
if os.name == 'nt':
    matplotlib.use('TkAgg')
else:
    matplotlib.use('Agg')
import matplotlib.pyplot as plt

plt.imshow(img)
plt.axis('off')
plt.title('Semantic labels found: \n {}'.format(np.array(labels)))
plt.show()
|
# Generated by Django 2.1.5 on 2019-05-10 21:24
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Project model and drop the old Projects model."""

    dependencies = [
        ('main', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('project_title', models.CharField(max_length=20)),
                ('project_content', models.TextField()),
                # NOTE(review): this fixed timestamp is what makemigrations captured —
                # presumably the model called datetime.now() instead of passing the
                # callable (or using auto_now_add). The migration just records that
                # state; the fix, if wanted, belongs in the model.
                ('project_published', models.DateTimeField(default=datetime.datetime(2019, 5, 10, 17, 24, 7, 785063), verbose_name='date published')),
            ],
        ),
        migrations.DeleteModel(
            name='Projects',
        ),
    ]
|
from sympy import partition as p
from time import time
from rich import print
t1 = time()

# Search upward from 10000 for the first n whose partition number
# is divisible by one million.
n = 10000
while True:
    if p(n) % 1_000_000 == 0:
        break
    n += 1

print(n)
print(f"Process completed in {time()-t1}s")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-13 08:55
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Add the PasswordExpiry and PasswordHistory models."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('account', '0002_fix_str'),
    ]

    operations = [
        migrations.CreateModel(
            name='PasswordExpiry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('expiry', models.PositiveIntegerField(default=0)),
                # OneToOneField: exactly one expiry record per user.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='password_expiry', to=settings.AUTH_USER_MODEL, verbose_name='user')),
            ],
        ),
        migrations.CreateModel(
            name='PasswordHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=255)),
                ('timestamp', models.DateTimeField(default=django.utils.timezone.now)),
                # Plain ForeignKey: many history rows per user.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='password_history', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Reinforcement - Generation
- Class responsible for generating games
"""
import collections
import datetime
import logging
import multiprocessing
from queue import Empty as EmptyQueueException
import random
import sys
from threading import Thread
import time
import traceback
from tornado import gen, ioloop
from diplomacy_research.models.self_play.controller import generate_trajectory
from diplomacy_research.models.state_space import ALL_POWERS, NB_POWERS
from diplomacy_research.models.training.memory_buffer import MemoryBuffer, general_ops
from diplomacy_research.models.training.memory_buffer.expert_games import get_uniform_initial_states, \
get_backplay_initial_states
from diplomacy_research.models.training.memory_buffer.online_games import save_games
from diplomacy_research.models.training.memory_buffer import priority_replay
from diplomacy_research.models.training.reinforcement.common import get_nb_rl_agents
from diplomacy_research.models.training.reinforcement.players import get_train_supervised_players, \
get_train_self_play_players, get_train_staggered_players
from diplomacy_research.models.training.reinforcement.serving import get_tf_serving_port
from diplomacy_research.utils.proto import write_bytes_to_file, proto_to_bytes
# Constants
LOGGER = logging.getLogger(__name__)
# 'spawn' context so worker processes start fresh (game generation asserts
# that TensorFlow is not loaded in children).
SPAWN_CONTEXT = multiprocessing.get_context('spawn')
# Sentinel sent on the transitions queue when the model version changes.
NEW_VERSION = 'new.version'
class AggregatorConfig(
        collections.namedtuple('AggregatorConfig', ('nb_left',          # The number of games left to generate
                                                    'nb_total',         # The total number of games to generate
                                                    'file_path',        # The file path where to save games
                                                    'save_to_buffer'))):    # Indicate to save the game to mem. buffer
    """ Configuration to pass to the aggregator on a new set of games

        A value of -1 for nb_left / nb_total means "generate games forever".
    """
@gen.coroutine
def generate_game_on_process(get_players_callable, get_players_kwargs, generate_trajectory_kwargs, queue):
    """ Generate a game on the current process
        :param get_players_callable: Callable function to get a list of players
        :param get_players_kwargs: A dictionary of kwargs to pass to the get_players_callable
        :param generate_trajectory_kwargs: A dictionary of kwargs to pass to the method generate_trajectory
        :param queue: A multiprocessing queue to save games to disk and display progress.
        :return: A saved game in bytes format.
        :type queue: multiprocessing.Queue
    """
    # This runs in a freshly spawned worker: loading TF here would waste
    # memory and startup time, hence the hard assert.
    assert 'tensorflow' not in sys.modules, 'TensorFlow should not be loaded for generate games.'
    players = get_players_callable(**get_players_kwargs)
    saved_game_bytes = yield generate_trajectory(players, **generate_trajectory_kwargs, output_format='bytes')
    # Tuple format consumed by the aggregator: (is_full_game, nb_transitions, saved_game_bytes)
    queue.put_nowait((True, 0, saved_game_bytes))
    return saved_game_bytes
def start_game_process(kwargs):
    """ Starts an IO loop for game generation """
    try:
        # Run the game-generation coroutine to completion on a private IO loop.
        return ioloop.IOLoop().run_sync(lambda: generate_game_on_process(**kwargs))     # pylint: disable=no-value-for-parameter
    except KeyboardInterrupt:
        pass
    except:                                                                             # pylint: disable=bare-except
        # Log any other failure loudly - exceptions in pool workers are easy to miss.
        LOGGER.error('-' * 32)
        LOGGER.error('Exception occurred in process pool.')
        traceback.print_exc()
        LOGGER.error('-' * 32)
    return None
def start_game_generator(adapter_ctor, dataset_builder_ctor, reward_fn, advantage_fn, hparams, cluster_config,
                         process_pool, games_queue, transitions_queue):
    """ Start the game generator (to generate an infinite number of training games)
        :param adapter_ctor: The constructor to build the adapter to query orders, values and policy details
        :param dataset_builder_ctor: The constructor of `BaseBuilder` to set the required proto fields
        :param reward_fn: The reward function to use (Instance of `.models.self_play.reward_functions`).
        :param advantage_fn: An instance of `.models.self_play.advantages`
        :param hparams: A dictionary of hyper-parameters
        :param cluster_config: The cluster configuration to use for distributed training
        :param process_pool: Optional. A ProcessPoolExecutor that was forked before TF and gRPC were loaded.
        :param games_queue: Queue to be used by processes to send games to the aggregator.
        :param transitions_queue: Inbound queue to receive the number of transitions and version updates.
        :return: Nothing
        :type adapter_ctor: diplomacy_research.models.policy.base_policy_adapter.BasePolicyAdapter.__class__
        :type dataset_builder_ctor: diplomacy_research.models.datasets.base_builder.BaseBuilder.__class__
        :type reward_fn: diplomacy_research.models.self_play.reward_functions.AbstractRewardFunction
        :type advantage_fn: diplomacy_research.models.self_play.advantages.base_advantage.BaseAdvantage
        :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig
        :type process_pool: diplomacy_research.utils.executor.ProcessPoolExecutor
        :type games_queue: multiprocessing.Queue
        :type transitions_queue: multiprocessing.Queue
    """
    # pylint: disable=too-many-arguments
    memory_buffer = MemoryBuffer(cluster_config, hparams)
    nb_cores = multiprocessing.cpu_count()
    futures = []
    nb_pending_transitions = 0              # For throttling if there are enough transitions for the learner
    nb_rl_agents = get_nb_rl_agents(hparams['mode'])

    # Finding the right function to create players
    get_players_callable = {'supervised': get_train_supervised_players,
                            'self-play': get_train_self_play_players,
                            'staggered': get_train_staggered_players}[hparams['mode']]

    # Generating an infinite number of games
    while True:
        nb_new_transitions = 0

        # 1) Detecting the number of pending transactions to throttle if necessary
        # NOTE(review): nb_pending_transitions / nb_new_transitions are accumulated
        # but never read below — the throttling they were meant to drive appears
        # unimplemented. Confirm before removing.
        while not transitions_queue.empty():
            item = transitions_queue.get()
            if item == NEW_VERSION:
                nb_pending_transitions = 0
                nb_cores = multiprocessing.cpu_count()
            else:
                nb_pending_transitions += nb_rl_agents * item / NB_POWERS
                nb_new_transitions += nb_rl_agents * item / NB_POWERS

        # Dropping completed futures and counting how many workers are still busy
        futures = [fut for fut in futures if not fut.done()]
        nb_games_being_generated = len(futures)

        # 2) Finding the number of games to generate
        nb_new_games = nb_cores - nb_games_being_generated
        if nb_new_games <= 0:
            # All cores busy - sleep briefly instead of busy-spinning at 100% CPU.
            time.sleep(0.25)
            continue

        # 3) Generating the get_player_kwargs
        get_players_kwargs = [{'adapter_ctor': adapter_ctor,
                               'dataset_builder_ctor': dataset_builder_ctor,
                               'tf_serving_port': get_tf_serving_port(cluster_config, serving_id=0),
                               'cluster_config': cluster_config,
                               'hparams': hparams}] * nb_new_games

        # 4) Generating gen_trajectory_kwargs
        gen_trajectory_kwargs = []
        for _ in range(nb_new_games):
            gen_trajectory_kwargs += [{'hparams': hparams,
                                       'reward_fn': reward_fn,
                                       'advantage_fn': advantage_fn,
                                       'power_assignments': hparams.get('power', '') or random.choice(ALL_POWERS),
                                       'set_player_seed': bool(hparams['dropout_rate']),
                                       'update_interval': hparams['update_interval'],
                                       'update_queue': games_queue}]

        # 5) Adding initial states if required
        if hparams['start_strategy'] == 'uniform':
            initial_states_proto = get_uniform_initial_states(memory_buffer, nb_new_games)
            for game_ix, initial_state_proto in enumerate(initial_states_proto):
                gen_trajectory_kwargs[game_ix]['initial_state_bytes'] = proto_to_bytes(initial_state_proto)
        elif hparams['start_strategy'] == 'backplay':
            winning_power_names = []
            for kwargs in gen_trajectory_kwargs:
                if isinstance(kwargs['power_assignments'], list):
                    winning_power_names += [kwargs['power_assignments'][0]]
                else:
                    winning_power_names += [kwargs['power_assignments']]
            version_id = general_ops.get_version_id(memory_buffer)
            initial_states_proto = get_backplay_initial_states(memory_buffer, winning_power_names, version_id)
            for game_ix, initial_state_proto in enumerate(initial_states_proto):
                gen_trajectory_kwargs[game_ix]['initial_state_bytes'] = proto_to_bytes(initial_state_proto)

        # 6) Launching jobs using current pool
        tasks = []
        for player_kwargs, trajectory_kwargs in zip(get_players_kwargs, gen_trajectory_kwargs):
            tasks += [{'get_players_callable': get_players_callable,
                       'get_players_kwargs': player_kwargs,
                       'generate_trajectory_kwargs': trajectory_kwargs,
                       'queue': games_queue}]
        futures += [process_pool.submit(start_game_process, kwargs) for kwargs in tasks]
def start_aggregator(hparams, cluster_config, games_queue, transitions_queue, display_status=True):
    """ Games Aggregator - Displays status every minute and saves games to disk and to memory buffer
        :param hparams: A dictionary of hyper-parameters
        :param cluster_config: The cluster configuration to use for distributed training
        :param games_queue: Inbound queue to receive games from the process pool.
        :param transitions_queue: Outbound queue to send transitions and version updates to the generator for throttling
        :param display_status: Boolean that indicates to display the status on stdout.
        :return: Nothing
        :type cluster_config: diplomacy_research.utils.cluster.ClusterConfig
        :type games_queue: multiprocessing.Queue
        :type transitions_queue: multiprocessing.Queue
    """
    # pylint: disable=too-many-nested-blocks
    memory_buffer = MemoryBuffer(cluster_config, hparams)
    buffer_file = None
    current_config = None                   # type: AggregatorConfig
    new_config = None
    nb_left, nb_total, nb_completed = 0, 0, 0
    starting_time = time.time()
    last_status_time = time.time()
    queue_is_done = False
    last_version = -1

    # Looping forever - New configs will be sent on the queue
    while not queue_is_done:
        nb_full_games = 0

        # Dequeuing items from the queue
        saved_games_bytes = []
        while True:
            try:
                item = games_queue.get(False)

                # None is the sentinel that shuts the aggregator down.
                if item is None:
                    queue_is_done = True
                    break
                elif isinstance(item, AggregatorConfig):
                    new_config = item
                    break
                else:
                    is_full_game, nb_transitions, saved_game_bytes = item
                    saved_games_bytes += [saved_game_bytes]
                    if is_full_game:
                        nb_full_games += 1

                    # On a new version, we send 'new.version' on the transition queue
                    # We also send the nb of transitions, so the generator can throttle when there is enough
                    # transitions for the current version
                    if transitions_queue and nb_transitions:
                        current_version = general_ops.get_version_id(memory_buffer)
                        if current_version != last_version:
                            transitions_queue.put_nowait(NEW_VERSION)
                            last_version = current_version
                        transitions_queue.put_nowait(nb_transitions)

            except EmptyQueueException:
                break

        # Processing current games
        if saved_games_bytes:
            if current_config.save_to_buffer:
                save_games(memory_buffer, saved_games_bytes=saved_games_bytes)
            for saved_game_bytes in saved_games_bytes:
                if buffer_file:
                    write_bytes_to_file(buffer_file, saved_game_bytes)
                if nb_left > 0:
                    nb_left -= 1
                nb_completed += 1

        # Printing status
        # Case 1 - Generate an infinite number of games
        if nb_total == -1 and time.time() - last_status_time >= 60:
            last_status_time = time.time()
            elapsed_time = last_status_time - starting_time
            games_per_day = 24 * 3600. * nb_full_games / max(1., elapsed_time)
            if display_status:
                LOGGER.info('Current status - Games/day: %.2f', games_per_day)

        # Case 2 - Generate a finite number of games
        elif nb_left > 0 and time.time() - last_status_time >= 60:
            last_status_time = time.time()
            elapsed_time = last_status_time - starting_time
            progress = 100. * (nb_total - nb_left) / max(1, nb_total)
            # ETA = average time per completed game * games remaining.
            # (The previous formula divided elapsed_time by nb_completed * nb_left,
            #  which shrank the estimate as MORE games remained.)
            eta = datetime.timedelta(seconds=int(elapsed_time / max(1, nb_completed) * nb_left))
            if display_status:
                LOGGER.info('Current status - %.2f%% (%d/%d) - ETA: %s', progress, nb_total - nb_left, nb_total, eta)

        # Done current batch
        if nb_total and not nb_left:
            progress = 100. * (nb_total - nb_left) / max(1, nb_total)
            if display_status:
                LOGGER.info('Done generating games - Progress %.2f%% (%d/%d)', progress, nb_total - nb_left, nb_total)
            nb_left, nb_total, nb_completed = 0, 0, 0
            if buffer_file:
                buffer_file.close()
                buffer_file = None

        # Setting new config
        if new_config:
            starting_time = time.time()
            nb_left, nb_total, nb_completed = new_config.nb_left, new_config.nb_total, 0
            if buffer_file:
                buffer_file.close()
                buffer_file = None
            if new_config.file_path:
                buffer_file = open(new_config.file_path, 'ab')
            current_config = new_config
            if nb_total > 0:
                progress = 100. * (nb_total - nb_left) / max(1, nb_total)
                if display_status:
                    LOGGER.info('Generating games - Progress %.2f%% (%d/%d)', progress, nb_total - nb_left, nb_total)

        # Sleeping
        if not saved_games_bytes and new_config is None:
            time.sleep(0.25)
        new_config = None
def start_training_process(trainer, block=False):
    """ Starts a process that will generate an infinite number of training games
        :param trainer: A reinforcement learning trainer instance.
        :param block: Boolean that indicates that we want to block until the process is completed
        :type trainer: diplomacy_research.models.training.reinforcement.trainer.ReinforcementTrainer
    """
    # Starting aggregator and game generator (only once - skip if already running)
    if not trainer.aggregator['train']:

        # The to_aggregator queue is used to send games from the process pool to the aggregator
        # The to_generator queue is used to send transitions from the aggregator to the generator for throttling
        manager = multiprocessing.Manager()
        queue_to_aggregator = manager.Queue()
        queue_to_generator = manager.Queue()

        # Putting configuration for aggregator -- -1 = infinite games
        queue_to_aggregator.put_nowait(AggregatorConfig(nb_left=-1,
                                                        nb_total=-1,
                                                        file_path=None,
                                                        save_to_buffer=True))

        # Aggregator - Using separate process (spawn context, so it starts
        # without this process's inherited state)
        trainer.aggregator['train'] = SPAWN_CONTEXT.Process(target=start_aggregator,
                                                            kwargs={'hparams': trainer.hparams,
                                                                    'cluster_config': trainer.cluster_config,
                                                                    'games_queue': queue_to_aggregator,
                                                                    'transitions_queue': queue_to_generator,
                                                                    'display_status': bool(trainer.cluster_config)})
        trainer.aggregator['train'].start()

        # Generator - Using thread (it only schedules work on the process pool)
        trainer.train_thread = Thread(target=start_game_generator,
                                      kwargs={'adapter_ctor': trainer.adapter_constructor,
                                              'dataset_builder_ctor': trainer.dataset_builder_constructor,
                                              'reward_fn': trainer.reward_fn,
                                              'advantage_fn': trainer.advantage_fn,
                                              'hparams': trainer.hparams,
                                              'cluster_config': trainer.cluster_config,
                                              'process_pool': trainer.process_pool,
                                              'games_queue': queue_to_aggregator,
                                              'transitions_queue': queue_to_generator})
        trainer.train_thread.start()

    # Blocking until thread completes
    if block:
        trainer.train_thread.join()
def get_replay_samples(trainer):
    """ Retrieves a series of replay samples (to use for learning)
        :param trainer: A reinforcement learning trainer instance.
        :return: A list of `ReplaySample`.
        :type trainer: diplomacy_research.models.training.reinforcement.trainer.ReinforcementTrainer
    """
    # Computing the number of samples - Rounding them to the nearest batch size
    perc_from_replay = 0.
    if trainer.algorithm_constructor.can_do_experience_replay:
        # Clamp the configured replay fraction to [0, 1].
        perc_from_replay = max(0., min(1., trainer.flags.experience_replay))
    nb_samples = perc_from_replay * trainer.flags.nb_transitions_per_update
    # Round to a whole number of batches so the learner only sees full batches.
    nb_samples = int(trainer.flags.batch_size * round(nb_samples / trainer.flags.batch_size))
    if not nb_samples:
        return []
    return priority_replay.get_replay_samples(trainer.memory_buffer, nb_samples)
|
"""
Test cases for the L{xmantissa.webadmin} module.
"""
from twisted.trial.unittest import TestCase
from nevow.athena import LivePage
from nevow.context import WovenContext
from nevow.testutil import FakeRequest
from nevow.loaders import stan
from nevow.tags import html, head, body, directive
from nevow.inevow import IRequest
from axiom.store import Store
from axiom.userbase import LoginSystem, LoginMethod
from axiom.dependency import installOn
from axiom.plugins.mantissacmd import Mantissa
from xmantissa.webadmin import (
LocalUserBrowser, LocalUserBrowserFragment,
UserInteractionFragment, EndowFragment, DepriveFragment,
SuspendFragment, UnsuspendFragment)
from xmantissa.product import Product
class UserInteractionFragmentTestCase(TestCase):
    def setUp(self):
        """
        Create a site store and a user store with a L{LocalUserBrowser}
        installed on it.
        """
        self.siteStore = Store()
        self.loginSystem = LoginSystem(store=self.siteStore)
        installOn(self.loginSystem, self.siteStore)

        # In-memory user store, parented to the site store as a real user's
        # store would be.
        self.userStore = Store()
        self.userStore.parent = self.siteStore
        self.browser = LocalUserBrowser(store=self.userStore)

    def test_createUser(self):
        """
        Test that L{webadmin.UserInteractionFragment.createUser} method
        actually creates a user.
        """
        userInteractionFragment = UserInteractionFragment(self.browser)
        userInteractionFragment.createUser(
            u'testuser', u'localhost', u'password')
        account = self.loginSystem.accountByAddress(u'testuser', u'localhost')
        self.assertEquals(account.password, u'password')

    def test_rendering(self):
        """
        Test that L{webadmin.UserInteractionFragment} renders without raising
        any exceptions.
        """
        f = UserInteractionFragment(self.browser)

        # Host the fragment in a minimal LivePage so its liveglue/render
        # directives can resolve.
        p = LivePage(
            docFactory=stan(
                html[
                    head(render=directive('liveglue')),
                    body(render=lambda ctx, data: f)]))
        f.setFragmentParent(p)

        ctx = WovenContext()
        req = FakeRequest()
        ctx.remember(req, IRequest)

        d = p.renderHTTP(ctx)
        def rendered(ign):
            # Shut the LivePage down so nothing leaks into other tests.
            p.action_close(None)
        d.addCallback(rendered)
        return d
class ActionsTestCase(TestCase):
    """
    Tests to verify that actions behave as expected.

    @ivar siteStore: A site store containing an administrative user's account.

    @ivar siteAccount: The L{axiom.userbase.LoginAccount} for the
        administrator, in the site store.

    @ivar siteMethod: The single L{axiom.userbase.LoginMethod} for the
        administrator, in the site store.

    @ivar localUserBrowserFragment: A L{LocalUserBrowserFragment} examining the
        administrator's L{LocalUserBrowser} powerup.
    """
    def setUp(self):
        """
        Construct a site and user store with an administrator that can invoke the
        web administrative tools, setting the instance variables described in
        this class's docstring.
        """
        self.siteStore = Store(filesdir=self.mktemp())
        Mantissa().installSite(self.siteStore, u"localhost", u"", False)
        Mantissa().installAdmin(self.siteStore, u'admin', u'localhost', u'asdf')
        self.siteMethod = self.siteStore.findUnique(
            LoginMethod, LoginMethod.localpart == u'admin')
        self.siteAccount = self.siteMethod.account

        # Open the admin's own user store and grab the LocalUserBrowser
        # installed there by installAdmin.
        userStore = self.siteAccount.avatars.open()
        lub = userStore.findUnique(LocalUserBrowser)
        self.localUserBrowserFragment = LocalUserBrowserFragment(lub)

    def test_actionTypes(self):
        """
        Verify that all the action methods expose the appropriate fragment
        objects, with their attributes set to indicate the correct objects to
        manipulate.
        """
        myRowID = self.localUserBrowserFragment.linkToItem(self.siteMethod)
        # Each admin action name maps to the fragment type it must return.
        actionMap = [('installOn', EndowFragment),
                     ('uninstallFrom', DepriveFragment),
                     ('suspend', SuspendFragment),
                     ('unsuspend', UnsuspendFragment)]
        for action, fragmentType in actionMap:
            resultFragment = self.localUserBrowserFragment.performAction(
                action, myRowID)
            self.failUnless(isinstance(resultFragment, fragmentType),
                            "%s does not return a %s" %
                            (action, fragmentType))
            self.assertEquals(resultFragment.fragmentParent,
                              self.localUserBrowserFragment)
            self.assertEquals(resultFragment.account, self.siteAccount)
class RenderingTestCase(TestCase):
    """
    Test cases for HTML rendering of various fragments.
    """
    def doRendering(self, fragmentClass):
        """
        Verify that the given fragment class will render without raising an
        exception.
        """
        siteStore = Store()
        loginSystem = LoginSystem(store=siteStore)
        installOn(loginSystem, siteStore)

        # Give the test account the admin/signup product so the fragment has
        # something to endow/deprive/suspend.
        p = Product(store=siteStore, types=["xmantissa.webadmin.LocalUserBrowser",
                                            "xmantissa.signup.SignupConfiguration"])
        account = loginSystem.addAccount(u'testuser', u'localhost', None)
        p.installProductOn(account.avatars.open())
        f = fragmentClass(None, u'testuser', account)

        # Host the fragment in a minimal LivePage so its liveglue/render
        # directives can resolve.
        p = LivePage(
            docFactory=stan(
                html[
                    head(render=directive('liveglue')),
                    body(render=lambda ctx, data: f)]))
        f.setFragmentParent(p)

        ctx = WovenContext()
        req = FakeRequest()
        ctx.remember(req, IRequest)

        d = p.renderHTTP(ctx)
        def rendered(ign):
            # Shut the LivePage down so nothing leaks into other tests.
            p.action_close(None)
        d.addCallback(rendered)
        return d

    def test_endowRendering(self):
        """
        Verify that L{EndowFragment} can render without raising an exception.
        """
        return self.doRendering(EndowFragment)

    def test_depriveRendering(self):
        """
        Verify that L{DepriveFragment} can render without raising an exception.
        """
        return self.doRendering(DepriveFragment)

    def test_suspendRendering(self):
        """
        Verify that L{SuspendFragment} can render without raising an exception.
        """
        return self.doRendering(SuspendFragment)

    def test_unsuspendRendering(self):
        """
        Verify that L{UnsuspendFragment} can render without raising an
        exception.
        """
        return self.doRendering(UnsuspendFragment)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, datetime
from django.db import migrations
from django.conf import settings
def rename_x3s3dot3_forwards(apps, schema_editor):
    """Forward data migration: rename the group acronym 'x3s3.3' to 'x3s3dot3'."""
    group_model = apps.get_model("group", "Group")
    matching = group_model.objects.filter(acronym="x3s3.3")
    matching.update(acronym="x3s3dot3")
def rename_x3s3dot3_backwards(apps, schema_editor):
    """Reverse data migration: restore the group acronym 'x3s3dot3' to 'x3s3.3'."""
    group_model = apps.get_model("group", "Group")
    matching = group_model.objects.filter(acronym="x3s3dot3")
    matching.update(acronym="x3s3.3")
def get_rid_of_empty_charters(apps, schema_editor):
    """Detach and delete the charter documents of groups whose charters are empty,
    removing any empty charter files left on disk."""
    Group = apps.get_model("group", "Group")
    for acronym in ["fun", "multrans", "cicm", "woes", "dcon", "sdn", "i2aex", "rpsreqs", "antitrust", "iprbis", "dsii"]:
        group = Group.objects.get(acronym=acronym)
        if group.charter:
            charter = group.charter

            # clean up any empty files left behind
            # Collect every revision ever recorded (current + history) so all
            # corresponding on-disk files are inspected.
            revisions = set()
            revisions.add(charter.rev)
            for h in charter.history_set.all():
                revisions.add(h.rev)
            for rev in revisions:
                path = os.path.join(settings.CHARTER_PATH, '%s-%s.txt' % (charter.name, rev))
                try:
                    if os.path.exists(path):
                        with open(path, 'r') as f:
                            if f.read() == "":
                                os.remove(path)
                except IOError:
                    # Best-effort cleanup: ignore unreadable/undeletable files.
                    pass

            # Detach the charter from the group before deleting it so the
            # foreign key does not dangle.
            group.charter = None
            group.save()
            charter.delete()
def fix_empty_rrg_charter(apps, schema_editor):
    """Give the empty charter-irtf-rrg document real content, record the new
    revision, mark it approved, and write the charter text to disk."""
    Document = apps.get_model("doc", "Document")
    DocEvent = apps.get_model("doc", "DocEvent")
    NewRevisionDocEvent = apps.get_model("doc", "NewRevisionDocEvent")
    Person = apps.get_model("person", "Person")
    State = apps.get_model("doc", "State")

    charter = Document.objects.get(name="charter-irtf-rrg")
    system = Person.objects.get(name="(System)")

    # Only act if the document is still at the placeholder revision, so
    # re-running this migration is a no-op.
    if charter.rev == "00-00":
        charter.rev = "01"
        charter.time = datetime.datetime.now()
        charter.save()

        # Record the revision bump as a document event, attributed to (System).
        NewRevisionDocEvent.objects.create(
            rev=charter.rev,
            doc=charter,
            type="new_revision",
            by=system,
            desc="New version available: <b>%s-%s.txt</b>" % (charter.name, charter.rev),
            time=charter.time,
        )
        DocEvent.objects.create(
            doc=charter,
            type="added_comment",
            by=system,
            desc="Added existing charter",
            time=charter.time,
        )

        # Replace whatever charter-type states are set with "approved".
        approved = State.objects.get(type="charter", slug="approved")
        already_set = list(charter.states.filter(type="charter"))
        if already_set:
            charter.states.remove(*already_set)
        charter.states.add(approved)

        # Write the actual charter text to the charter file location.
        path = os.path.join(settings.CHARTER_PATH, '%s-%s.txt' % (charter.name, charter.rev))
        with open(path, "w") as f:
            f.write("""The Routing Research Group (RRG) is chartered to explore routing and addressing problems that are important to the development of the Internet but are not yet mature enough for engineering work within the IETF. As the Internet continues to evolve, the challenges in providing a scalable and robust global routing system will also change over time. At the moment, the Internet routing and addressing architecture is facing challenges in scalability, mobility, multi-homing, and inter-domain traffic engineering. Thus the RRG proposes to focus its effort on designing an alternate architecture to meet these challenges. Although Internet routing is a broad and active research area, a focused effort at this time is necessary to assure rapid progress towards reaching the goal.
More specifically, we propose to explore architectural alternatives, including, but not limited to, separating host location and identification information. Research and experimentation in addressing and routing algorithms will be encouraged to understand whether this new direction can provide effective solutions, to work out candidate designs as necessary for a complete solution, and to fully understand both the gains and the tradeoffs that the new solutions may bring. The group will produce a list of prioritized design goals and a recommendation for a routing and addressing architecture.
The RRG will have an open general discussion mailing list where any topic of interest to the routing research community can be discussed, and topics related to scalable routing architectures are particularly encouraged. For specific topics with widespread discussion, interested parties will be encouraged to form ad-hoc mailing lists, with summaries sent to the general mailing list quarterly. Summaries will contain the recent conclusions reached as well as the near-term agenda for future progress.
It is commonly recognized that productive design efforts can be carried out by small and focused design teams. The RRG encourages the formation of focused design teams to explore specific design choices. As with ad-hoc mailing lists, individual design teams are required to report back quarterly to the RRG with their progress and remaining open issues. Each design team is expected to produce a set of Internet Drafts that documents their current thinking.
The RRG, as a whole, will hold open meetings from time to time to solicit input from, and supply information to, the broader community. In particular, at least once per year there will be a review of the group's activities held at an IETF meeting. More frequent meetings will be held if it will speed group progress. Ad-hoc and design team meetings are strongly encouraged.
The output of the group will consist of Informational and Experimental RFCs as well as Journal Articles on the topics covered by the subgroups.""")
def fix_cicm_state(apps, schema_editor):
    """Data migration: move the 'cicm' group into the 'bof-conc' state."""
    group_model = apps.get_model("group", "Group")
    cicm_groups = group_model.objects.filter(acronym="cicm")
    cicm_groups.update(state="bof-conc")
def noop(apps, schema_editor):
    """Reverse-migration placeholder: intentionally does nothing."""
class Migration(migrations.Migration):
    # Migrations that must be applied before this one (app label, migration name).
    dependencies = [
        ('doc', '0010_auto_20150930_0251'),
        ('group', '0008_auto_20160505_0523'),
        ('community', '0004_cleanup_data'),
    ]

    # Data migrations, run in order. Each RunPython pairs a forward function
    # with its reverse; `noop` makes a forward-only fix safely reversible.
    operations = [
        migrations.RunPython(rename_x3s3dot3_forwards, rename_x3s3dot3_backwards),
        migrations.RunPython(fix_empty_rrg_charter, noop),
        migrations.RunPython(get_rid_of_empty_charters, noop),
        migrations.RunPython(fix_cicm_state, noop),
    ]
|
"""The main graph kernel class, implemented as a sci-kit transformer."""
import copy
import time
import warnings
import numpy as np
from scipy.linalg import svd
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.utils.validation import check_is_fitted
from grakel.kernels import GraphletSampling
from grakel.kernels import RandomWalk
from grakel.kernels import RandomWalkLabeled
from grakel.kernels import ShortestPath
from grakel.kernels import ShortestPathAttr
from grakel.kernels import WeisfeilerLehman
from grakel.kernels import NeighborhoodHash
from grakel.kernels import PyramidMatch
from grakel.kernels import SubgraphMatching
from grakel.kernels import NeighborhoodSubgraphPairwiseDistance
from grakel.kernels import LovaszTheta
from grakel.kernels import SvmTheta
from grakel.kernels import OddSth
from grakel.kernels import Propagation
from grakel.kernels import PropagationAttr
from grakel.kernels import HadamardCode
from grakel.kernels import MultiscaleLaplacian
from grakel.kernels import MultiscaleLaplacianFast
from grakel.kernels import VertexHistogram
from grakel.kernels import EdgeHistogram
from grakel.kernels import GraphHopper
from grakel.kernels import CoreFramework
# Python 2/3 cross-compatibility import
from future.utils import iteritems
# NOTE(review): seeding the *global* NumPy RNG at import time is a module-level
# side effect; kept for backward compatibility. Kernels that take an explicit
# "random_seed" parameter should be preferred for reproducibility.
np.random.seed(int(time.time()))

# Names accepted as base kernels (the last element of a kernel list).
# Fixes vs. previous revision: the "NSPDK" alias was advertised here while
# make_kernel_ dispatches on "NSPD" (so "NSPD" was rejected as unsupported and
# "NSPDK" fell through the dispatch with no result); the alias now matches the
# dispatcher. The duplicated "propagation" entry was also removed.
supported_base_kernels = [
    "subtree_wl", "random_walk",
    "shortest_path",
    "graphlet_sampling", "subgraph_matching",
    "multiscale_laplacian",
    "lovasz_theta", "svm_theta",
    "neighborhood_hash", "neighborhood_subgraph_pairwise_distance",
    "NSPD",
    "odd_sth", "propagation",
    "pyramid_match",
    "vertex_histogram", "edge_histogram",
    "graph_hopper"
]

# Names accepted as general (frame) kernels; each wraps the next kernel in the
# list as its base kernel.
supported_general_kernels = [
    "weisfeiler_lehman",
    "hadamard_code",
    "core_framework"
]

default_verbose_value = True
default_random_seed_value = 42
# Default number of Nystroem components when Nystroem is True/0/-1.
default_n_components = 100
class GraphKernel(BaseEstimator, TransformerMixin):
    r"""A decorator for graph kernels.

    Parameters
    ----------
    kernel : list(dict(key:str, value:value))
        A list of dictionaries, or a single dictionary that has the following structure:
            * "name" : [str] - with the kernel name

            * "name_of_parameter_1" : value

            * "name_of_parameter_2" : value

            * :math:`\;\cdots\;`

            * "name_of_parameter_k" : value

        available "names" / "parameters" are:
            1. base_kernels (the structure must always reach a base kernel)

                - "random_walk"
                    + (**o**) "with_labels" : bool

                    + (**o**) "lamda" : float

                    + (**o**) "method_type" : [str], "baseline", "fast"

                    + (**o**) "kernel_type" : [str], "geometric", "exponential"

                    + (**o**) "p" : [int] > 0

                - "shortest_path"
                    + (**o**) "algorithm_type" : [str] "dijkstra", "floyd_warshall"

                    + (**o**) "as_attributes" : [bool]

                    + (**o**) "attribute_kernel" : [function] : (attribute_x, attribute_y) -> number

                    + (**o**) "with_labels" : [bool]

                - "graphlet_sampling"
                    + (**o**) "k" : [int]

                    + (**o**) "sampling" : [dict] or **None**

                - "multiscale_laplacian"
                    + (**o**) "which" : [str] "slow", "fast"

                    + (**o**) "L" : [int] > 0

                    + (**o**) "gamma" : [float] > .0

                    + (**o**) "heta" : [float] > .0

                    + (**o**) "N" : [int] > 0, if "which": "fast"

                    + (**o**) "P" : [int] > 0, if "which": "fast"

                - "subgraph_matching"
                    + (**o**) "kv" : [function] : (node_x, node_y, Lx, Ly) -> number

                    + (**o**) "ke" : [function] : (edge_x, edge_y, Lx, Ly) -> number

                    + (**o**) "lw" : a lambda weight function for cliques: set -> number

                - "lovasz_theta"
                    + (**o**) "n_samples" : [int] > 1

                    + (**o**) "subsets_size_range" : [tuple] of two [int]

                    + (**o**) "metric" : [function] (number, number) -> number

                - "svm_theta"
                    + (**o**) "n_samples" : [int] > 1

                    + (**o**) "subsets_size_range" : [tuple] with 2 [int] elements

                    + (**o**) "metric" : [function] (number, number) -> number

                - "neighborhood_hash"
                    + (**o**) "nh_type" : [str] "simple" or "count-sensitive"

                    + (**o**) "R" : [int] > 0

                    + (**o**) "bits" : [int] > 0

                - "neighborhood_subgraph_pairwise_distance" or "NSPD"
                    + (**o**) "r" : (int) positive integer

                    + (**o**) "d" : (int) positive integer

                - "odd_sth"
                    + (**o**) "h" : [int] > 0

                - "propagation"
                    + (**o**) t_max: [int] > 0

                    + (**o**) T: [dict] [int]: [np.arrays]

                    + (**o**) with_attributes: [bool], default=False

                    + (**o**) M: [str] {"H", "TV"} if `with_attributes=True` else {"L1", "L2"}

                    + (**o**) w: [int] > 0

                    + (**o**) base_kernel: [function] x:[Counter] , y:[Counter] -> [number]

                - "pyramid_match"
                    + (**o**) with_labels: [bool]

                    + (**o**) d: [int] > 0

                    + (**o**) L: [int] >= 0

                - "graph_hopper"
                    + (**o**) kernel_type: [str: {'linear', 'gaussian'}] or [tuple: {('gaussian', mu)}]
                      or [function] x:[(np.array, np.array)] , y:[(np.array, np.array)] -> [number]

                - "vertex_histogram" or "subtree_wl"
                    *No arguments*

                - "edge_histogram"
                    *No arguments*

            2. general_kernels (this kernel will use the next kernel
               on the list as base kernel)

                - "weisfeiler_lehman"
                    + (**o**) "niter" : [int] >= 0

                - "hadamard_code"
                    + (**o**) "niter" : [int] > 0

                - "core_framework"
                    + (**o**) "min_core" : [int] >= -1

        where (**o**): stands for optional parameters

    Nystroem : int or bool, optional
        Defines the number of nystroem components.
        To initialize the default (100 components), set -1 or 0.

    n_jobs : int or None, optional
        Defines the number of jobs of a joblib.Parallel objects needed for parallelization
        or None for direct execution. The use or not of this function depends on each kernel.

    normalize : bool, optional
        Normalize the output of the graph kernel.
        Ignored when a Nystroem GraphKernel object is instantiated.

    verbose : bool, optional
        Define if messages will be printed on stdout.

    random_seed : int, optional
        Initialize can provide a randomness by providing a random seed.

    Attributes
    ----------
    kernel_ : function
        The full kernel applied between graph objects.

    nystroem_ : int
        Holds the nystroem, number of components.
        If not initialized, it stands as a False
        boolean variable.

    components_ : array, shape=(n_components, n_features)
        Subset of training graphs used to construct the feature map.

    nystroem_normalization_ : array, shape=(n_components, n_components)
        Normalization matrix needed for embedding.
        Square root of the kernel matrix on ``components_``.

    component_indices_ : array, shape=(n_components)
        Indices of ``components_`` in the training set.

    initialized_ : dict
        Monitors which parameter derived object should be initialized.

    """

    def __init__(self,
                 kernel=None,
                 normalize=False,
                 verbose=False,
                 n_jobs=None,
                 random_seed=default_random_seed_value,
                 Nystroem=False):
        """`__init__` for `GraphKernel` object."""
        # Only store the constructor arguments here (sklearn convention);
        # validation and derived state happen lazily in `initialize_`.
        self.kernel = kernel
        self.normalize = normalize
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_seed = random_seed
        self.Nystroem = Nystroem
        # Tracks which derived attributes still need (re-)initialization.
        self.initialized_ = {"kernel": False,
                             "Nystroem": False,
                             "n_jobs": False}

    def fit(self, X, y=None):
        """Fit a dataset, for a transformer.

        Parameters
        ----------
        X : iterable
            Each element must be an iterable with at most three features and at
            least one. The first that is obligatory is a valid graph structure
            (adjacency matrix or edge_dictionary) while the second is
            node_labels and the third edge_labels (that fit the given graph
            format). The train samples.

        y : None
            There is no need of a target in a transformer, yet the pipeline API
            requires this parameter.

        Returns
        -------
        self : object
            Returns self.

        """
        # Initialize the Graph Kernel (validates Nystroem/kernel parameters).
        self.initialize_()

        # Input validation and parsing
        if bool(self.nystroem_):
            X = list(X)
            nx = len(X)
            # get basis vectors
            if self.nystroem_ > nx:
                n_components = nx
                warnings.warn("n_components > n_samples. This is not "
                              "possible.\nn_components was set to n_samples"
                              ", which results in inefficient evaluation of"
                              " the full kernel.")
            else:
                n_components = self.nystroem_

            n_components = min(nx, n_components)
            # Draw a random subset of the training graphs as the Nystroem basis.
            # Uses the *global* NumPy RNG (seeded at import time above).
            inds = np.random.permutation(nx)
            basis_inds = inds[:n_components]
            basis = [X[i] for i in basis_inds]

            # sqrt of kernel matrix on basis vectors
            U, S, V = svd(self.kernel_.fit_transform(basis))
            # Clamp tiny singular values to keep 1/sqrt(S) numerically stable.
            S = np.maximum(S, 1e-12)
            self.nystroem_ = n_components
            # K_basis^{-1/2}: maps kernel values against the basis into the embedding.
            self.nystroem_normalization_ = np.dot(U / np.sqrt(S), V)
            self.components_ = basis
            # NOTE(review): this stores the *full* permutation rather than just
            # the chosen basis indices (``inds[:n_components]``) — confirm that
            # is intended before relying on ``component_indices_``.
            self.component_indices_ = inds
        else:
            self.kernel_.fit(X)

        # Return the transformer
        return self

    def transform(self, X):
        """Calculate the kernel matrix, between given and fitted dataset.

        Parameters
        ----------
        X : iterable
            Each element must be an iterable with at most three features and at
            least one. The first that is obligatory is a valid graph structure
            (adjacency matrix or edge_dictionary) while the second is
            node_labels and the third edge_labels (that fit the given graph
            format). If None the kernel matrix is calculated upon fit data.
            The test samples.

        Returns
        -------
        K : numpy array, shape = [n_targets, n_input_graphs]
            corresponding to the kernel matrix, a calculation between
            all pairs of graphs between target and features

        """
        # A Nystroem embedding requires that `fit` has stored the basis graphs.
        if bool(self.nystroem_):
            check_is_fitted(self, 'components_')

        # Transform - calculate kernel matrix
        if bool(self.nystroem_):
            # Project kernel values against the basis through K_basis^{-1/2}.
            K = self.kernel_.transform(X).dot(self.nystroem_normalization_.T)
        else:
            K = self.kernel_.transform(X)

        return K

    def update_kernel(self, X):
        """Update the kernel with new graphs.

        Delegates directly to the underlying kernel object; the Nystroem
        embedding (if any) is *not* refreshed here.

        Parameters
        ----------
        X : iterable
            Each element must be an iterable with at most three features and at
            least one. The first that is obligatory is a valid graph structure
            (adjacency matrix or edge_dictionary) while the second is
            node_labels and the third edge_labels (that fit the given graph
            format). The new samples.

        Returns
        -------
        object
            Whatever ``self.kernel_.update_kernel`` returns — presumably the
            updated kernel matrix; confirm against the kernel implementation.

        """
        return self.kernel_.update_kernel(X)

    def replace_kernel(self, Xn, inds):
        """Replace fitted graphs at the given indices with new graphs.

        Delegates directly to the underlying kernel object.

        Parameters
        ----------
        Xn : iterable
            Each element must be an iterable with at most three features and at
            least one. The first that is obligatory is a valid graph structure
            (adjacency matrix or edge_dictionary) while the second is
            node_labels and the third edge_labels (that fit the given graph
            format). The replacement samples.

        inds : list
            Each element must be a valid index of graphs that is being replaced
            by the new graph

        Returns
        -------
        object
            Whatever ``self.kernel_.replace_kernel`` returns — presumably the
            updated kernel matrix; confirm against the kernel implementation.

        """
        return self.kernel_.replace_kernel(Xn, inds)

    def fit_transform(self, X, y=None):
        """Fit and transform, on the same dataset.

        Parameters
        ----------
        X : iterable
            Each element must be an iterable with at most three features and at
            least one. The first that is obligatory is a valid graph structure
            (adjacency matrix or edge_dictionary) while the second is
            node_labels and the third edge_labels (that fit the given graph
            format). If None the kernel matrix is calculated upon fit data.
            The test samples.

        y : None
            There is no need of a target in a transformer, yet the pipeline API
            requires this parameter.

        Returns
        -------
        K : numpy array, shape = [n_targets, n_input_graphs]
            corresponding to the kernel matrix, a calculation between
            all pairs of graphs between target and features

        """
        # Initialize the Graph Kernel
        self.initialize_()

        # Transform - calculate kernel matrix
        if bool(self.nystroem_):
            # NOTE: in the Nystroem case X is consumed twice (fit, then
            # transform), so the underlying kernel is evaluated twice.
            self.fit(X)
            K = self.kernel_.transform(X).dot(self.nystroem_normalization_.T)
        else:
            K = self.kernel_.fit_transform(X)

        return K

    def initialize_(self):
        """Initialize all transformer arguments, needing initialisation."""
        if not self.initialized_["Nystroem"]:
            if type(self.Nystroem) not in [int, bool]:
                raise ValueError('Nystroem parameter must be an int, '
                                 'indicating the number of components'
                                 'or a boolean')
            elif self.Nystroem is False:
                self.nystroem_ = False
            elif self.Nystroem in [0, -1] or self.Nystroem is True:
                # picking default number of components
                self.nystroem_ = default_n_components
            elif self.Nystroem <= 0:
                raise ValueError('number of nystroem components '
                                 'must be positive')
            else:
                self.nystroem_ = self.Nystroem
            self.initialized_["Nystroem"] = True

        if not self.initialized_["kernel"] or not self.initialized_["n_jobs"]:
            if self.kernel is None:
                raise ValueError('kernel must be defined at the __init__ '
                                 'function of the graph kernel decorator ')
            else:
                # Parameters forwarded to every kernel object on top of the
                # user-provided kernel parameters.
                hidden_args = {"verbose": self.verbose,
                               "normalize": self.normalize,
                               "n_jobs": self.n_jobs}

                k = self.kernel
                if type(k) is dict:
                    # allow single kernel dictionary inputs
                    k = [self.kernel]
                elif type(k) is not list:
                    raise ValueError('unsupported kernel format')
                # Deep-copy so make_kernel_ can pop entries without mutating
                # the user-supplied parameter.
                kernel, params = self.make_kernel_(
                    copy.deepcopy(k), hidden_args)
                self.kernel_ = kernel(**params)
                self.initialized_["kernel"] = True

    def make_kernel_(self, kernel_list, hidden_args):
        """Produce the desired kernel function.

        Consumes ``kernel_list`` front-to-back: general (frame) kernels recurse
        on the remainder of the list; a base kernel terminates the recursion.

        Parameters
        ----------
        kernel_list : (list)
            List of kernel dictionaries as defined at the documentation
            of class parameters.

        hidden_args : dict
            Extra keyword arguments (verbose/normalize/n_jobs) merged into the
            outermost kernel's parameters.

        Returns
        -------
        kernel : kernel (class).
            Returns an instance of a kernel type object corresponding to the
            certain kernel.

        """
        kernel = kernel_list.pop(0)
        if type(kernel) is not dict:
            raise ValueError('each element of the list of kernels must'
                             ' be a dictionary')
        if "name" not in kernel:
            raise ValueError('each dictionary concerning a kernel must'
                             ' have a "name" parameter designating the'
                             'kernel')
        kernel_name = kernel.pop("name")
        # Merge the hidden arguments into the kernel's own parameters; the
        # remaining dict is passed verbatim to the kernel constructor.
        for (keys, val) in iteritems(hidden_args):
            kernel[keys] = val
        if kernel_name in supported_base_kernels:
            if len(kernel_list) != 0:
                warnings.warn('rest kernel arguments are being ignored\
                               - reached base kernel')
            if kernel_name in ["vertex_histogram", "subtree_wl"]:
                return VertexHistogram, kernel
            elif kernel_name == "random_walk":
                if kernel.pop("with_labels", False):
                    return RandomWalkLabeled, kernel
                else:
                    return RandomWalk, kernel
            elif kernel_name == "shortest_path":
                if kernel.pop("as_attributes", False):
                    return ShortestPathAttr, kernel
                else:
                    return ShortestPath, kernel
            elif kernel_name == "graphlet_sampling":
                # Forward the user's seed only when it differs from the default.
                # NOTE(review): ``is not`` compares identity, not equality; this
                # behaves as intended only because CPython interns small ints
                # such as 42 — confirm, or prefer ``!=``. (Same pattern below.)
                if ("random_seed" not in kernel and
                        self.random_seed is not
                        default_random_seed_value):
                    kernel["random_seed"] = self.random_seed
                return GraphletSampling, kernel
            elif kernel_name == "multiscale_laplacian":
                if kernel.pop("which", "fast") == "slow":
                    # "N" is only meaningful for the fast variant; drop it.
                    kernel.pop("N", None)
                    return (MultiscaleLaplacian, kernel)
                else:
                    if ("random_seed" not in kernel and
                            self.random_seed is not
                            default_random_seed_value):
                        kernel["random_seed"] = self.random_seed
                    return (MultiscaleLaplacianFast, kernel)
            elif kernel_name == "subgraph_matching":
                return SubgraphMatching, kernel
            elif kernel_name == "lovasz_theta":
                if ("random_seed" not in kernel and
                        self.random_seed is not
                        default_random_seed_value):
                    kernel["random_seed"] = self.random_seed
                return LovaszTheta, kernel
            elif kernel_name == "svm_theta":
                if ("random_seed" not in kernel and
                        self.random_seed is not
                        default_random_seed_value):
                    kernel["random_seed"] = self.random_seed
                return SvmTheta, kernel
            elif kernel_name == "neighborhood_hash":
                return NeighborhoodHash, kernel
            elif kernel_name in ["neighborhood_subgraph_pairwise_distance",
                                 "NSPD"]:
                return NeighborhoodSubgraphPairwiseDistance, kernel
            elif kernel_name == "odd_sth":
                return OddSth, kernel
            elif kernel_name == "propagation":
                if ("random_seed" not in kernel and
                        self.random_seed is not
                        default_random_seed_value):
                    kernel["random_seed"] = self.random_seed
                if kernel.pop("with_attributes", False):
                    return PropagationAttr, kernel
                else:
                    return Propagation, kernel
            elif kernel_name == "graph_hopper":
                return GraphHopper, kernel
            elif kernel_name == "pyramid_match":
                return PyramidMatch, kernel
            elif kernel_name == "edge_histogram":
                return EdgeHistogram, kernel
            # NOTE(review): a name present in supported_base_kernels but not
            # matched by any branch above falls through and implicitly returns
            # None, which crashes later in initialize_ — keep the list and this
            # dispatch chain in sync.
        elif kernel_name in supported_general_kernels:
            if (len(kernel_list) == 0):
                raise ValueError(str(kernel_name)+' is not a base kernel')
            else:
                # Recurse: the remainder of the list defines the base kernel
                # wrapped by this general (frame) kernel. Hidden args are only
                # applied to the outermost kernel (empty dict here).
                kernel["base_kernel"] = self.make_kernel_(kernel_list, {})
            if kernel_name == "weisfeiler_lehman":
                return (WeisfeilerLehman, kernel)
            elif kernel_name == "hadamard_code":
                return (HadamardCode, kernel)
            elif kernel_name == "core_framework":
                return (CoreFramework, kernel)
        else:
            raise ValueError("unsupported kernel: " + str(kernel_name))

    def set_params(self, **params):
        """Call the parent method."""
        # Copy the parameters so the caller's dict is never mutated.
        params = copy.deepcopy(params)

        # Iterate over the parameters; mark any touched, monitored parameter
        # (or nested "<name>__<sub>" parameter) for re-initialization.
        for key, value in iteritems(params):
            key, delim, sub_key = key.partition('__')
            if delim:
                if sub_key in self.initialized_:
                    self.initialized_[sub_key] = False
            elif key in self.initialized_:
                self.initialized_[key] = False

        # Set parameters
        super(GraphKernel, self).set_params(**params)
|
from query.query import ProjectQuery
from utils.csv_ops import write_csv
from config import conf
from labels_from_csv import read_labels_from_csv
HEADERS = ['commit_id', 'la', 'ld', 'lt', 'ns', 'nd', 'nf', 'entropy', 'fix', 'ndev', 'age', 'nuc', 'exp', 'rexp', 'sexp']
def combine_label_with_features(change_list, bug_introducing_set):
    """Return a new list of change dicts, each with a boolean 'label' added.

    'label' is True when the change's commit_id appears in
    ``bug_introducing_set``, False otherwise.

    Fix: the previous version aliased the input dicts (``tmp_data = c``) and
    set the key on them, mutating the caller's data; each change is now
    shallow-copied first, so ``change_list`` is left untouched.

    Parameters
    ----------
    change_list : list of dict
        Feature rows, each containing at least a 'commit_id' key.
    bug_introducing_set : set-like
        Commit ids of bug-introducing changes (membership test only).

    Returns
    -------
    list of dict
        New dicts: the original features plus the 'label' key.
    """
    labeled_rows = list()
    for change in change_list:
        assert isinstance(change, dict)
        row = dict(change)  # shallow copy: do not mutate the caller's dicts
        row['label'] = change['commit_id'] in bug_introducing_set
        labeled_rows.append(row)
    return labeled_rows
def csv_to_disk(project):
    """Query a project's change features, merge in labels, and write the CSV.

    Reads the label CSV path and output feature CSV path from ``conf``.
    """
    assert isinstance(project, str)
    query = ProjectQuery(project)
    changes = query.combine()
    assert isinstance(changes, list)
    # Labels come from a separately maintained CSV of bug-introducing commits.
    labels = read_labels_from_csv(conf.project_label_csv_path(project))
    labeled_changes = combine_label_with_features(changes, labels)
    write_csv(conf.project_feature_csv_path(project), labeled_changes, HEADERS)
if __name__ == '__main__':
    # Generate one labeled feature CSV per configured project.
    for p in conf.projects:
        csv_to_disk(p)
|
import pytest
import click
import os
import pathlib
import ghia.configreader as creader
import ghia.rule as rule
def config(name):
    """Return the path of fixture file *name* under this module's fixtures dir."""
    fixtures_dir = pathlib.Path(__file__).parent / 'fixtures'
    return fixtures_dir / name
def test_read_auth_empty_file():
    # An auth file missing the required token must be rejected.
    with pytest.raises(Exception):
        creader.read_auth(config('auth.invalid.cfg'))
def test_read_auth_only_token():
    # With no secret configured, read_auth returns (token, None).
    result = creader.read_auth(config('auth.no-secret.cfg'))
    assert result == ('abc', None)
def test_read_auth():
    # A complete auth file yields the (token, secret) pair.
    result = creader.read_auth(config('auth.cfg'))
    assert result == ('abc', 'passwd')
# ------------------------------------------------------------------
def test_read_rules_empty_file():
    # An invalid rules file must be rejected by read_rules.
    # BUG FIX: this test previously called creader.read_auth, so the invalid
    # *rules* fixture never actually exercised read_rules.
    with pytest.raises(Exception):
        creader.read_rules(config('rules.invalid.cfg'))
def test_read_rules_empty():
    # An empty rules file yields no rule sets and no fallback.
    result = creader.read_rules(config('rules.empty.cfg'))
    # PEP 8: compare against None with identity, not equality.
    assert result is not None
    assert result == ([], None)
def test_read_rules_fallback():
    # A rules file with only a fallback yields no rule sets plus the fallback.
    result = creader.read_rules(config('rules.fallback.cfg'))
    # PEP 8: compare against None with identity, not equality.
    assert result is not None
    assert result == ([], 'fallback')
def test_read_rules_fallback_invalid():
    # A malformed fallback entry must be rejected.
    with pytest.raises(Exception):
        creader.read_rules(config('rules.fallback.invalid.cfg'))
def test_read_rules_only_one():
    # Build the single RuleSet the one-user fixture is expected to produce.
    rules = rule.RuleSet('GhUser')
    rules.add(rule.Rule('title', 'test'))
    rules.add(rule.Rule('text', 'protocol'))
    expected = ([rules], 'fallback')
    result = creader.read_rules(config('rules.one.cfg'))
    assert result == expected
def test_read_rules_multiple():
    # Two users in the fixture -> two RuleSets, preserving file order.
    a = rule.RuleSet('AB')
    a.add(rule.Rule('title', 'protocol'))
    a.add(rule.Rule('text', 'secret'))
    b = rule.RuleSet('GhUser')
    b.add(rule.Rule('title', 'test'))
    b.add(rule.Rule('text', 'protocol'))
    result = creader.read_rules(config('rules.multiple.cfg'))
    assert result == ([a, b], 'fallback')
|
import os
from shutil import copyfile
import pytest
from cascade.word_header_footer import replace_in_header_footer
# Absolute path of the directory containing this test module; fixture assets
# and result files are resolved relative to it.
test_root_path = os.path.dirname(os.path.realpath(__file__))
def test_header_footer():
    """Smoke test: run replace_in_header_footer over a copied fixture .docx."""
    in_filename = os.path.join(test_root_path, 'assets', 'asset__header_footer.docx')
    out_filename = os.path.join(test_root_path, 'results', 'result__header_footer.docx')
    # (find, replace) pairs to apply inside the document's headers/footers.
    search_list = (
        dict(find='28-5018-10', replace='98-7654-32'),
        dict(find='Feature Description Document (FDD), RAVE', replace='New Document Title'),
    )
    # Operate on a copy so the input asset is left untouched.
    copyfile(in_filename, out_filename)
    replace_in_header_footer(out_filename, search_list)
    # TODO: There is no automated checking of the resulting output.
    #  Need to add a way to search and validate the output
    #  document. Until then this only verifies the call does not raise.
"""Module containing class `Plugin`."""
class Plugin:
    """
    Abstract base class for plugins.

    This class has all of the attributes required of plugin classes,
    but the attributes have `None` values instead of real ones. The
    attributes are provided mainly for the purpose of documentation.
    A subclass must provide real values for the attributes.
    """

    name = None
    """The name of this plugin, a string."""

    version = None
    """The version of this plugin, a string."""

    description = None
    """Description of this plugin, a string."""

    author = None
    """The author of this plugin, a string."""

    license = None
    """The license of this plugin, a string."""

    # NOTE(review): `type` and `license` shadow builtins, but renaming them
    # would break the documented plugin attribute contract; left as-is.
    type = None
    """
    The plugin type of this plugin, a subclass of the `PluginType` class.

    This attribute is set automatically when the plugin is loaded,
    according to the plugin's declared setuptools entry point group.
    """

    interface = None
    """
    The plugin interface implemented by this plugin, an abstract
    `Plugin` subclass.

    This attribute is set automatically when the plugin is loaded,
    according to the plugin interface that the plugin subclasses.
    Every plugin must be a subclass of exactly one of the supported
    plugin interfaces of its plugin type.
    """
|
# Git Author
# Read three integers n, m, l and report whether l lies between n and m
# (inclusive), in either order.
n, m, l = map(int, input().split())
# l is between n and m iff it falls within [min(n, m), max(n, m)]; the chained
# comparison replaces the two mirrored and-conditions of the original.
if min(n, m) <= l <= max(n, m):
    print("Yes")
else:
    print("No")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.