from typing import Optional, Any
import torch
from transformers import AutoModel
from flambe.nn import Module
class PretrainedTransformerEmbedder(Module):
"""Embedder intergation of the transformers library.
Instantiate this object using any alias available in the
`transformers` library. More information can be found here:
https://huggingface.co/transformers/
"""
def __init__(self,
alias: str,
cache_dir: Optional[str] = None,
padding_idx: Optional[int] = None,
pool: bool = False, **kwargs) -> None:
"""Initialize from a pretrained model.
Parameters
----------
alias: str
Alias of a pretrained model.
cache_dir: str, optional
            Directory in which to cache the downloaded pretrained model files.
padding_idx: int, optional
The padding index used to compute the attention mask.
        pool: bool, optional
Whether to return the pooled output or the full sequence
encoding. Default ``False``.
"""
super().__init__()
if 'gpt2' in alias and pool:
raise ValueError('GPT2 does not support pooling.')
embedder = AutoModel.from_pretrained(alias, cache_dir=cache_dir, **kwargs)
self.config = embedder.config
self._embedder = embedder
self.padding_idx = padding_idx
self.pool = pool
def forward(self,
data: torch.Tensor,
token_type_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
"""Perform a forward pass through the network.
        If ``pool`` is True, only the pooled output of shape [B x H]
        is returned. Otherwise, the full sequence encoding of shape
        [B x S x H] is returned.
Parameters
----------
data : torch.Tensor
The input data of shape [B x S]
token_type_ids : Optional[torch.Tensor], optional
Segment token indices to indicate first and second portions
of the inputs. Indices are selected in ``[0, 1]``: ``0``
corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token. Has shape [B x S]
attention_mask : Optional[torch.Tensor], optional
FloatTensor of shape [B x S]. Masked values should
be 0 for padding tokens, 1 otherwise.
position_ids : Optional[torch.Tensor], optional
Indices of positions of each input sequence tokens
in the position embedding. Defaults to the order given
in the input. Has shape [B x S].
head_mask : Optional[torch.Tensor], optional
Mask to nullify selected heads of the self-attention
modules. Should be 0 for heads to mask, 1 otherwise.
Has shape [num_layers x num_heads]
Returns
-------
torch.Tensor
            If pool is True, returns a tensor of shape [B x H],
else returns an encoding for each token in the sequence
of shape [B x S x H].
"""
if attention_mask is None and self.padding_idx is not None:
attention_mask = (data != self.padding_idx).float()
outputs = self._embedder(data,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask)
output = outputs[0] if not self.pool else outputs[1]
return output
def __getattr__(self, name: str) -> Any:
"""Override getattr to inspect config.
Parameters
----------
name : str
The attribute to fetch
Returns
-------
Any
The attribute
"""
try:
return super().__getattr__(name)
except AttributeError as e:
config = self.__dict__['config']
if hasattr(config, name):
return getattr(config, name)
else:
raise e
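
# --- Illustrative usage (not part of the original module) ---
# A minimal sketch, assuming the ``bert-base-uncased`` weights are downloadable
# and that the tokenizer's padding token id is 0.
if __name__ == '__main__':
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    embedder = PretrainedTransformerEmbedder('bert-base-uncased', padding_idx=0)
    batch = tokenizer(['a short sentence', 'a second, slightly longer sentence'],
                      padding=True, return_tensors='pt')
    encoding = embedder(batch['input_ids'])  # full sequence encoding, [B x S x H]
    print(encoding.shape)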
|
import torch
import torch.nn as nn
import torch.nn.functional as F
IMG_LEN = 1024
TXT_LEN = 300
N_TOPICS = 50
N_WORTHINESSES = 2
class MultitargetTridentModelBN(nn.Module):
def __init__(self, d=128, drop=0.25, worthiness_trident=False):
super().__init__()
self.worthiness_trident = worthiness_trident
self.fc_img_1 = nn.Linear(IMG_LEN, d * 4)
self.bn_img_1 = nn.BatchNorm1d(num_features=d * 4)
self.fc_img_2 = nn.Linear(d * 4, d * 2)
self.bn_img_2 = nn.BatchNorm1d(num_features=d * 2)
self.fc_txt_1 = nn.Linear(TXT_LEN, d * 2)
self.bn_txt_1 = nn.BatchNorm1d(num_features=d * 2)
self.fc_txt_2 = nn.Linear(d * 2, d * 2)
self.bn_txt_2 = nn.BatchNorm1d(num_features=d * 2)
self.fc1 = nn.Linear(d * 4, d)
self.bn1 = nn.BatchNorm1d(num_features=d)
self.fc2 = nn.Linear(d, d)
self.bn2 = nn.BatchNorm1d(num_features=d)
self.out_topics_common = nn.Linear(d, N_TOPICS)
self.out_topics_img = nn.Linear(d * 2, N_TOPICS)
self.out_topics_txt = nn.Linear(d * 2, N_TOPICS)
self.out_worthiness_common = nn.Linear(d, N_WORTHINESSES)
if self.worthiness_trident:
self.out_worthiness_img = nn.Linear(d * 2, N_WORTHINESSES)
self.out_worthiness_txt = nn.Linear(d * 2, N_WORTHINESSES)
        self.dropout = nn.Dropout(p=drop)
def forward(self, inp_img, inp_txt):
x_img = self.bn_img_1(F.relu(self.fc_img_1(inp_img)))
x_img = self.dropout(x_img)
x_img = self.bn_img_2(F.relu(self.fc_img_2(x_img)))
x_img = self.dropout(x_img)
x_txt = self.bn_txt_1(F.relu(self.fc_txt_1(inp_txt)))
x_txt = self.dropout(x_txt)
x_txt = self.bn_txt_2(F.relu(self.fc_txt_2(x_txt)))
x_txt = self.dropout(x_txt)
x = torch.cat((x_img, x_txt), 1)
x = self.dropout(self.bn1(F.relu(self.fc1(x))))
x = self.bn2(F.relu(self.fc2(x)))
out_topics_common = F.log_softmax(self.out_topics_common(x), dim=1)
out_topics_img = F.log_softmax(self.out_topics_img(x_img), dim=1)
out_topics_txt = F.log_softmax(self.out_topics_txt(x_txt), dim=1)
out_worthiness_common = F.log_softmax(self.out_worthiness_common(x), dim=1)
if self.worthiness_trident:
out_worthiness_img = F.log_softmax(self.out_worthiness_img(x_img), dim=1)
out_worthiness_txt = F.log_softmax(self.out_worthiness_txt(x_txt), dim=1)
return (out_topics_common,
out_topics_img,
out_topics_txt,
out_worthiness_common,
out_worthiness_img,
out_worthiness_txt)
else:
return (out_topics_common,
out_topics_img,
out_topics_txt,
out_worthiness_common)
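
# --- Illustrative smoke test (not part of the original module) ---
# A minimal sketch using random tensors in place of real image/text features.
if __name__ == "__main__":
    model = MultitargetTridentModelBN(worthiness_trident=True)
    model.eval()  # use running BatchNorm statistics so any batch size works
    img_features = torch.randn(4, IMG_LEN)  # stand-in for [batch x 1024] image features
    txt_features = torch.randn(4, TXT_LEN)  # stand-in for [batch x 300] text features
    outputs = model(img_features, txt_features)
    print([tuple(o.shape) for o in outputs])  # six log-softmax heads when the trident is enabled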
|
# Copyright 2016 OSNEXUS Corporation
# See LICENSE file for details.
import requests
import json
from uuid import UUID
from requests.auth import HTTPBasicAuth
import socket
import time
import subprocess
import uuid
from twisted.python.filepath import FilePath
from flocker.node.agents.blockdevice import (
AlreadyAttachedVolume, IBlockDeviceAPI, IProfiledBlockDeviceAPI,
BlockDeviceVolume, UnknownVolume, UnattachedVolume
)
from qsclient import qsclient
from qsclient import Host
from qsclient import Pool
class VolumeProfiles():
"""
:ivar GOLD: The profile for fast storage.
:ivar SILVER: The profile for intermediate/default storage.
:ivar BRONZE: The profile for cheap storage.
:ivar DEFAULT: The default profile if none is specified.
"""
PROFILE_GOLD = 'gold'
PROFILE_SILVER = 'silver'
PROFILE_BRONZE = 'bronze'
PROFILE_DEFAULT = PROFILE_GOLD
PROFILE_GOLD_TIER = 'flocker_def_gold_tier'
PROFILE_SILVER_TIER = 'flocker_def_silver_tier'
PROFILE_BRONZE_TIER = 'flocker_def_bronze_tier'
PROFILE_DEFAULT_POOL = 'flocker_def_pool'
class osnexusAPI(object):
def __init__(self, ipAddress, username, password, gold_tier, silver_tier, bronze_tier, default_pool, logger):
self._qsclient = qsclient(ipAddress, username, password, logger)
self._ipAddress = ipAddress
self._hostIqn = ""
self._osnexusHostId = ""
self._osnexusDefPoolId = ""
self._osnexusTierId = ""
self._gold_tier = gold_tier
self._silver_tier = silver_tier
self._bronze_tier = bronze_tier
self._default_pool = default_pool
self._logger = logger
if(gold_tier == ""):
self._gold_tier = VolumeProfiles.PROFILE_GOLD_TIER
if(silver_tier == ""):
self._silver_tier = VolumeProfiles.PROFILE_SILVER_TIER
if(bronze_tier == ""):
self._bronze_tier = VolumeProfiles.PROFILE_BRONZE_TIER
if(default_pool == ""):
self._default_pool = VolumeProfiles.PROFILE_DEFAULT_POOL
def listOsnexusVolumes(self):
try:
# volumes is the flocker data type
volumes = []
qs_vols = self._qsclient.list_volumes()
for vol in qs_vols:
#Add only the volumes starting with "flockerVol-"
volUuid = self.getDataSetId(vol._name)
if volUuid is None:
continue
#Now get the host access list for this volume to figure out if it is attached
if self.isVolumeAttached(vol._id) == True:
volumes.append(BlockDeviceVolume(
blockdevice_id=unicode(vol._id),
size=int(vol._size),
attached_to=unicode(socket.gethostbyname(socket.getfqdn())),
dataset_id=volUuid))
else:
volumes.append(BlockDeviceVolume(
blockdevice_id=unicode(vol._id),
size=int(vol._size),
attached_to=None,
dataset_id=volUuid))
except Exception as e:
self._logger.error("List volume failed with exception")
raise e
return volumes
    # Flocker passes in dataset_id, which is a UUID. We use the dataset_id in the quantastor volume name along with the "flockerVol-" prefix.
# The internal object ID created by quantastor for this volume is used in the blockdevice_id
# All other flocker entry points pass in the blockdevice_id, which can be directly used for making API calls on the volume.
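    # Example (illustrative): dataset_id 0e4de2ae-5c9b-11e5-b14b-0242ac110002 maps to the
    # quantastor volume name "flockerVol-0e4de2ae-5c9b-11e5-b14b-0242ac110002"; blockdevice_id
    # then holds the quantastor-assigned id of that volume object.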
def createOsnexusVolume(self, dataset_id, size):
try:
if self._osnexusDefPoolId == "":
def_pool = self._qsclient.get_pool(self._default_pool)
self._osnexusDefPoolId = def_pool._id
volName="flockerVol-{0}".format(dataset_id)
# Check the pools and pass that to qsclient.create_volume
vol = self._qsclient.create_volume(volName, size, "createdbyFlocker", self._osnexusDefPoolId)
flocker_volume = BlockDeviceVolume(
blockdevice_id=vol._id,
size=vol._size,
attached_to=None,
dataset_id=dataset_id)
return flocker_volume
except Exception as e:
self._logger.error("Create volume failed. Dataset Id '%s'", dataset_id)
raise e
def createOsnexusVolumeWithProfile(self, dataset_id, size, profile_name):
try:
tier = ""
if profile_name == VolumeProfiles.PROFILE_GOLD:
tier = self._gold_tier
elif profile_name == VolumeProfiles.PROFILE_SILVER:
tier = self._silver_tier
elif profile_name == VolumeProfiles.PROFILE_BRONZE:
tier = self._bronze_tier
elif profile_name == VolumeProfiles.PROFILE_DEFAULT:
return self.createOsnexusVolume(dataset_id, size)
            # Resolve the tier for the requested profile on each call; caching a
            # single tier id would reuse the wrong tier across profiles.
            tier_obj = self._qsclient.get_tier(tier)
            self._osnexusTierId = tier_obj._id
volName="flockerVol-{0}".format(dataset_id)
# Check the pools and pass that to qsclient.create_volume
vol = self._qsclient.create_volume(volName, size, "createdbyFlocker", self._osnexusTierId)
flocker_volume = BlockDeviceVolume(
blockdevice_id=vol._id,
size=vol._size,
attached_to=None,
dataset_id=dataset_id)
return flocker_volume
except Exception as e:
self._logger.error("Create volume with profile failed. Dataset Id '%s'", dataset_id)
raise e
def deleteOsnexusVolume(self, blockdevice_id):
try:
vol = self.validateVolume(blockdevice_id)
if self.isVolumeAttached(blockdevice_id) is True:
try:
self.doIscsiLogout(vol._name)
except Exception as e:
self._logger.error("failed to logout in deleteVolume. blockdevice id '%s'", blockdevice_id)
raise e
self._qsclient.delete_volume(blockdevice_id)
except Exception as e:
self._logger.error("Delete volume failed. block device Id '%s'", blockdevice_id)
raise e
# This function returns the datasetId from the volume name
def getDataSetId(self, volName):
if volName.find("flockerVol-") != 0:
return None
volName = volName[11:]
volUuid = UUID(volName)
return volUuid
def validateVolume(self, blockdevice_id):
try:
vol = self._qsclient.get_volume(blockdevice_id)
volName = vol._name
if volName.find("flockerVol-") != 0:
raise UnknownVolume(blockdevice_id)
return vol
except Exception as e:
raise UnknownVolume(blockdevice_id)
def isVolumeAttached(self, blockdevice_id):
acl_list = self._qsclient.volume_acl_enum(blockdevice_id)
if len(acl_list) == 0:
return False
found = False
for acl in acl_list:
if acl._volid == blockdevice_id and acl._hostid.find(self._osnexusHostId) != -1:
found = True
break
return found
def attachOsnexusVolume(self, blockdevice_id, attach_to):
vol = self.validateVolume(blockdevice_id)
volUuid = self.getDataSetId(vol._name)
if volUuid is None:
raise UnknownVolume(blockdevice_id)
if self.isVolumeAttached(blockdevice_id) is True:
raise AlreadyAttachedVolume(blockdevice_id)
self.createHost()
self._qsclient.volume_attach(blockdevice_id, self._osnexusHostId)
try:
self.doIscsiLogin(vol._name)
except Exception as e:
self._logger.error("failed to login")
raise UnattachedVolume(blockdevice_id)
return BlockDeviceVolume(
blockdevice_id=blockdevice_id,
size=int(vol._size),
attached_to=attach_to,
dataset_id=volUuid)
def detachOsnexusvolume(self, blockdevice_id):
vol = self.validateVolume(blockdevice_id)
if self.isVolumeAttached(blockdevice_id) is False:
raise UnattachedVolume(blockdevice_id)
try:
self.doIscsiLogout(vol._name)
except Exception as e:
self._logger.error("failed to logout")
raise UnattachedVolume(blockdevice_id)
self._qsclient.volume_dettach(blockdevice_id, self._osnexusHostId)
def getOsNexusDevicePath(self, blockdevice_id):
vol = self.validateVolume(blockdevice_id)
if self.isVolumeAttached(blockdevice_id) is False:
raise UnattachedVolume(blockdevice_id)
targetIqn = self.iscsiDiscovery(vol._name)
return self.getIscsiPath(targetIqn)
def doIscsiLogin(self, volName):
try:
targetIqn = self.iscsiDiscovery(volName)
except Exception as e:
self._logger.error("Volume [" + volName + "] not found during discovery")
raise UnattachedVolume(volName)
self.iscsiLogin(targetIqn)
def doIscsiLogout(self, volName):
try:
targetIqn = self.iscsiDiscovery(volName)
except Exception as e:
self._logger.error("Volume [" + volName + "] not found during discovery")
raise UnattachedVolume(volName)
self.iscsiLogout(targetIqn)
# Does iscsi discovery and returns the target IQN for the specified volume
def iscsiDiscovery(self, volName):
time.sleep(2)
cmd = "iscsiadm -m discovery -t st -p " + self._ipAddress
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
(output, err) = p.communicate()
listOutput = output.split('\n')
for line in listOutput:
if line.find(volName) != -1:
#Found the volume during iscsi discovery
splitline = line.split()
return splitline[1]
        raise Exception("Failed to discover volume")
def iscsiLogin(self, targetIqn):
cmd = "iscsiadm -m node -l -T " + targetIqn
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
(output, err) = p.communicate()
if output.find("successful"):
time.sleep(3)
return self.getIscsiPath(targetIqn)
else:
raise Exception("Failed to do iscsi login to the volume")
def iscsiLogout(self, targetIqn):
#print "iscsiLogout " + targetIqn
cmd = "iscsiadm -m node -u -T " + targetIqn
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
(output, err) = p.communicate()
if output.find("successful"):
time.sleep( 2 )
return
else:
raise Exception("Failed to do iscsi logout to the volume")
def getIscsiPath(self, targetIqn):
#print "getIscsiPath " + targetIqn
cmd = "ls /dev/disk/by-path/"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
(output, err) = p.communicate()
listOutput = output.split('\n')
for line in listOutput:
if line.find(targetIqn) != -1:
cmd = "readlink -f /dev/disk/by-path/" + line
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
(output, err) = p.communicate()
if output.find("dev"):
return FilePath(output.strip())
else:
self._logger.error("Failed to find path from readlink")
raise Exception("Failed to find path from readlink")
        self._logger.error("Failed to find device-by-path")
        raise Exception("Failed to find device-by-path")
# Read the host iscsi IQN from the local file '/etc/iscsi/initiatorname.iscsi'
# If the IQN is found, it assigns it to self._hostIqn for later use. Once it is set, we don't have to read
# the file again
def readIscsiIqn(self):
if self._hostIqn != "":
return
try:
f = open('/etc/iscsi/initiatorname.iscsi')
except IOError:
self._logger.error("File /etc/iscsi/initiatorname.iscsi not found !!")
raise Exception("File /etc/iscsi/initiatorname.iscsi not found !!")
## Find the line containing InitiatorName
line = f.readline()
while line:
if line.startswith("InitiatorName="):
                self._hostIqn = line.rsplit('=', 1)[1].strip()
break
line = f.readline()
f.close()
#Create a host in quantastor. If a host with the flocker node local IQN exists in quantastor, then the ID of
    # that quantastor host is used for attach/detach operations
# TODO : Should we always check with quantastor if the host id is valid ??
def createHost(self):
#Check if a host with this iqn exists
if self._osnexusHostId != "":
return
self.readIscsiIqn()
        if self._hostIqn == "":
            self._logger.error("InitiatorName not found in the file /etc/iscsi/initiatorname.iscsi")
            #TODO : Error ?? Handle this
        else:
            self._logger.info("Host IQN: " + self._hostIqn)
try:
host = self._qsclient.host_initiator_get(self._hostIqn)
self._osnexusHostId = host._hostId
except Exception as e:
# Host initiator was not found. Create a new one
#Formulate a host name
hostName = "FlockerHost-" + uuid.uuid1().__str__()
host = self._qsclient.create_host(hostName, self._hostIqn)
self._osnexusHostId = host._hostId
|
# Copyright (c) 2022 McCoy Software Solutions
# Apache License 2.0
import random
chanceToLove = 33.3
chanceToFight = 33.3
chanceToBefriend = 33.3
decision = random.randint(0,100)
print("Chance To Love: %d" % chanceToLove)
print("Chance To Fight: %d" % chanceToFight)
print("Chance To Befriend: %d" % chanceToBefriend)
print("Decision: %d" % decision)
if decision <= chanceToLove:
    print("love")
elif decision <= chanceToLove + chanceToFight:
    print("fight")
else:
    print("befriend")
|
print('File IO')
import os
test_file=open("test.txt","wb")
print(test_file.mode)
print(test_file.name)
test_file.write(bytes("Write me to the file \n",'UTF-8'))
test_file.close()
test_file= open("test.txt","r+")
text_in_file= test_file.read()
print(text_in_file)
test_file.close()
os.remove("test.txt")
os.remove("text.txt")
|
primeiroTermo = int(input('First term: '))
razao = int(input('Common difference of the AP: '))
termo = primeiroTermo
cont = 0
total = 0
maisTermos = 10
while maisTermos != 0:
total += maisTermos
while cont < total:
print(f'{termo} -> ', end='')
termo += razao
cont += 1
    print('PAUSE')
    maisTermos = int(input('How many more terms do you want to show? '))
print(f'Progression finished with {total} terms shown.')
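# Sample session (illustrative): with first term 1, common difference 2 and the
# initial 10 terms, the program prints
#   1 -> 3 -> 5 -> 7 -> 9 -> 11 -> 13 -> 15 -> 17 -> 19 -> PAUSE
# and, once the user answers 0, "Progression finished with 10 terms shown."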
|
# create fake data
from django.core.management.base import BaseCommand, CommandError
from faker import Faker
from dashboard.serializers import create_aclpermissions_for_role
fake = Faker()
from systemconfig.models import Country, Nationality, State, CityTown, Religion, Languages, Relationship, Occupation, \
Hobby, District, Caste
from dashboard.models import Widget, Roles, UserTypes, SystemSettings
DATA = {
'user_types': {'Super Admin': 'SU', 'Admin': 'A','Anonymous': 'AN'},
'system_settings': {
'maintenance_mode': '0', 'frontend_url': '', 'backend_url': '', 'sendsms_verifiednumbers_only':'0',
'sub_domain': '',
},
'roles': ['Administrator', 'Manager', 'Anonymous'],
'religions': ['Christian', 'Hindu', 'Muslim', 'Other'],
'languages': ['English', 'Malayalam', 'Tamil', 'Hindi', 'Other'],
'relationships': ['Father', 'Mother', 'Brother', 'Sister', 'Uncle', 'Other'],
'occupations': ['Business', 'Teacher', 'Software Engineer', 'Driver', 'Other'],
    'student_categories': ['ST', 'SC', 'OBC', 'Nair', 'Bhramin', 'Roman Catholic', 'Latin Catholic', 'Others', ],
'castes': ['Roman Catholic', 'Latin Catholic', 'Others', ],
'nationalities': ['Indian', 'American', 'Sri Lankan', 'Other'],
'states': ['Kerala', 'Tamilnadu', 'Karnataka', 'Other'],
'districts': ['Thrissur', 'Ernakulam'],
'cities': ['Thrissur', 'Guruvayur', 'Kunnamkulam', 'Other'],
'hobbies': ['Reading', 'Writing', 'Drawing', 'Listening Music'],
'countries': ['India', 'America', 'Other'],
'designations': ['Principal', 'Manager', 'Teacher', 'Driver', 'Other'],
'widgets': {'profile':''},
}
class Command(BaseCommand):
help = 'Populate data'
def handle(self, *args, **options):
        # Roles should be created by the system in the order below.
        # Roles - primary key values - these are static in a system:
        # 'Principal - 3', 'Vice Principal - 4', 'Administrator - 5', 'Manager - 6', 'Employee - 7', 'Teacher - 8', 'Student - 9', 'Parent - 10',
        # 'Librarian - 11', 'Anonymous - 12'
# ++++++++++++++++++++++++ Adding Roles ++++++++++++++++++++++++
for role in DATA['roles']:
role_type = DATA['user_types'].get(role,'')
data_exist = Roles.objects.filter(name__exact=role)
if data_exist:
if data_exist[0].name == role:
if data_exist[0].role_type == role_type:
print("Skipping: " + role)
else:
print("Updated: " + role)
data_exist.update(role_type=role_type)
continue
# role_type = DATA['user_types'].get(role,'')
role_created = Roles.objects.create(name=role, role_type=role_type, status=1)
print("Added: " + role)
create_aclpermissions_for_role(role_created)
# ++++++++++++++++++++++++ End of Adding Roles +++++++++++++++++
# ++++++++++++++++++++++++ Adding User Types ++++++++++++++++++++++++
for key, val in DATA['user_types'].items():
data_exist = UserTypes.objects.filter(user_type__exact=val).all()
if not data_exist:
print("Added: " + key)
UserTypes.objects.create(name=key, user_type=val, status=1)
else:
if data_exist[0].user_type == val:
print("Skipping: " + val)
else:
print("Added: " + key)
UserTypes.objects.create(name=key, user_type=val, status=1)
# ++++++++++++++++++++++++ End of Adding User Types +++++++++++++++++
# ++++++++++++++++++++++++ Adding System Settings ++++++++++++++++++++++++
for key, val in DATA['system_settings'].items():
data_exist = SystemSettings.objects.filter(key__exact=key).all()
if not data_exist:
print("Added: " + key)
SystemSettings.objects.create(key=key, value=val, status=1)
else:
if data_exist[0].key == key:
print("Skipping: " + key)
else:
print("Added: " + key)
SystemSettings.objects.create(key=key, value=val, status=1)
# ++++++++++++++++++++++++ End of Adding System Settings +++++++++++++++++
# ++++++++++++++++++++++++ Adding Religions ++++++++++++++++++++++++
# religion = Religion.objects.create(religion_name=DATA['religions'][i], status=1)
for religion in DATA['religions']:
data_exist = Religion.objects.filter(religion_name__exact=religion).all()
if not data_exist:
print("Added: " + religion)
Religion.objects.create(religion_name=religion, status=1)
else:
if data_exist[0].religion_name == religion:
print("Skipping: " + religion)
else:
print("Added: " + religion)
Religion.objects.create(religion_name=religion, status=1)
# ++++++++++++++++++++++++ End of Adding Religions +++++++++++++++++
# ++++++++++++++++++++++++ Adding Castes ++++++++++++++++++++++++
for caste in DATA['castes']:
data_exist = Caste.objects.filter(name__exact=caste).all()
if not data_exist:
print("Added: " + caste)
Caste.objects.create(name=caste, status=1)
else:
if data_exist[0].name == caste:
print("Skipping: " + caste)
else:
print("Added: " + caste)
Caste.objects.create(name=caste, status=1)
# ++++++++++++++++++++++++ End of Adding Castes +++++++++++++++++
# ++++++++++++++++++++++++ Adding Languages ++++++++++++++++++++++++
# language = Languages.objects.create(language_name=DATA['languages'][i], status=1)
for language in DATA['languages']:
data_exist = Languages.objects.filter(language_name__exact=language).all()
if not data_exist:
print("Added: " + language)
Languages.objects.create(language_name=language, status=1)
else:
if data_exist[0].language_name == language:
print("Skipping: " + language)
else:
print("Added: " + language)
Languages.objects.create(language_name=language, status=1)
        # ++++++++++++++++++++++++ End of Adding Languages +++++++++++++++++
# ++++++++++++++++++++++++ Adding Hobbies ++++++++++++++++++++++++
for hobby in DATA['hobbies']:
data_exist = Hobby.objects.filter(name__exact=hobby).all()
if not data_exist:
print("Added: " + hobby)
Hobby.objects.create(name=hobby, status=1)
else:
if data_exist[0].name == hobby:
print("Skipping: " + hobby)
else:
print("Added: " + hobby)
Hobby.objects.create(name=hobby, status=1)
# ++++++++++++++++++++++++ End of Adding Hobbies +++++++++++++++++
# ++++++++++++++++++++++++ Adding Countries ++++++++++++++++++++++++
# country = Country.objects.create(country_name='India', status=1)
for country in DATA['countries']:
data_exist = Country.objects.filter(country_name__exact=country).all()
if not data_exist:
print("Added: " + country)
Country.objects.create(country_name=country, status=1)
else:
if data_exist[0].country_name == country:
print("Skipping: " + country)
else:
print("Added: " + country)
Country.objects.create(country_name=country, status=1)
# ++++++++++++++++++++++++ End of Adding Countries +++++++++++++++++
# ++++++++++++++++++++++++ Adding Relationships ++++++++++++++++++++++++
# relationship = Relationship.objects.create(name=DATA['relationships'][i], status=1)
for relationship in DATA['relationships']:
data_exist = Relationship.objects.filter(name__exact=relationship).all()
if not data_exist:
print("Added: " + relationship)
Relationship.objects.create(name=relationship, status=1)
else:
if data_exist[0].name == relationship:
print("Skipping: " + relationship)
else:
print("Added: " + relationship)
Relationship.objects.create(name=relationship, status=1)
        # ++++++++++++++++++++++++ End of Adding Relationships +++++++++++++++++
# ++++++++++++++++++++++++ Adding Occupation ++++++++++++++++++++++++
# occupation = Occupation.objects.create(name=DATA['occupations'][i], status=1)
for occupation in DATA['occupations']:
data_exist = Occupation.objects.filter(name__exact=occupation).all()
if not data_exist:
print("Added: " + occupation)
Occupation.objects.create(name=occupation, status=1)
else:
if data_exist[0].name == occupation:
print("Skipping: " + occupation)
else:
print("Added: " + occupation)
Occupation.objects.create(name=occupation, status=1)
# ++++++++++++++++++++++++ End of Adding Occupation +++++++++++++++++
# ++++++++++++++++++++++++ Adding Nationality ++++++++++++++++++++++++
# nationality = Nationality.objects.create(nationality_name=DATA['nationalities'][i])
for nationality in DATA['nationalities']:
data_exist = Nationality.objects.filter(nationality_name__exact=nationality).all()
if not data_exist:
print("Added: " + nationality)
Nationality.objects.create(nationality_name=nationality, status=1)
else:
if data_exist[0].nationality_name == nationality:
print("Skipping: " + nationality)
else:
print("Added: " + nationality)
Nationality.objects.create(nationality_name=nationality, status=1)
# ++++++++++++++++++++++++ End of Adding Nationality +++++++++++++++++
# ++++++++++++++++++++++++ Adding States ++++++++++++++++++++++++
# state = State.objects.create(state_name=DATA['states'][i], state_country=country, status=1)
for state in DATA['states']:
data_exist = State.objects.filter(state_name__exact=state).all()
if not data_exist:
print("Added: " + state)
State.objects.create(state_name=state, status=1)
else:
if data_exist[0].state_name == state:
print("Skipping: " + state)
else:
print("Added: " + state)
State.objects.create(state_name=state, status=1)
# ++++++++++++++++++++++++ End of Adding States +++++++++++++++++
# ++++++++++++++++++++++++ Adding Widgets ++++++++++++++++++++++++
# Widget.objects.create(name=item.capitalize(), code=item)
for widget, roletypes in DATA['widgets'].items():
# for widget in DATA['widgets']:
data_exist = Widget.objects.filter(code__exact=widget)
name = widget.replace('_',' ').title()
if data_exist:
if data_exist[0].code == widget:
print("Skipping: " + name)
continue
Widget.objects.create(name=name, code=widget, roletypes = roletypes, status=1)
print("Added: " + name)
# ++++++++++++++++++++++++ End of Adding Widgets +++++++++++++++++
# ++++++++++++++++++++++++ Adding Districts ++++++++++++++++++++++++
for district in DATA['districts']:
data_exist = District.objects.filter(name__exact=district).all()
if not data_exist:
print("Added: " + district)
District.objects.create(name=district, status=1)
else:
if data_exist[0].name == district:
print("Skipping: " + district)
else:
print("Added: " + district)
District.objects.create(name=district, status=1)
# ++++++++++++++++++++++++ End of Adding District +++++++++++++++++
# ++++++++++++++++++++++++ Adding City ++++++++++++++++++++++++
# citytown = CityTown.objects.create(city_name=DATA['cities'][i], city_state=state, city_country=country,status=1)
for city in DATA['cities']:
data_exist = CityTown.objects.filter(city_name__exact=city).all()
if not data_exist:
print("Added: " + city)
CityTown.objects.create(city_name=city, status=1)
else:
if data_exist[0].city_name == city:
print("Skipping: " + city)
else:
print("Added: " + city)
CityTown.objects.create(city_name=city, status=1)
# ++++++++++++++++++++++++ End of Adding City +++++++++++++++++
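
# Usage sketch (assumption: this command module is saved as
# <app>/management/commands/populate_data.py, in which case it is invoked with
#   python manage.py populate_data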
|
from flask import Flask
from flask import request
from flask import jsonify
from flask_cors import CORS
import os
import sys
import pandas as pd
import numpy as np
import joblib
import pickle
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils.preprocess_data import remove_time_stamp
app = Flask(__name__)
CORS(app, supports_credentials=True)
@app.route('/')
def deploy_ml_model():
return 'TRSS ML model deployed!'
@app.route('/predict', methods=['POST'])
def predict_with_ml_model():
console_output = request.json['console_content']
# pre-process input data to fit ml model
console_output_df = pd.DataFrame([console_output]).T
console_output_df.columns = ['received_content']
# pre-process: remove time stamp
console_output_df['received_content'] = [remove_time_stamp(content) for content in console_output_df['received_content']]
# pre-process: vectorize input data with vectorizer from training
ml_prototype_path = os.path.dirname(os.path.dirname(app.instance_path))
vectorizer_file_path = os.path.join(ml_prototype_path, 'data/TempData/vectorizer_file.pk')
with open(vectorizer_file_path, "rb") as vectorizer_file:
vectorizer = pickle.load(vectorizer_file)
test_df = pd.DataFrame.sparse.from_spmatrix(vectorizer.transform(console_output_df.pop('received_content')), columns=vectorizer.get_feature_names())
test_features = np.array(test_df)
# pre-process: normalize the input features using the sklearn StandardScaler from training
standardScaler_file_path = os.path.join(ml_prototype_path, 'data/TempData/standardScaler_file.pk')
with open(standardScaler_file_path, "rb") as standardScaler_file:
standardScaler = pickle.load(standardScaler_file)
test_features_standardScaler = standardScaler.transform(test_features)
# predict with saved ML model
# parent_dir_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
# ml_model_path = parent_dir_path + '/MLModel.joblib'
ml_model_path = os.path.join(ml_prototype_path, 'MLModel.joblib')
savedModel = joblib.load(ml_model_path)
predicted_label = savedModel.predict(test_features_standardScaler)
print("savedModel Sample 0 predict label: ", predicted_label[0])
    # transform the predicted label number into text with the num2name dict from training
# num2name_file_path = parent_dir_path + '/data/TempData/num2name_file'
num2name_file_path = os.path.join(ml_prototype_path, 'data/TempData/num2name_file')
with open(num2name_file_path, "rb") as num2name_file:
label_dict_num2name = pickle.load(num2name_file)
predicted_result = label_dict_num2name[predicted_label[0]]
print("savedModel Sample 0 predict label name: ", predicted_result)
# return result in json format
output = {}
output['result'] = predicted_result
return jsonify(output)
if __name__ == "__main__":
app.run(host='0.0.0.0')
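
# --- Illustrative client call (not part of the original service) ---
# A sketch assuming the service is running locally on Flask's default port 5000:
#
#   import requests
#   resp = requests.post(
#       "http://localhost:5000/predict",
#       json={"console_content": ["<raw console output here>"]},
#   )
#   print(resp.json())  # e.g. {"result": "<predicted label name>"}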
|
"""
Unit tests.
See doc/devel.md for usage instructions.
"""
import json
import logging
import re
import sys
from os import environ
from traceback import format_exc
from unittest import TestCase
import psycopg2
import pg_jts
logging.basicConfig()
verbosity = sys.argv.count('-v')
if verbosity == 0:
level = logging.WARNING
elif verbosity == 1:
level = logging.INFO
else:
level = logging.DEBUG
logging.getLogger().setLevel(level)
postgresql_dsn = environ.get('POSTGRESQL_DSN')
class TestPGJTS(TestCase):
def setUp(self):
# create database test_pg_jts
conn = psycopg2.connect(postgresql_dsn)
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cur = conn.cursor()
cur.execute("CREATE DATABASE test_pg_jts")
cur.execute("COMMENT ON DATABASE test_pg_jts IS %s", ('Testing pg_jts',))
cur.close()
conn.close()
# connect to database test_pg_jts
self.dsn = postgresql_dsn.replace('dbname=postgres', 'dbname=test_pg_jts')
if self.dsn == postgresql_dsn: # safety check
print('Please use "dbname=postgres" in POSTGRESQL_DSN !')
sys.exit(1)
self.conn = psycopg2.connect(self.dsn)
self.conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
self.cur = self.conn.cursor()
def sql(self, sql_, *args):
"""
Helper method for executing sql.
"""
try:
self.cur.execute(sql_, *args)
        except Exception:
print(format_exc())
self.conn.rollback()
def jts(self):
"""
Helper method for running `pg_jts.get_database`.
"""
jts, n = pg_jts.get_database(self.dsn)
return jts
### todo test n
def test_empty_database(self):
jts = self.jts()
try:
ts = json.loads(jts)
        except Exception:
ts = None
self.assertIsInstance(ts, dict)
keys1 = set(ts.keys())
keys2 = set([
'source', 'source_version', 'database_name',
'database_description', 'generation_begin_time',
'generation_end_time', 'datapackages', 'inheritance',
])
self.assertEqual(keys1, keys2)
self.assertEqual(ts['source'], 'PostgreSQL')
self.assertEqual(ts['database_name'], 'test_pg_jts')
self.assertEqual(ts['database_description'], 'Testing pg_jts')
self.assertEqual(ts['datapackages'],
[{'datapackage': 'public', 'resources': []}])
self.assertEqual(ts['inheritance'], [])
def test_single_table(self):
self.sql("""CREATE TABLE "table '*Ù" (id SERIAL)""")
self.sql("""COMMENT ON TABLE "table '*Ù" IS 'That''s w@îrd'""")
dp1 = json.loads(self.jts())['datapackages']
dp2 = [{
'datapackage': 'public',
'resources': [
{
'name': "table '*Ù",
'description': 'That\'s w@îrd',
'foreignKeys': [],
'fields': [
{'name': 'id',
'type': 'int',
'default_value': '"table \'\'*Ù_id_seq"()',
'constraints': {'required': True},
},
],
'indexes': [],
},
],
}]
self.assertEqual(dp1, dp2)
def test_data_types_and_field_names(self):
self.sql("""CREATE TABLE table1 (
id SERIAL,
i1 smallint null,
i2 int not null,
i3 bigint,
ia1 bigint[][] not null,
"*-\_/'Ò" bool,
t1 text
)""")
fields1 = json.loads(self.jts())['datapackages'][0]['resources'][0]['fields']
fields2 = [
{
'name': 'id',
'type': 'int',
'default_value': 'table1_id_seq()',
'constraints': {'required': True},
},
{
'name': 'i1',
'type': 'smallint',
'constraints': {'required': False},
},
{
'name': 'i2',
'type': 'int',
'constraints': {'required': True},
},
{
'name': 'i3',
'type': 'bigint',
'constraints': {'required': False},
},
{
'name': 'ia1',
'type': 'bigint[][]',
'constraints': {'required': True},
},
{
'name': "*-\_/'Ò",
'type': 'bool',
'constraints': {'required': False},
},
{
'name': 't1',
'type': 'text',
'constraints': {'required': False},
},
]
print(fields1)
print(fields2)
# verify both field lists are equal
self.assertEqual([f for f in fields1 if f not in fields2], [])
self.assertEqual([f for f in fields2 if f not in fields1], [])
def test_column_comments(self):
... # TODO
def tearDown(self):
# disconnect from database test_pg_jts
self.cur.close()
self.conn.close()
# drop database test_pg_jts
conn = psycopg2.connect(postgresql_dsn)
conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
cur = conn.cursor()
cur.execute("DROP DATABASE test_pg_jts")
cur.close()
conn.close()
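
# --- Example invocation (sketch, not part of the test module) ---
# Assuming a reachable PostgreSQL server and that this file is named test_pg_jts.py:
#   POSTGRESQL_DSN="dbname=postgres user=postgres host=localhost" python -m unittest -v test_pg_jts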
|
import urllib
import urllib.request
import json
import os
class Version:
def __init__(self, versionString):
"""semantic version (major.minor.patch)"""
self.setString(versionString)
def __str__(self):
return f"{self.major}.{self.minor}.{self.patch}"
def setNumbers(self, major=None, minor=None, patch=None):
self.major = int(major) if major else self.major
self.minor = int(minor) if minor else self.minor
self.patch = int(patch) if patch else self.patch
def setString(self, versionString):
assert isinstance(versionString, str)
parts = versionString.split(".")
assert len(parts) == 3
self.setNumbers(*parts)
def increase(self, major=False, minor=False, patch=True):
self.major = self.major + 1 if major else self.major
self.minor = self.minor + 1 if minor else self.minor
self.patch = self.patch + 1 if patch else self.patch
def GetOnlineVersion(package="scottplot"):
"""
Returns the version of the NuGet package online
"""
print(f"checking the web for the latest {package} version...")
url = f"https://api.nuget.org/v3/registration4/{package}/index.json"
response = urllib.request.urlopen(url)
data = response.read()
jsonText = data.decode('utf-8')
parsed = json.loads(jsonText)
#print(json.dumps(parsed, indent=4, sort_keys=True))
version = Version(parsed["items"][0]["upper"])
print(f"latest version of {package} is: {version}")
return version
def SetProjectVersion(csprojPath, newVersion):
projectName = os.path.basename(csprojPath)
print(f"upgrading {projectName} to version {newVersion}...")
with open(csprojPath) as f:
lines = f.read().split("\n")
for i, line in enumerate(lines):
if "Version" in line and "</" in line and not "LangVersion" in line:
a, b = line.split(">", 1)
b, c = b.split("<", 1)
line = a + ">" + newVersion + "<" + c
lines[i] = line
with open(csprojPath, 'w') as f:
f.write("\n".join(lines))
def increasePackageVersions(PROJECT_FILES):
version = GetOnlineVersion()
version.increase()
newVersion = str(version)
for projectFile in PROJECT_FILES:
SetProjectVersion(projectFile, newVersion)
return version
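
# Usage sketch (hypothetical paths; the real .csproj locations depend on the repository layout):
#   increasePackageVersions(["src/ScottPlot/ScottPlot.csproj"])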
|
from django.http import HttpRequest
from rest_framework.views import APIView
from rest_framework.response import Response
from genie.services import NotebookJobServices, Connections, NotebookTemplateService
from rest_framework.decorators import api_view
class NotebookOperationsView(APIView):
"""
    Class to clone or delete a notebook on the zeppelin server
"""
def post(self, request, notebookId):
res = NotebookJobServices.cloneNotebook(notebookId, request.data)
return Response(res.json())
def delete(self, request, notebookId):
res = NotebookJobServices.deleteNotebook(notebookId)
return Response(res.json())
class NotebookActionsView(APIView):
"""
    Class to run, stop, or clear the results of a notebook job on the zeppelin server
"""
def post(self, request, notebookId):
res = NotebookJobServices.runNotebookJob(notebookId)
return Response(res.json())
def delete(self, request, notebookId):
res = NotebookJobServices.stopNotebookJob(notebookId)
return Response(res.json())
def put(self, request, notebookId):
res = NotebookJobServices.clearNotebookResults(notebookId)
return Response(res.json())
class NotebooksLight(APIView):
"""
Get concise notebook data
"""
def get(self, request):
res = NotebookJobServices.getNotebooksLight()
return Response(res.json())
class NotebookView(APIView):
"""
    Class to list or add notebooks on the zeppelin server
"""
def get(self, request, offset: int):
res = NotebookJobServices.getNotebooks(offset)
return Response(res.json())
def post(self, request):
res = NotebookJobServices.addNotebook(request.data)
return Response(res.json())
class NotebookJobView(APIView):
"""
Class to get, add and update a NotebookJob details
The put and post methods only require request body and not path parameters
The get method requires the notebookJobId as the path parameter
"""
def get(self, request, notebookId=None):
offset = int(request.GET.get("offset", 0))
res = NotebookJobServices.getNotebookJobDetails(notebookId=notebookId, runStatusOffset=offset)
return Response(res.json())
def post(self, request):
notebookId = request.data["notebookId"]
scheduleId = request.data["scheduleId"]
res = NotebookJobServices.addNotebookJob(notebookId=notebookId, scheduleId=scheduleId)
return Response(res.json())
def put(self, request):
notebookId = request.data["notebookId"]
if "scheduleId" in request.data:
scheduleId = request.data["scheduleId"]
res = NotebookJobServices.updateNotebookJob(notebookId=notebookId, scheduleId=scheduleId)
elif "enabled" in request.data:
enabled = request.data["enabled"]
res = NotebookJobServices.toggleNotebookJob(notebookId=notebookId, enabled=enabled)
return Response(res.json())
def delete(self, request, notebookId=None):
res = NotebookJobServices.deleteNotebookJob(notebookId=notebookId)
return Response(res.json())
class ScheduleView(APIView):
"""
Class to get and add available crontab schedules
"""
def get(self, request):
res = NotebookJobServices.getSchedules()
return Response(res.json())
def post(self, request):
name = request.data["name"]
cron = request.data["crontab"]
timezone = request.data["timezone"]
res = NotebookJobServices.addSchedule(cron=cron, timezone=timezone, name=name)
return Response(res.json())
def put(self,request):
id = request.data["id"]
name = request.data["name"]
cron = request.data["crontab"]
timezone = request.data["timezone"]
res = NotebookJobServices.updateSchedule(id=id, cron=cron, timezone=timezone, name=name)
return Response(res.json())
@api_view(["GET", "PUT", "DELETE"])
def schedule(request: HttpRequest, scheduleId: int) -> Response:
"""
    Method for crud operations on a single schedule
    :param request: HttpRequest
    :param scheduleId: Schedule Id
"""
if request.method == "GET":
res = NotebookJobServices.getSingleSchedule(scheduleId)
return Response(res.json())
if request.method == "DELETE":
res = NotebookJobServices.deleteSchedule(scheduleId)
return Response(res.json())
class TimzoneView(APIView):
"""
Class to get standard pytz timezones
"""
def get(self, request):
res = NotebookJobServices.getTimezones()
return Response(res.json())
# TODO
# Change connection views to class
@api_view(["GET", "POST"])
def connections(request: HttpRequest) -> Response:
"""
Method to get or add connection
:param request: HttpRequest
"""
if request.method == "GET":
res = Connections.getConnections()
return Response(res.json())
elif request.method == "POST":
res = Connections.addConnection(request.data)
return Response(res.json())
@api_view(["GET", "PUT", "DELETE"])
def connection(request: HttpRequest, connection_id: int) -> Response:
"""
Method for crud operations on a single connection
:param request: HttpRequest
:param connection_id: Connection Id
"""
if request.method == "GET":
res = Connections.getConnection(connection_id)
return Response(res.json())
elif request.method == "DELETE":
res = Connections.removeConnection(connection_id)
return Response(res.json())
elif request.method == "PUT":
res = Connections.updateConnection(connection_id, request.data)
return Response(res.json())
@api_view(["GET", "POST"])
def connectionTypes(request: HttpRequest) -> Response:
"""
Method to get all connection types
:param request: HttpRequest
"""
if request.method == "GET":
res = Connections.getConnectionTypes()
return Response(res.json())
@api_view(["POST"])
def datasetDetails(request: HttpRequest) -> Response:
"""
Method to get dataset details from s3 location
:param request: HttpRequest
"""
datasetLocation = request.data.get("datasetLocation")
datasourceName = request.data.get("datasourceName")
res = NotebookTemplateService.getDatasetDetails(datasetLocation, datasourceName)
return Response(res.json())
class NotebookTemplateView(APIView):
def get(self, request):
res = NotebookTemplateService.getNotebookTemplates()
return Response(res.json())
|
# -*- coding: utf-8 -*-
# Operational Libs
import collections
import functools
import logging
import os
# Dash Libs
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from dash.dependencies import Input, Output, ALL
# Analytic Libs
import pandas as pd
import numpy as np
import math
LOG = logging.getLogger(__name__)
logging.basicConfig(level="INFO")
# In production, the CSV files are at this project on Civis Platform:
# https://platform.civisanalytics.com/spa/#/projects/135876
CIVIS_PROJECT_ID = 135876
LOCAL_DATA_DIR = "data"
DEV_CSV_FILENAMES = [
"dash_trajectoriesDat_baseline.csv", # file ID: 103222548
"dash_trajectoriesDat_june1partial10.csv", # file ID: 103222598
"dash_trajectoriesDat_june1partial30.csv", # file ID: 103222650
]
EMS_MAPPING = {"North-Central": (1, 2), "Central": (3, 6), "Southern": (4, 5), "Northeast": (7, 8, 9, 10, 11)}
N_SLIDER_MARKS = 5
N_CHART_COLUMNS = 2
app = dash.Dash(__name__, prevent_initial_callbacks=True)
# Mark the correct server for Heroku or Civis deployment
server = app.server
if os.getenv("CIVIS_SERVICE_VERSION"):
# This environment variable will be set when deployed in Civis
import civis
client = civis.APIClient()
CSV_FILES = civis.find(
client.projects.get(CIVIS_PROJECT_ID).files,
file_name=lambda filename: filename.endswith(".csv"),
)
logging.info("%d CSV files found", len(CSV_FILES))
else:
CSVFile = collections.namedtuple("CSVFile", ("id", "file_name"))
CSV_FILES = [
CSVFile(id=None, file_name=file_name)
for file_name in DEV_CSV_FILENAMES
]
# Setup
#############################################################################
# Color Options
COLORS = {
'sf': '#1798c1',
'green': '#416165', # Color for plots & text
    'beige': '#F7F7FF',  # Color for gridlines
}
# RangeSlider values need to be ints - convert to unix timestamp
def dtToUnix (dt):
    ''' Convert datetime to Unix seconds
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#from-timestamps-to-epoch
'''
unixTime = (dt - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
return unixTime
# Convert unix Time back to datetime
def unixToDt (unixTime):
    ''' Convert Unix seconds to datetime '''
return pd.to_datetime(unixTime, unit='s')
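# Worked example: pd.Timestamp("2020-02-01") is 18293 days after the epoch,
# so dtToUnix(pd.Timestamp("2020-02-01")) == 18293 * 86400 == 1580515200,
# and unixToDt(1580515200) == pd.Timestamp("2020-02-01 00:00:00").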
@functools.lru_cache(maxsize=4)
def get_df(csv_file_path):
"""Get pandas DataFrame from CSV and apply pre-processing.
Parameters
----------
csv_file_path : str
Returns
-------
pd.DataFrame
"""
df = pd.read_csv(csv_file_path)
# Filter out timeframes for graphs
# Generate datetime to get weeks for slider
df['date'] = pd.to_datetime(df['date'])
# Get week of date
df['week'] = df['date'] - pd.to_timedelta(df['date'].dt.weekday, unit='d')
# Get month of date
df['month'] = df['date'].values.astype('datetime64[M]')
# Map EMS to Groupings
ems_to_region_mapping = {x: region for region, v in EMS_MAPPING.items() for x in v}
# Create a column for later filtering
df['emsGroup'] = df['ems'].map(ems_to_region_mapping)
return df
def get_param_slider(param, i, marks):
return html.Div(
style={"margin": "25px 5px 30px 0px"},
children=[
html.Div(
style={"margin-left": "5px"},
children=[
html.P(
param.replace("param_", "").upper(),
className="control_label",
),
dcc.RangeSlider(
id={
"type": "parameter",
"index": i,
},
step=None,
marks={
m: "{:.1%}".format(m) if m < 1 else "{:.2f}".format(m)
for m in marks
},
min=marks[0],
max=marks[-1],
value=[marks[0], marks[-1]],
)
]
)
]
)
def get_formatted_charts(charts, columns=N_CHART_COLUMNS):
result = []
charts_in_row = []
for chart in charts:
if len(charts_in_row) == columns:
div = html.Div(
charts_in_row,
className="flex-display chartContainerDiv ",
)
result.append(div)
charts_in_row = []
graph = dcc.Graph(figure=chart)
charts_in_row.append(html.Div([graph], className="graphDiv"))
if charts_in_row:
div = html.Div(
charts_in_row,
className="flex-display chartContainerDiv ",
)
result.append(div)
return result
def get_param_names(df):
"""Get the parameter names from df.
Since the parameters are dynamically retrieved from the df,
this function ensures that we have a consistent and correct mapping
between their names and values across the callbacks.
"""
return sorted(col for col in df.columns if col.startswith("param_"))
def get_week_slider(df):
dates = sorted(df["month"].unique())
week_slider = [
dcc.RangeSlider(
id="timeSlider",
min=dtToUnix(dates[0]),
max=dtToUnix(dates[-1]),
value=[dtToUnix(dates[0]), dtToUnix(dates[-1])],
marks= {
dtToUnix(dt): {
'label': np.datetime_as_string(dt, unit='M'),
'style': {
'transform':'rotate(45deg)',
'font-size':'8px',
}
} for i, dt in enumerate(dates)
},
allowCross=False,
className="",
),
]
return week_slider
def get_param_sliders(df):
sliders = []
for i, param in enumerate(get_param_names(df)):
marks = np.linspace(
math.floor(df[param].min() * 1000) / 1000,
math.ceil(df[param].max() * 1000) / 1000,
N_SLIDER_MARKS,
)
slider = get_param_slider(param, i, marks)
sliders.append(slider)
return sliders
# Main App Layout
#############################################################################
app.layout = html.Div(
children = [
html.Div(id="div-csv-file-name", style={"display": "none"}),
# Header
html.Div(
children =[
html.H4("Northwestern COVID Simulation Model Explorer", id="title",),
html.P("Model Version: <Date of Model Run>", id="subtitle",),
],
id="pageTitleContainer",
className="pretty_container title",
),
# Container holding selectors on left and charts on right
html.Div(
[
html.Div(
[
# CSV File Selector
html.Div(
[
html.P(
"Select CSV File:",
className="control_label",
),
dcc.Dropdown(
options=[
{"label": f.file_name,
"value": f.file_name}
for f in CSV_FILES],
multi=False,
placeholder="Choose CSV File",
id="csvDropdown",
className="dcc_control",
),
],
className="time-container",
),
html.Div(
[
# EMS Selector
html.Div(
[
html.P(
"Select EMS Region:",
className="control_label",
),
dcc.Dropdown(
options=[{'label': name, 'value': name} for name in EMS_MAPPING],
multi=False,
placeholder="Choose EMS Region",
id="emsDropdown",
className="dcc_control",
),
],
className="time-container one-half column",
),
# Toggle For Charts
html.Div(
[
html.P(
"This could be radio or dropdown:",
className="control_label",
),
dcc.RadioItems(
options=[
{'label': 'New York City', 'value': 'NYC'},
{'label': 'Montréal', 'value': 'MTL'},
{'label': 'San Francisco', 'value': 'SF'}
],
value='MTL',
id="chartToggle",
className="dcc_control",
),
],
className="time-container one-half column",
),
],
className="flex-display"
),
# Week Selector
html.Div(
[
html.P(
"Filter Graphs by Week:",
className="control_label",
),
html.Div(
# `children` is dynamically updated.
[dcc.RangeSlider(
id="timeSlider",
min=dtToUnix(np.datetime64("2020-02")),
max=dtToUnix(np.datetime64("2021-02")),
value=[dtToUnix(np.datetime64("2020-02")),
dtToUnix(np.datetime64("2021-02"))],
)],
className="dcc_control",
id="week-slider",
),
],
className="time-container",
),
# Parameter Sliders
html.Div(
[
html.P(
"Filter Graphs by Model Parameters:",
className="control_label",
),
html.Div(
# `children` is dynamically updated.
[get_param_slider("", 0, [0, 1])],
className="dcc_control",
id="param-sliders",
),
],
className="time-container",
),
],
className="four columns"
),
# Container for all Output Charts
html.Div(
[
# Rendered Warning
html.Div(
[
html.P(
"Note: rendering may take a few seconds after adjusting parameters",
className="control_label",
),
],
),
html.Div(
id="output-charts",
className="chartContainer",
),
],
className="eight columns"
),
],
className="flex-display"
),
# Footer Info
html.Div(
children=[
html.P("Keith Walewski | Questions? - keith.walewski@gmail.com ",
className="",
id="footer"
),
],
className="pretty-container"
),
],
className="mainContainer",
id="",
)
# Callback
#############################################################################
@app.callback(
[
Output("week-slider", "children"),
Output("param-sliders", "children"),
],
[Input("div-csv-file-name", "children")],
)
def set_sliders(csv_file_path):
df = get_df(csv_file_path)
week_slider = get_week_slider(df)
param_sliders = get_param_sliders(df)
return week_slider, param_sliders
@app.callback(
Output("div-csv-file-name", "children"),
[Input("csvDropdown", "value")],
)
@functools.lru_cache(maxsize=4)
def set_csv_file_name_and_download(csv_filename):
"""Set CSV file path in the app and download the CSV.
Parameters
----------
csv_filename : str
Returns
-------
str
CSV file path
"""
if not os.path.exists(LOCAL_DATA_DIR):
os.mkdir(LOCAL_DATA_DIR)
csv_path = os.path.join(LOCAL_DATA_DIR, csv_filename)
if os.path.exists(csv_path):
# If the CSV already exists on disk, just use it.
pass
else:
# The CSV has to come from Civis Platform.
file_id = civis.find_one(CSV_FILES, file_name=csv_filename).id
if file_id is None:
raise ValueError(f"CSV file not retrievable without a Civis file ID")
civis.io.civis_to_file(file_id, csv_path)
logging.info("CSV downloaded to %s", csv_path)
return csv_path
@app.callback(
Output("output-charts", "children"),
[
Input('div-csv-file-name', 'children'),
Input('emsDropdown', 'value'),
Input('timeSlider', 'value'),
Input({"type": "parameter", "index": ALL}, "value"),
],
)
def generateOutput(csv_file_path, emsValue, timeValues, paramValues):
df = get_df(csv_file_path)
params = get_param_names(df)
# Generate query string for EMS value and range of sliders
emsString = "({0} == '{1}')".format('emsGroup', emsValue)
# Rangeslider passes values for the bottom and top of the range as a list [bottom, top]
# Filter RangeSlider for timeValues - inclusive of selected timeframe
timeString = "({0} >= '{1}') & ({0} <= '{2}')".format('week', unixToDt(timeValues[0]).strftime("%Y-%m-%d"), unixToDt(timeValues[1]).strftime("%Y-%m-%d"))
# Filter RangeSlider for Parameter Values
paramString = " & ".join(["(`{0}` >= {1}) & (`{0}` <= {2})".format(param, pvalue[0], pvalue[1]) for param, pvalue in zip(params, paramValues)])
strings = [emsString, timeString, paramString]
queryString = " & ".join(strings)
# Filter data frame given the slider inputs
dff = df.query(queryString)
# List of columns to group by
groupbyList = ['date']
def getQuantile(n):
''' Function to generate quantiles for groupby, returns quantile '''
def _getQuantile(x):
return x.quantile(n)
_getQuantile.__name__ = 'quantile_{:2.2f}'.format(n*100)
return _getQuantile
# Function list passed to aggregation
func_list = ['mean', 'sum', getQuantile(.025), getQuantile(.975), getQuantile(.25), getQuantile(.75)]
outputs = sorted(col for col in df.columns if col.startswith("output_"))
dfg = dff.groupby(groupbyList)[outputs].agg(func_list).reset_index()
def makeChart (outputVar):
# Generate Figure for plotting
figure = go.Figure()
# Add traces - shades between IQR and 2.5-97.5
figure.add_trace(go.Scatter(
x=dfg['date'],
y=dfg.loc[:, (outputVar, 'quantile_2.50')],
mode='lines',
opacity=0.3,
line=dict(color=COLORS['green'], width=0),
fill=None,
)
)
figure.add_trace(go.Scatter(
x=dfg['date'],
y=dfg.loc[:, (outputVar, 'quantile_97.50')],
mode='lines',
opacity=0.3,
line=dict(color=COLORS['green'], width=0),
fill='tonexty', # fill area between this and previous trace
)
)
figure.add_trace(go.Scatter(
x=dfg['date'],
y=dfg.loc[:, (outputVar, 'quantile_25.00')],
mode='lines',
opacity=0.3,
line=dict(color=COLORS['green'], width=0),
fill=None,
)
)
figure.add_trace(go.Scatter(
x=dfg['date'],
y=dfg.loc[:, (outputVar, 'quantile_75.00')],
mode='lines',
opacity=0.3,
line=dict(color=COLORS['green'], width=0),
fill='tonexty',
)
)
figure.update_layout(
font=dict(
family="Open Sans, monospace",
size=14,
color=COLORS['green']
),
title=outputVar.replace("output_", "").upper() + " by Date",
showlegend=False,
yaxis=dict(
tickformat="<,f",
gridcolor=COLORS['beige'],
gridwidth=2,
),
xaxis=dict(
showgrid=False,
),
plot_bgcolor='white',
paper_bgcolor='white',
)
return figure
charts = [makeChart(output) for output in outputs]
return get_formatted_charts(charts)
if __name__ == '__main__':
app.run_server(debug=True)
|
#!/usr/bin/env python3
""" simplest flask example
run from CLI with:
python flask_app.py
"""
from flask import Flask
APP = Flask(__name__)
@APP.route('/')
def hello_world():
""" say hello """
return "Hello, world!"
if __name__ == "__main__":
APP.run(debug=True, host='0.0.0.0')
|
import mmcv
import numpy as np
import pycocotools.mask as mask_util
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule
from mmdet.core import mask_target, force_fp32, auto_fp16
import matplotlib.pyplot as plt
import kornia
@HEADS.register_module
class FCNMaskHead(nn.Module):
def __init__(self,
num_convs=4,
roi_feat_size=14,
in_channels=256,
conv_kernel_size=3,
conv_out_channels=256,
upsample_method='deconv',
upsample_ratio=2,
num_classes=81,
class_agnostic=False,
conv_cfg=None,
norm_cfg=None,
use_maskopt=False,
generate_weight=False,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)):
super(FCNMaskHead, self).__init__()
if upsample_method not in [None, 'deconv', 'nearest', 'bilinear']:
raise ValueError(
'Invalid upsample method {}, accepted methods '
'are "deconv", "nearest", "bilinear"'.format(upsample_method))
self.generate_weight = generate_weight
self.num_convs = num_convs
self.roi_feat_size = roi_feat_size # WARN: not used and reserved
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.conv_out_channels = conv_out_channels
self.upsample_method = upsample_method
self.upsample_ratio = upsample_ratio
self.num_classes = num_classes
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.loss_mask = build_loss(loss_mask)
# TODO: change here
self.use_maskopt = use_maskopt
# if use_maskopt:
# self.edge_det = kornia.filters.Sobel()
# upsample_in_channels = (
# self.conv_out_channels if self.num_convs > 0 else in_channels)
# out_channels = 1 if self.class_agnostic else self.num_classes
# self.convs = nn.ModuleList()
# for i in range(self.num_convs):
# in_channels = (
# self.in_channels if i == 0 else self.conv_out_channels)
# padding = (self.conv_kernel_size - 1) // 2
# self.convs.append(
# ConvModule(
# in_channels,
# self.conv_out_channels,
# self.conv_kernel_size,
# padding=padding,
# conv_cfg=conv_cfg,
# norm_cfg=norm_cfg))
# if self.upsample_method is None:
# self.upsample = None
# elif self.upsample_method == 'deconv':
# self.upsample = nn.ConvTranspose2d(
# upsample_in_channels,
# self.conv_out_channels,
# self.upsample_ratio,
# stride=self.upsample_ratio)
# else:
# self.upsample = nn.Upsample(
# scale_factor=self.upsample_ratio, mode=self.upsample_method)
# logits_in_channel = (
# self.conv_out_channels
# if self.upsample_method == 'deconv' else upsample_in_channels)
# self.conv_logits = nn.Conv2d(logits_in_channel, out_channels+1, 1)
# else:
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = (
self.in_channels if i == 0 else self.conv_out_channels)
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
self.conv_kernel_size,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
upsample_in_channels = (
self.conv_out_channels if self.num_convs > 0 else in_channels)
if self.upsample_method is None:
self.upsample = None
elif self.upsample_method == 'deconv':
self.upsample = nn.ConvTranspose2d(
upsample_in_channels,
self.conv_out_channels,
self.upsample_ratio,
stride=self.upsample_ratio)
else:
self.upsample = nn.Upsample(
scale_factor=self.upsample_ratio, mode=self.upsample_method)
out_channels = 1 if self.class_agnostic else self.num_classes
logits_in_channel = (
self.conv_out_channels
if self.upsample_method == 'deconv' else upsample_in_channels)
self.conv_logits = nn.Conv2d(logits_in_channel, out_channels, 1)
# if self.generate_weight:
# self.weights_predict = nn.Sequential(
# nn.Linear(256 * 2, 1024),
# nn.ReLU(True),
# nn.Dropout(),
# nn.Linear(1024, 1024),
# nn.ReLU(True),
# nn.Dropout(),
# nn.Linear(1024, (self.conv_out_channels+1)*self.num_classes), # +1 for bias
# )
# loss_cls=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.1)
# self.loss_cls = build_loss(loss_cls)
self.relu = nn.ReLU(inplace=True)
self.debug_imgs = None
self.ws = 3
def init_weights(self):
for m in [self.upsample, self.conv_logits]:
if m is None:
continue
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0)
@auto_fp16()
def forward(self, x, query_feat=None, ref_feat=None, num_pos=None, num_cls_share=None):
# if num_cls_share is None:
# num_cls_share = len(self.convs)
# for conv in self.convs[:num_cls_share]:
# x = conv(x)
# if self.generate_weight:
# cls_feat = F.adaptive_avg_pool2d(x, 1)
# cls_feat = cls_feat.view(cls_feat.size(0), 1, -1)
# predicted = self.weights_predict(torch.cat([query_feat, ref_feat], 1))
# weight, bias = predicted.view(-1, self.num_classes,
# self.conv_out_channels+1).split(self.conv_out_channels, 2)
# cls_score = ((weight * cls_feat).sum(2, keepdim=True) + bias).view(-1, self.num_classes)
# if num_pos is not None:
# x = x[:num_pos]
# for conv in self.convs[num_cls_share:]:
# x = conv(x)
for conv in self.convs:
x = conv(x)
if self.upsample is not None:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
if self.use_maskopt:
edge_pred, mask_pred = mask_pred.split(2, dim=1)
return edge_pred, mask_pred
if self.generate_weight:
return cls_score, mask_pred
return mask_pred
def get_target(self, sampling_results, gt_masks, rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds for res in sampling_results
]
mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
gt_masks, rcnn_train_cfg)
return mask_targets
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, mask_targets, labels, num_pos=None):
loss = dict()
# if self.generate_weight:
# cls_score, mask_pred = mask_pred
# label_weights = torch.ones(len(cls_score)).to(labels.device)
# avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
# loss['loss_cls_mask'] = self.loss_cls(
# cls_score,
# labels,
# label_weights,
# avg_factor=avg_factor,
# reduction_override=None)
# if num_pos is not None:
# labels = labels[:num_pos]
# if self.use_maskopt:
# edge_pred, mask_pred = mask_pred
# device = edge_pred.device
# N, H, W = mask_targets.shape
# with torch.no_grad():
# edges = self.edge_det(mask_targets.unsqueeze(0)).squeeze(0)
# edges[:, 0, :] = torch.where(mask_targets[:, 0, :]==1, mask_targets[:, 0, :], edges[:, 0, :])
# edges[:, :, 0] = torch.where(mask_targets[:, :, 0]==1, mask_targets[:, :, 0], edges[:, :, 0])
# edges[:, H-1, :] = torch.where(mask_targets[:, H-1, :]==1, mask_targets[:, H-1, :], edges[:, H-1, :])
# edges[:, :, W-1] = torch.where(mask_targets[:, :, W-1]==1, mask_targets[:, :, W-1], edges[:, :, W-1])
# edge_targets = (edges > 0.25).long()
# weight = torch.tensor([(edges==1).sum(), (edges==0).sum()]).float() / edges.numel()
# edge_area = F.conv2d(edges.unsqueeze(1).float(), torch.ones(1, 1, self.ws, self.ws).to(device),
# padding=self.ws//2)
# loss_edge = F.cross_entropy(edge_pred, edge_targets, weight.to(device))
# loss['loss_edge'] = loss_edge
# # loss_mask = F.binary_cross_entropy_with_logits(mask_pred[edge_area > 0],
# # mask_targets.unsqueeze(1)[edge_area > 0].float())
# loss_mask = F.binary_cross_entropy_with_logits(mask_pred,
# mask_targets.unsqueeze(1).float())
# else:
if self.class_agnostic:
loss_mask = self.loss_mask(mask_pred, mask_targets, torch.zeros_like(labels))
else:
loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
loss['loss_mask'] = loss_mask
return loss
def get_seg_masks(self, mask_pred, mask_feats, ref_feats, det_bboxes, det_labels, real_labels, rcnn_test_cfg,
ori_shape, scale_factor, rescale, gt_masks=None):
"""Get segmentation masks from mask_pred and bboxes.
Args:
mask_pred (Tensor or ndarray): shape (n, #class+1, h, w).
For single-scale testing, mask_pred is the direct output of
model, whose type is Tensor, while for multi-scale testing,
it will be converted to numpy array outside of this method.
det_bboxes (Tensor): shape (n, 4/5)
det_labels (Tensor): shape (n, )
img_shape (Tensor): shape (3, )
rcnn_test_cfg (dict): rcnn testing config
ori_shape: original image size
Returns:
list[list]: encoded masks
"""
if isinstance(mask_pred, tuple):
edge_pred, mask_pred = mask_pred
# if False:
# edge_pred, mask_pred = mask_pred
# edge_pred = edge_pred.argmax(1, keepdim=True).float()
# device = mask_pred.device
# # edge_area = F.conv2d(edge_pred, torch.ones(1, 1, self.ws, self.ws).to(device), padding=self.ws//2)
# # a = torch.where(edge_area > 0, mask_pred.sigmoid() * 2 - 1, torch.zeros_like(mask_pred))
# # a = torch.where(edge_area > 0, mask_pred.tanh(), torch.zeros_like(mask_pred))
# a_0 = torch.where(mask_pred > 0, torch.ones_like(mask_pred), -torch.ones_like(mask_pred)) # can change to binary
# a = torch.where(edge_pred > 0, a_0, torch.zeros_like(mask_pred))
# # b = F.cosine_similarity(mask_feats.unsqueeze(1), ref_feats, dim=2)
# # b = F.interpolate(b, a.shape[-2:], mode='bilinear', align_corners=True)
# alpha, beta, gamma, delta, lambd = 1, 1, 1, 1, 1e-1
# n_iters = 100
# # c = alpha * a + beta * b.mean(1, keepdim=True)
# # f = torch.tensor([ [0, 1/4, 0],
# # [1/4, 0, 1/4],
# # [0, 1/4, 0]])[None, None, :, :].to(device)
# f = torch.tensor([ [0, 1, 0],
# [1, 0, 1],
# [0, 1, 0]])[None, None, :, :].float().to(device)
# H, W = a.shape[-2:]
# divide = torch.ones(H, W) * 1/4
# divide[0, :] = 1/3
# divide[H-1, :] = 1/3
# divide[:, 0] = 1/3
# divide[:, W-1] = 1/3
# divide[0, 0] = 1/2
# divide[0, W-1] = 1/2
# divide[H-1, 0] = 1/2
# divide[H-1, W-1] = 1/2
# divide = divide[None, None, :, :].float().to(device)
# # plt.matshow(edge_pred[0, 0].data.cpu().numpy())
# # plt.savefig('edge.jpg')
# # plt.matshow(a_0[0, 0].data.cpu().numpy())
# # plt.savefig('qual1.jpg')
# d = a_0
# for i in range(n_iters):
# d_avg = F.conv2d(d, f, padding=1) * divide
# # exp = alpha * a * torch.exp(-(a*d).sum(dim=[2,3], keepdim=True))
# sigmoid = torch.sigmoid(-(a*d).sum(dim=[2,3], keepdim=True))
# exp = alpha * a * sigmoid * (1 - sigmoid)
# d = exp + d_avg
# # print(d.min().item(), d.max().item())
# # plt.matshow(d[0, 0].data.cpu().numpy())
# # plt.savefig('qual_end.jpg')
# # exit()
# mask_pred = (d + 1) / 2
# # d_old = mask_pred
# # for i in range(n_iters):
# # d_g = (gamma + delta) * d
# # d_g -= delta*F.conv2d(d, f, padding=1)
# # d_g -= c * torch.exp(-(c * d).sum(dim=[0, 1, 2, 3], keepdim=True))
# # # d_g -= c * torch.exp(-(c * d))
# # # d_g -= alpha * a * torch.exp(-alpha * (a * d))
# # # d_g -= beta * b * torch.exp(-beta * (b * d))
# # d = d - lambd * d_g
# # if torch.norm(d - d_old) < 0.01:
# # break
# # d_old = d
# # mask_pred = d
# else:
mask_pred = mask_pred.sigmoid()
if isinstance(mask_pred, torch.Tensor):
mask_pred = mask_pred.cpu().numpy()
assert isinstance(mask_pred, np.ndarray)
# when enabling mixed precision training, mask_pred may be float16
# numpy array
mask_pred = mask_pred.astype(np.float32)
cls_segms = [[] for _ in range(80)]
bboxes = det_bboxes.cpu().numpy()[:, :4]
labels = det_labels.cpu().numpy() + 1
real_labels = real_labels.cpu().numpy()
if rescale:
img_h, img_w = ori_shape[:2]
else:
img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32)
img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32)
scale_factor = 1.0
for i in range(bboxes.shape[0]):
bbox = (bboxes[i, :] / scale_factor).astype(np.int32)
label = labels[i]
real_label = real_labels[i]
w = max(bbox[2] - bbox[0] + 1, 1)
h = max(bbox[3] - bbox[1] + 1, 1)
if not self.class_agnostic and not self.use_maskopt:
mask_pred_ = mask_pred[i, label, :, :]
else:
mask_pred_ = mask_pred[i, 0, :, :]
im_mask = np.zeros((img_h, img_w), dtype=np.uint8)
if gt_masks is not None:
bbox_mask = mmcv.imresize(gt_masks[i][0].cpu().numpy(), (w, h))
else:
bbox_mask = mmcv.imresize(mask_pred_, (w, h))
bbox_mask = (bbox_mask > rcnn_test_cfg.mask_thr_binary).astype(
np.uint8)
im_mask[bbox[1]:bbox[1] + h, bbox[0]:bbox[0] + w] = bbox_mask
rle = mask_util.encode(
np.array(im_mask[:, :, np.newaxis], order='F'))[0]
cls_segms[real_label].append(rle)
return cls_segms, mask_pred[:, 0:1]
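# Illustration: a minimal sketch of the per-ROI post-processing performed in
# get_seg_masks above, assuming `mask_prob` is a single sigmoid probability
# map of shape (H, W) as a float32 ndarray and `bbox` is a length-4 ndarray of
# (x1, y1, x2, y2) image coordinates. Only mmcv.imresize and
# pycocotools.mask.encode mirror calls used above; the helper name and the
# 0.5 threshold are illustrative.
def paste_and_encode_single_mask(mask_prob, bbox, img_h, img_w, thr=0.5):
    """Resize an ROI mask into its box, binarize it, and RLE-encode it."""
    x1, y1, x2, y2 = bbox.astype(np.int32)
    w = max(x2 - x1 + 1, 1)
    h = max(y2 - y1 + 1, 1)
    # Resize the ROI-sized probability map to the box size (mmcv expects (w, h)).
    box_mask = mmcv.imresize(mask_prob, (w, h))
    box_mask = (box_mask > thr).astype(np.uint8)
    # Paste the binary mask into a full-image canvas.
    im_mask = np.zeros((img_h, img_w), dtype=np.uint8)
    im_mask[y1:y1 + h, x1:x1 + w] = box_mask
    # COCO-style run-length encoding, matching the cls_segms entries above.
    return mask_util.encode(np.array(im_mask[:, :, np.newaxis], order='F'))[0]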
|
# Basic arcade program using objects
# Draw shapes on screen
# Imports
import arcade
# Constants
SCREEN_WIDTH = 600
SCREEN_HEIGHT = 650
SCREEN_TITLE = "Draw Shapes"
# Classes
class Welcome(arcade.Window):
"""Our main welcome window
"""
def __init__(self):
"""Initialize the window
"""
# Call the parent class constructor
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
        # Set the background color
arcade.set_background_color(arcade.color.WHITE)
def on_draw(self):
"""Called whenever we need to draw our window
"""
# Clear the screen and start drawing
arcade.start_render()
# Draw a blue arc
arcade.draw_arc_filled(100, 100, 40, 40, arcade.color.BLUE, 0, 125)
# Draw a red ellipse
arcade.draw_ellipse_outline(
300, 100, 60, 30, arcade.color.RED, border_width=2
)
# Draw some purple lines
arcade.draw_line(500, 100, 550, 100, arcade.color.PURPLE)
arcade.draw_line(500, 90, 550, 90, arcade.color.PURPLE, line_width=2)
arcade.draw_line(500, 80, 550, 80, arcade.color.PURPLE, line_width=3)
# Draw an orange parabola
arcade.draw_parabola_filled(100, 100, 130, 120, arcade.color.ORANGE)
# Draw a black point
arcade.draw_point(300, 300, arcade.color.BLACK, 20)
# Draw a green polygon
points_list = [
[500, 300],
[550, 300],
[575, 325],
[550, 350],
[525, 340],
]
arcade.draw_polygon_outline(
points_list, arcade.color.GREEN, line_width=5
)
# Draw some rectangles
arcade.draw_rectangle_filled(100, 500, 150, 75, arcade.color.AZURE)
arcade.draw_lrtb_rectangle_filled(
150, 250, 575, 525, arcade.color.AMARANTH_PINK
)
arcade.draw_xywh_rectangle_filled(
200, 550, 150, 75, arcade.color.ASPARAGUS
)
# Draw some triangles
arcade.draw_triangle_filled(
400, 500, 500, 500, 450, 575, arcade.color.DEEP_RUBY
)
# Main code entry point
if __name__ == "__main__":
app = Welcome()
arcade.run()
|
from Operations.MiscUtil import namedtuple, ApplyToResult
from Operations.Shari_Operations.localize.PopConsts import AllAges, AllPops, AllFreqs
import re
class Scenario(object):
"""Represents one simulation scenario."""
@staticmethod
def fromString( s, mutAge = 10 ):
"""Return a scenario based on its string representation"""
if isinstance( s, Scenario ): return s
if s == 'neutral': return NeutralScenario()
        m = re.match( r'(\d+)ky/sel(\d+)_(\d)', s )
if m: return SelectionScenario( mutAge = int( m.group( 1 ) ), mutFreq = int( m.group( 2 ) ), mutPop = int( m.group( 3 ) ) )
        m = re.match( r'sel(\d+)_(\d)', s )
if m: return SelectionScenario( mutAge = mutAge, mutFreq = int( m.group( 1 ) ), mutPop = int( m.group( 2 ) ) )
raise ValueError( 'invalid scenario - ' + s )
fromStr = fromString
def isNeutral( self ): return self.is_neutral()
class NeutralScenario(Scenario, namedtuple('NeutralScenario', '')):
"""Represents a simulation of neutral evolution ( no selection )"""
def is_neutral( self ): return True
def scenName( self ):
"""Return a string representing the scenario"""
return 'neutral'
def scenDir( self ):
"""Return a string representing a subdir in which data for this scenario is stored."""
return 'neutral'
def __nonzero__(self): return True
def __str__( self ):
"""Return an informal string representation"""
return self.scenDir()
def __repr__( self ):
"""Return a formal string representation"""
return 'NeutralScenario()'
class SelectionScenario(Scenario, namedtuple('SelectionScenario', 'mutAge mutPop mutFreq')):
"""Represents a simulation of evolution where there was a selective event.
Fields:
mutAge - how long ago the good mutation arose ( in kiloyears )
mutPop - in which population the good mutation arose. this is an integer.
currently, the integer is 1,4,5 with the correspondence {1:'European',4:'EastAsian',5:'WestAfrican'}.
      mutFreq - frequency of this mutation _today_ (the fraction of the population
        in which the mutation arose that carries this mutation today).
Note that this is only the average frequency of replicas simulated with this scenario!
In each simulated replica, the exact present-day frequency of the selected allele in the selected population
will differ, though hopefully the values across all replicas will cluster around the scenario's
specified frequency.
"""
def is_neutral( self ): return False
def scenName( self ):
"""Return a string representing the scenario (does not include age)."""
return 'sel%d_%d' % ( self.mutFreq, self.mutPop )
def scenDir( self ):
"""Return a string representing a subdir in which data for this scenario is stored."""
return '%sky/%s' % ( self.mutAge, self.scenName() )
def __str__( self ):
"""Return an informal string representation"""
return self.scenDir()
def __repr__( self ):
"""Return a formal string representation"""
return 'SelectionScenario( mutAge = %d, mutPop = %d, mutFreq = %d )' % ( self.mutAge, self.mutPop, self.mutFreq )
@ApplyToResult(tuple)
def GetScenarios( mutAges = AllAges, mutPops = AllPops, mutFreqs = AllFreqs, includeNeutral = True ):
"""Yield the neutral scenario plus the full set of
selection scenarios for all combinations of (mutAge, mutPop, mutFreq). The neutral scenario is always
yielded first (some code relies on this)."""
if includeNeutral: yield NeutralScenario()
for mutAge in mutAges:
for mutPop in mutPops:
for mutFreq in mutFreqs:
yield SelectionScenario( mutAge, mutPop, mutFreq )
@ApplyToResult(tuple)
def GetSelectionScenarios( mutAges = AllAges, mutPops = AllPops, mutFreqs = AllFreqs ):
"""Yield the selection scenarios for all combinations of (mutAge, mutPop, mutFreq)"""
return GetScenarios( mutAges, mutPops, mutFreqs, includeNeutral = False )
ScenarioSetP = namedtuple( 'ScenarioSet', 'mutAges mutPops mutFreqs scenarios' )
class ScenarioSet( ScenarioSetP ):
"""Represents a set of scenarios that includes the neutral scenario,
    as well as selection scenarios for all combinations of the given
(mutAges mutPops mutFreqs).
"""
def __new__( cls, mutAges, mutPops, mutFreqs ):
return ScenarioSetP.__new__( cls, mutAges, mutPops, mutFreqs,
scenarios = tuple( GetScenarios( mutAges, mutPops, mutFreqs ) ) )
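# Illustration: a minimal usage sketch, assuming the Operations package (and
# the AllAges/AllPops/AllFreqs constants it imports) is available. It shows the
# string forms accepted by Scenario.fromString and the ordering guarantee of
# GetScenarios; the example ages, pops and freqs are illustrative values only.
if __name__ == '__main__':
    print( Scenario.fromString( 'neutral' ) )               # -> neutral
    print( Scenario.fromString( '10ky/sel20_1' ) )          # -> 10ky/sel20_1
    print( Scenario.fromString( 'sel50_4', mutAge = 40 ) )  # -> 40ky/sel50_4
    # The neutral scenario always comes first, followed by every combination.
    for scen in GetScenarios( mutAges = ( 10, ), mutPops = ( 1, 4 ), mutFreqs = ( 20, 100 ) ):
        print( repr( scen ) )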
|
import dateutil.parser
from datetime import datetime
from flask import render_template
from natural.date import duration
from logviewer2.log_utils.formatter import format_content_html
class LogEntry:
def __init__(self, data, evidence=False):
self.evidence = evidence
self.key = data["key"]
self.open = data["open"]
self.created_at = dateutil.parser.parse(data["created_at"])
self.human_created_at = duration(self.created_at, now=datetime.utcnow())
self.closed_at = (
dateutil.parser.parse(data["closed_at"]) if not self.open else None
)
self.channel_id = int(data["channel_id"])
self.guild_id = int(data["guild_id"])
self.creator = User(data["creator"])
self.recipient = User(data["recipient"])
self.closer = User(data["closer"]) if not self.open else None
self.close_message = format_content_html(data.get("close_message") or "")
self.messages = [Message(m) for m in data["messages"]]
self.internal_messages = [m for m in self.messages if m.type == "internal"]
self.thread_messages = [
m for m in self.messages if m.type not in ("internal", "system")
]
@property
def system_avatar_url(self):
return "/static/icons/clyde.png"
@property
def human_closed_at(self):
return duration(self.closed_at, now=datetime.utcnow())
@property
def message_groups(self):
groups = []
if not self.messages:
return groups
curr = MessageGroup(self.messages[0].author)
for index, message in enumerate(self.messages):
next_index = index + 1 if index + 1 < len(self.messages) else index
next_message = self.messages[next_index]
curr.messages.append(message)
if message.is_different_from(next_message):
groups.append(curr)
curr = MessageGroup(next_message.author)
groups.append(curr)
return groups
def render_html(self, **kwargs):
return render_template("logbase.html", log_entry=self, **kwargs)
def render_plain_text(self):
messages = self.messages
thread_create_time = self.created_at.strftime("%d %b %Y - %H:%M UTC")
out = f"Thread created at {thread_create_time}\n"
if self.creator == self.recipient:
out += f"[R] {self.creator} "
out += f"({self.creator.id}) created a Modmail thread. \n"
else:
out += f"[M] {self.creator} "
out += f"created a thread with [R] "
out += f"{self.recipient} ({self.recipient.id})\n"
out += "────────────────────────────────────────────────\n"
if messages:
for index, message in enumerate(messages):
next_index = index + 1 if index + 1 < len(messages) else index
curr, next_ = message.author, messages[next_index].author
author = curr
user_type = "M" if author.mod else "R"
create_time = message.created_at.strftime("%d/%m %H:%M")
base = f"{create_time} {user_type} "
base += f"{author}: {message.raw_content}\n"
for attachment in message.attachments:
base += f"Attachment: {attachment.url}\n"
out += base
if curr != next_:
out += "────────────────────────────────\n"
if not self.open:
if messages: # only add if at least 1 message was sent
out += "────────────────────────────────────────────────\n"
out += f"[M] {self.closer} ({self.closer.id}) "
out += "closed the Modmail thread. \n"
closed_time = self.closed_at.strftime("%d %b %Y - %H:%M UTC")
out += f"Thread closed at {closed_time} \n"
return out
class User:
def __init__(self, data):
self.id = int(data.get("id"))
self.name = data["name"]
self.discriminator = data["discriminator"]
self.avatar_url = data["avatar_url"]
self.mod = data["mod"]
@property
def default_avatar_url(self):
return "https://cdn.discordapp.com/embed/avatars/{}.png".format(
int(self.discriminator) % 5
)
def __str__(self):
return f"{self.name}#{self.discriminator}"
def __eq__(self, other):
return self.id == other.id and self.mod is other.mod
class MessageGroup:
def __init__(self, author):
self.author = author
self.messages = []
@property
def created_at(self):
return self.messages[0].human_created_at
@property
def type(self):
return self.messages[0].type
class Attachment:
def __init__(self, data):
if isinstance(data, str): # Backwards compatibility
self.id = 0
self.filename = "attachment"
self.url = data
self.is_image = True
self.size = 0
else:
self.id = int(data["id"])
self.filename = data["filename"]
self.url = data["url"]
self.is_image = data["is_image"]
self.size = data["size"]
class Message:
def __init__(self, data):
self.id = int(data["message_id"])
self.created_at = dateutil.parser.parse(data["timestamp"])
self.human_created_at = duration(self.created_at, now=datetime.utcnow())
self.raw_content = data["content"]
self.content = self.format_html_content(self.raw_content)
self.attachments = [Attachment(a) for a in data["attachments"]]
self.author = User(data["author"])
self.type = data.get("type", "thread_message")
self.edited = data.get("edited", False)
def is_different_from(self, other):
return (
(other.created_at - self.created_at).total_seconds() > 60
or other.author != self.author
or other.type != self.type
)
@staticmethod
def format_html_content(content):
return format_content_html(content)
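# Illustration: an example payload whose shape is inferred from the dict
# lookups in LogEntry, Message and User above (the real Modmail log documents
# may carry more fields). With an open thread and no messages,
# render_plain_text() only emits the header lines.
if __name__ == "__main__":
    _user = {
        "id": "1234",
        "name": "example",
        "discriminator": "0001",
        "avatar_url": "",
        "mod": False,
    }
    _payload = {
        "key": "abc123",
        "open": True,
        "created_at": "2021-01-01T00:00:00",
        "closed_at": None,
        "channel_id": "1",
        "guild_id": "2",
        "creator": _user,
        "recipient": _user,
        "closer": None,
        "close_message": "",
        "messages": [],
    }
    print(LogEntry(_payload).render_plain_text())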
|
from flaskr import create_app
#from flaskr.routes import socketio
application = create_app()
if __name__ == "__main__":
application.run(debug=True)
|
from django.conf import settings
from mighty.functions import make_searchable
from company.backends.search import SearchBackend
from company.choices.fr import LEGALFORM, APE
from io import BytesIO
import base64, pycurl, json, re, logging, time, datetime
logger = logging.getLogger(__name__)
class SearchBackend(SearchBackend):
token_url = 'https://api.insee.fr/token'
siren_url = 'https://api.insee.fr/entreprises/sirene/V3/siren'
siret_url = 'https://api.insee.fr/entreprises/sirene/V3/siret'
since_format = '%Y-%m-%d'
iso_format = '%Y-%m-%dT%H:%M:%S'
error = 5
raw_address = "%(address)s, %(locality)s %(postal_code)s"
def call_webservice(self, url, headers, postfields=None):
        buffer = BytesIO()  # response buffer
        c = pycurl.Curl()  # create the curl handle
        c.setopt(c.URL, url)  # set the target URL
        c.setopt(c.WRITEDATA, buffer)  # write the response into the buffer
        c.setopt(c.HTTPHEADER, headers)  # add the authorization header
if postfields:
            c.setopt(c.POSTFIELDS, postfields)  # add the fields to send with the POST method
try:
            c.perform()  # perform the request
            response_code = c.getinfo(c.RESPONSE_CODE)  # get the HTTP response code
            c.close()  # close the handle
datas = json.loads(buffer.getvalue())
except Exception as e:
logger.error(buffer.getvalue())
logger.error(e)
self.error-=1
if self.error:
return self.call_webservice(url, headers, postfields)
else:
raise e
return datas, response_code
def get_token(self):
basic = '%s:%s' % (settings.INSEE_KEY, settings.INSEE_SECRET)
basic = base64.b64encode(basic.encode('utf-8')).decode('utf-8')
headers = ["Authorization: Basic %s" % basic]
buffer, response_code = self.call_webservice(self.token_url, headers, "grant_type=client_credentials")
try:
return buffer["access_token"]
except Exception:
return False
def get_companies(self, qreq, number=50, offset=0):
message, companies, total, pages = (False, [], 0, 0)
access_token = self.get_token()
headers = ['Accept: application/json', 'Authorization: Bearer %s' % access_token]
url = "%s?q=%s&nombre=%s&debut=%s&masquerValeursNulles=true" % (self.siret_url, qreq, number, offset)
buffer, response_code = self.call_webservice(url, headers)
        if 'header' in buffer:
message = False if buffer['header']['message'] == "OK" else buffer['header']['message']
total = buffer['header'].get('total', 0)
pages = round(total/number) if total else 0
if str(response_code)[0] in ["2", "3"]:
for company in buffer.get('etablissements', [buffer['header']]):
logger.debug(company)
new_company = {
'siret': company.get('siret'),
'denomination': company['uniteLegale'].get('denominationUniteLegale', company['uniteLegale'].get('nomUniteLegale')),
'legalform': company['uniteLegale']['categorieJuridiqueUniteLegale'],
'ape': company['uniteLegale']['activitePrincipaleUniteLegale'].replace('.', ''),
'ape_noun': company['uniteLegale']['nomenclatureActivitePrincipaleUniteLegale'],
'since': self.since(company['uniteLegale'].get('dateCreationUniteLegale')),
'category': company['uniteLegale'].get('categorieEntreprise', ''),
'slice_effective': company['uniteLegale'].get('trancheEffectifsUniteLegale', ''),
'siege':company.get('etablissementSiege', False),
'rna': company['uniteLegale'].get('identifiantAssociationUniteLegale', None),
'address': {
'address': ' '.join(filter(None, [
company['adresseEtablissement'].get('numeroVoieEtablissement'),
company['adresseEtablissement'].get('typeVoieEtablissement'),
company['adresseEtablissement'].get('libelleVoieEtablissement')
])),
'complement': company['adresseEtablissement'].get('complementAdresseEtablissement', ''),
'locality': company['adresseEtablissement'].get('libelleCommuneEtablissement',
company['adresseEtablissement'].get('libelleCommuneEtrangerEtablissement', '')),
'postal_code': company['adresseEtablissement'].get('codePostalEtablissement',
company['adresseEtablissement'].get('codeCommuneEtablissement', '')),
'country': company['adresseEtablissement'].get('libellePaysEtrangerEtablissement', 'france').lower(),
'country_code': company['adresseEtablissement'].get('codePaysEtrangerEtablissement', 'fr').lower(),
'cedex': company['adresseEtablissement'].get('libelleCedexEtablissement', ''),
'cedex_code': company['adresseEtablissement'].get('codeCedexEtablissement', ''),
'special': company['adresseEtablissement'].get('distributionSpecialeEtablissement', ''),
'index': company['adresseEtablissement'].get('indiceRepetitionEtablissement', ''),
'nic': company.get('nic')
}
}
new_company['raw_address'] = self.raw_address % (new_company['address'])
new_company['ape_str'] = self.get_ape_str(new_company['ape'])
new_company['legalform_str'] = self.get_legalform_str(new_company['legalform'])
new_company['slice_str'] = self.get_slice_str(new_company['slice_effective'])
companies.append(new_company)
return message, companies, total, pages
else:
if 'fault' in buffer:
if buffer['fault']['code'] == 900804:
sleepfor = 61-datetime.datetime.now().second
i = 0
while i < sleepfor:
logger.info("desc: %s, wait: %s seconds" % (buffer['fault']['description'], sleepfor))
time.sleep(1)
sleepfor-=1
return self.get_companies(qreq, number, offset)
else:
logger.info(buffer)
else:
logger.info("Error encountered but we dont know what")
def get_company_by_siren(self, siren):
return self.get_companies('siren:%s+AND+etablissementSiege:true' % siren)
def get_company_by_rna(self, rna):
return self.get_companies('identifiantAssociationUniteLegale:%s+AND+etablissementSiege:true' % rna)
def get_active_companies(self, number, offset):
return self.get_companies('etatAdministratifUniteLegale:A', number, offset)
def get_company_by_fulltext(self, fulltext):
if len(fulltext) == 10 and fulltext[0] == 'W':
return self.get_company_by_rna(fulltext)
fulltext = re.sub(r"\s+", '-', fulltext)
return self.get_companies('denominationUniteLegale:%s+AND+etatAdministratifUniteLegale:A' % make_searchable(fulltext))
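# Illustration: a hedged usage sketch, assuming Django settings provide
# INSEE_KEY / INSEE_SECRET, api.insee.fr is reachable, and the base
# company.backends.search.SearchBackend needs no constructor arguments.
# get_companies() and the wrappers built on it return a
# (message, companies, total, pages) tuple on success.
if __name__ == "__main__":
    backend = SearchBackend()
    message, companies, total, pages = backend.get_company_by_fulltext("ACME")
    for company in companies:
        print(company["siret"], company["denomination"], company["raw_address"])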
|
from __future__ import print_function
from bokeh.layouts import layout, column
from bokeh.models import Div
from bokeh.models.widgets import DateRangeSlider, Select, MultiSelect
from plots import *
class Page(object):
def __init__(self, data, sizing_mode='stretch_both'):
self.full_data = data
self.sizing_mode = sizing_mode
self._init_controls()
# page components
self.abstract = ''
self.control_col = column(*self.controls, sizing_mode=sizing_mode)
self.scatter_plot = ScatterPlot()
self.box_plot = BoxPlot()
self.plot_col = column([self.scatter_plot.figure, self.box_plot.figure], sizing_mode=self.sizing_mode)
# build layout
self.layout = layout([
# [Div(text=self.abstract, sizing_mode=sizing_mode)],
[self.control_col, self.plot_col]
], sizing_mode=self.sizing_mode)
# init selection
self.update()
def _init_controls(self):
min_date = self.full_data.check_date.min().date()
max_date = self.full_data.check_date.max().date()
all_status = list(pd.unique(self.full_data.status)) + ['All']
all_app_types = list(pd.unique(self.full_data.type)) + ['All']
self.date_slid = DateRangeSlider(title="Check date range", start=min_date, end=max_date,
value=(min_date, max_date), step=1)
self.embassy_sel = MultiSelect(title="Embassy", value=['BeiJing'],
options=pd.unique(self.full_data['loc']).tolist())
self.visa_type_sel = MultiSelect(title="Type of visa", value=['H1'],
options=pd.unique(self.full_data['visa']).tolist())
self.status_sel = Select(title="Application status", value='All', options=all_status)
self.app_type_sel = Select(title="Type of application", value='All', options=all_app_types)
for ctrl in self.controls:
ctrl.on_change('value', lambda attr, old, new: self.update())
@property
def components(self):
return self.scatter_plot, self.box_plot
@property
def controls(self):
return [self.date_slid, self.embassy_sel, self.visa_type_sel, self.status_sel, self.app_type_sel]
def update(self):
selected = self.select_data()
for ele in self.components:
ele.update_data(selected)
def select_data(self):
date_start, date_end = self.date_slid.value_as_datetime
emb = set(self.embassy_sel.value)
vtypes = set(self.visa_type_sel.value)
status = self.status_sel.value
atype = self.app_type_sel.value
selected = self.full_data[
(pd.to_datetime(self.full_data.check_date).between(date_start, date_end)) &
(self.full_data['loc'].isin(emb)) &
(self.full_data.visa.isin(vtypes))
]
if status != "All":
selected = selected[selected.status == status]
if atype != "All":
selected = selected[selected.type == atype]
return selected
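# Illustration: a hedged sketch of serving this page with `bokeh serve`,
# assuming the project exposes a dataframe loader (the `load_data` helper
# below is hypothetical). Page wires its own widget callbacks, so a server
# script only needs to attach page.layout to the current document:
#
#   from bokeh.io import curdoc
#   from data import load_data   # hypothetical helper
#
#   page = Page(load_data(), sizing_mode='stretch_both')
#   curdoc().add_root(page.layout)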
|
#!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Executable and reusable sample for enumerating all the detection results."""
import argparse
import base64
import binascii
import pprint
import re
from typing import Any, Mapping, Optional, Sequence, Tuple
from . import chronicle_auth
from google.auth.transport import requests
CHRONICLE_API_BASE_URL = "https://backstory.googleapis.com"
OPERATION_ID_PATTERN = re.compile(
r"rulejob_jo_[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-" +
r"[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}")
DECODED_TOKEN_PATTERN = re.compile(
rb"[\s\S]+[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-" +
rb"[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}")
def list_results(
http_session: requests.AuthorizedSession,
operation_id: str,
page_size: int = 0,
page_token: Optional[str] = None
) -> Tuple[Sequence[Mapping[str, Any]], str]:
"""Retrieves all the results of the specified detection operation.
This includes both errors and Universal Data Model (UDM) matches. Any results
that are fetched before the operation has completed may be incomplete and
should not be considered authoritative.
Args:
http_session: Authorized session for HTTP requests.
operation_id: Unique ID of the asynchronous detection operation
("rulejob_jo_<UUID>").
page_size: Maximum number of results in the response. Must be non-negative.
Optional - no client-side limit by default.
page_token: Base64-encoded string token to retrieve a specific page of
results. Optional - we retrieve the first page if the token is an empty
string or a None value.
Returns:
All the results (within the defined page) as an ordered sequence of errors
or UDM matches, as well as a Base64 token for getting the results of the
next page (an empty token string means the currently retrieved page is the
last one).
Raises:
ValueError: Invalid input value.
requests.exceptions.HTTPError: HTTP request resulted in an error
(response.status_code >= 400).
"""
if page_size < 0:
raise ValueError(f"Invalid input: page_size = {page_size}, must be >= 0.")
if page_token:
try:
if not DECODED_TOKEN_PATTERN.fullmatch(base64.b64decode(page_token)):
raise ValueError(f"Invalid page token: '{page_token}'.")
except binascii.Error:
raise ValueError(f"Invalid page token: '{page_token}'.")
url = (f"{CHRONICLE_API_BASE_URL}/v1/rules_results?name=operations/" +
operation_id)
if page_size > 0:
url += f"&page_size={page_size}"
if page_token:
url += f"&page_token={page_token}"
response = http_session.request("GET", url)
# Expected server response:
# {
# "nextPageToken": "<base64>",
# "results": [
# {
# "match": {<UDM keys and values / sub-dictionaries>...}
# },
# ...
# {
# "error": {"errorMessage": "..."}
# },
# ...
# ]
# }
# - or -
# { }
if response.status_code >= 400:
print(response.text)
response.raise_for_status()
json = response.json()
return json.get("results", []), json.get("nextPageToken", "")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
chronicle_auth.add_argument_credentials_file(parser)
parser.add_argument(
"-oi",
"--operation_id",
type=str,
required=True,
help="detection operation ID ('rulejob_jo_<UUID>')")
parser.add_argument(
"-ps",
"--page_size",
type=int,
default=0,
help="maximum number of results to return (default: 0 = no limit)")
parser.add_argument(
"-pt",
"--page_token",
type=str,
default=None,
help="page token (default: none)")
args = parser.parse_args()
session = chronicle_auth.init_session(
chronicle_auth.init_credentials(args.credentials_file))
results, next_page_token = list_results(session, args.operation_id,
args.page_size, args.page_token)
pprint.pprint(results)
print(f"Next page token: {next_page_token}")
|
#!/usr/bin/env python -u
# Clinton Cario
# 02/05/2016
# Rewritten based on SSM_populator for memsql, no dependency on peewee and cleaner more efficient tables
# 02/09/2016
# Fixed bug where single quotes ruin syntax by incorporating a strip_invalid function that sanitizes syntax inputs. (featurizer)
# 02/10/2016
# Speed improvements in insert_data with multiple updates instead of case updates
# Made verbose a class variable for more consistent usage
# Added key_col as a special parameter so that keys other than the primary can be used to populate/annotate tables
# 02/17/2016
# Fixed issue with pri_key in featurizer function
# 02/26/2016
# Added featurizer_continuous code
# 03/23/2016
# Added command line verbose argument
# 03/23/2016
# Modified DB populators a bit.
# 03/28/2016
# Added retry to DB connect
# 03/30/2016
# Faster row number query in _chunkify
# 04/04/2016
# Fixed featurizer bug where multiple columns weren't being flagged
# 04/20/2016
# Added self.create_column scrub parameter to strip invalid characters and spaces
# 04/28/2016
# Added safe_query/safe_get functions to prevent script failure on DB connection interruptions
# 05/04/2016
# Modified some command line argument names
# 05/05/2016
# Changed show_sql to be a formal parameter, added show_all_sql for explicit insert/update queries, modified __init__ and populate functions
# 08/03/2016
# Added more verbose reporting
# 11/15/2016
# Added pri_key_type and sensible defaults to generate BIGINT(10) sbs_id's for feature tables that don't use mutation_id... results in faster lookup using these keys
# 03/25/2017
# Added metadata table population to facilitate downstream database lookups
# 03/25/2017
# Merged populator functionality, renamed mutation_db
# Faster featurize_category function using cached metadata and mutation key ranges instead of lookups
# 05/03/2017
# Renamed orchid_db
# 05/06/2017
# Modified database table columns a bit
# 05/10/2017
# Major changes to annotate
# Removed featurize code
# 05/23/2017
# Removed redundant safe_get() function
# 05/30/2017
# All created feature tables are now lowercase (to be consistent between mysql and memsql databases)
# System libraries
import os, sys, re, argparse
from memsql.common import database
from urlparse import urlparse
from time import sleep
import json
from pprint import pprint
# Define Amino Acid Classes
aa_classes = {
'G': 'Aliphatic',
'A': 'Aliphatic',
'V': 'Aliphatic',
'L': 'Aliphatic',
'M': 'Aliphatic',
'I': 'Aliphatic',
'S': 'Polar',
'T': 'Polar',
'C': 'Polar',
'P': 'Polar',
'N': 'Polar',
'Q': 'Polar',
'K': 'Positive',
'R': 'Positive',
'H': 'Positive',
'D': 'Negative',
'E': 'Negative',
'F': 'Aromatic',
'Y': 'Aromatic',
'W': 'Aromatic',
'*': 'Stop'
}
aa_three2one = {
'Ala':'A',
'Arg':'R',
'Asn':'N',
'Asp':'D',
'Asx':'B',
'Cys':'C',
'Glu':'E',
'Gln':'Q',
'Glx':'Z',
'Gly':'G',
'His':'H',
'Ile':'I',
'Leu':'L',
'Lys':'K',
'Met':'M',
'Phe':'F',
'Pro':'P',
'Ser':'S',
'Thr':'T',
'Trp':'W',
'Tyr':'Y',
'Val':'V',
'*' :'*',
'?' :'?'
}
class Manager():
insert_size = 5000
max_attempts = 10
host = None
port = None
user = None
password = None
database = None
verbose = None
show_sql = None
show_all_sql = None
def __init__(self, db_uri, verbose=True, show_sql=False, show_all_sql=False):
db_info = urlparse(db_uri)
self.host = db_info.hostname
self.port = db_info.port
self.user = db_info.username
self.password = db_info.password
self.database = db_info.path.strip('/')
self.verbose = verbose
self.show_sql = show_sql
self.show_all_sql = show_all_sql
def create_tables(self, mut_table, cons_table):
# Create the mutation table (DEFAULT NULLs make for muuuuch faster updates without crazy syntax)
syntax = """
CREATE TABLE IF NOT EXISTS `%s` (
`%s_id` INT unsigned NOT NULL AUTO_INCREMENT,
`is_simulated` BOOL DEFAULT NULL,
`mutation_id` VARCHAR(32) DEFAULT NULL,
`donor_id` VARCHAR(16) DEFAULT NULL,
`chromosome` ENUM('1','10','11','12','13','14','15','16','17','18','19','2','20','21','22','3','4','5','6','7','8','9','X','Y') DEFAULT NULL,
`start` INT unsigned DEFAULT NULL,
`end` INT unsigned DEFAULT NULL,
`mutation_type` ENUM('SBS','MBS','INS','DEL') DEFAULT NULL,
`reference_genome_allele` VARCHAR(200) DEFAULT NULL,
`mutated_from_allele` VARCHAR(200) DEFAULT NULL,
`mutated_to_allele` VARCHAR(200) DEFAULT NULL,
PRIMARY KEY (`%s_id`),
INDEX (`mutation_id`),
INDEX (`donor_id`),
INDEX (`chromosome`, `start`),
INDEX (`is_simulated`)
);
""" % tuple([mut_table]*3)
with self.get_connection() as db:
db.execute(syntax)
# Create the consequence table
syntax = """
CREATE TABLE IF NOT EXISTS `%s` (
`%s_id` INT unsigned NOT NULL AUTO_INCREMENT,
`mutation_id` VARCHAR(32) DEFAULT NULL,
`impact` ENUM('HIGH','MODERATE','LOW','MODIFIER') DEFAULT NULL,
`gene_id` VARCHAR(64) DEFAULT NULL,
`gene_name` VARCHAR(64) DEFAULT NULL,
#`feature_type` VARCHAR(64) DEFAULT NULL,
#`feature_id VARCHAR(64) DEFAULT NULL,
`transcript_biotype` VARCHAR(64) DEFAULT NULL,
`consequence_type` VARCHAR(64) DEFAULT NULL,
`cds_position` INT unsigned DEFAULT NULL,
`aa_position` INT unsigned DEFAULT NULL,
`aa_from_allele` CHAR(1) DEFAULT NULL,
`aa_to_allele` CHAR(1) DEFAULT NULL,
`aa_from_class` VARCHAR(10) DEFAULT NULL,
`aa_to_class` VARCHAR(10) DEFAULT NULL,
`aa_class_change` VARCHAR(24) DEFAULT NULL,
PRIMARY KEY (`%s_id`),
INDEX (`mutation_id`),
INDEX (`gene_id`)
);
""" % tuple([cons_table]*3)
with self.get_connection() as db:
db.execute(syntax)
        # Create the metadata table
syntax = """
CREATE TABLE IF NOT EXISTS `metadata` (
`metadata_id` INT unsigned NOT NULL AUTO_INCREMENT,
`metakey` VARCHAR(32) DEFAULT NULL,
`metavalue` TEXT DEFAULT NULL,
PRIMARY KEY (`metadata_id`)
);
"""
with self.get_connection() as db:
db.execute(syntax)
## To draw a progress bar
def progress_bar(self, cur,total,message="Parsing..."):
progress = min(int(cur*100/total),100)
sys.stdout.write("\rProgress: [{0:100s}] {1:3d}% [{2:d}/{3:d}] {4:50s}".format('=' * progress, progress, cur, total, message))
sys.stdout.flush()
def safe_query(self, query, ignore_errors=False, ignore_codes=None):
for attempt in xrange(1,self.max_attempts+1):
try:
with self.get_connection() as db:
return db.query(query)
break;
except Exception as e:
print "Error: ", e.message, e.args
                if ignore_errors or (ignore_codes and e.args[0] in ignore_codes):
print "(Ignored)"
return
print "\tTrouble querying the database, retrying... (attempt: %d/%d)" % (attempt, self.max_attempts)
sleep(attempt)
continue
        sys.exit('Querying the database failed after repeated attempts, giving up.')
def get_connection(self):
for attempt in xrange(1,self.max_attempts+1):
try:
return database.connect(host=self.host, port=self.port, user=self.user, password=self.password, database=self.database)
break;
except Exception as e:
print e.message, e.args
if self.verbose: print "\tTrouble establishing a database connection, retrying... (attempt: %d/%d)" % (attempt, self.max_attempts)
sleep(attempt)
continue
        sys.exit('Establishing a database connection failed after repeated attempts, giving up.')
def run_sql(self, syntax, success_msg="(OK)", error_msg="(Failed)", ignore_errors=False, ignore_codes=None):
try:
if self.show_sql: print syntax
self.safe_query(syntax, ignore_errors, ignore_codes)
if self.verbose and success_msg!=None: print success_msg
except Exception as e:
if self.verbose and error_msg!=None: print error_msg
print e.message, e.args
def strip_invalid(self, instr):
return re.sub('[^0-9a-zA-Z]+', '_', instr)
def create_table(self, table, pri_key, pri_key_type="VARCHAR(255)"):
syntax = "CREATE TABLE IF NOT EXISTS %s (%s %s NOT NULL UNIQUE, PRIMARY KEY (%s))" % (table.lower(), pri_key, pri_key_type, pri_key)
success = "The '%s' table was created" % table
error = "Creation failed. Please check table parameter and database connection"
self.run_sql(syntax, success, error)
def create_column(self, table, column, sql_type="VARCHAR(255) DEFAULT NULL", scrub=True):
if scrub:
column = self.strip_invalid(column).replace(' ','_').lower()
syntax = "ALTER TABLE %s ADD `%s` %s" % (table, column, sql_type)
success = "The '%s' column was created" % column
error = "Column exists or creation failed. Please check table and column parameters and database connection"
self.run_sql(syntax, success, error, ignore_codes=[1060]) # Ignore column exists error code
def delete_table(self, table):
syntax = "DROP TABLE %s" % table
success = "The '%s' table was dropped." % table
error = "Table deletion failed. Please check table name and database connection"
self.run_sql(syntax, success, error)
def delete_column(self, table, column):
syntax = "ALTER TABLE %s DROP COLUMN %s" % (table, column)
success = "The '%s' column was dropped." % column
error = "Column deletion failed. Please table and column names and database connection"
self.run_sql(syntax, success, error)
def reset_table(self, table, pri_key):
self.delete_table(table)
self.create_table(table, pri_key)
def reset_column(self, table, column, sql_type):
self.delete_column(table, column)
self.create_column(table, column, sql_type)
def create_if_needed(self, table, pri_key=None, pri_key_type="VARCHAR(255)", column=None, sql_type="VARCHAR(255)"):
# Try to figure out the primary key if not provided
pri_key = pri_key if pri_key!=None else "%s_id" % (table)
self.create_table(table, pri_key, pri_key_type)
self.create_column(table, column, sql_type=sql_type)
# Define a populator function
# data is [{key: value}] where key is a column name and value is the row entry
# Multiple columns can be specified, and values should have correspondence in order
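    # Illustration (assumed table/column names): for a table `ssm` keyed by
    # `ssm_id`, a call like
    #   populate_table([{'ssm_id': 'MU1', 'gene': 'TP53'},
    #                   {'ssm_id': 'MU2', 'gene': 'KRAS'}], 'ssm', 'ssm_id')
    # is turned into one statement per batch of insert_size rows of the form
    #   INSERT INTO `ssm` (`ssm_id`,`gene`) VALUES ('MU1','TP53'),('MU2','KRAS')
    #   ON DUPLICATE KEY UPDATE `gene`=VALUES(`gene`);
    # (column order follows the dict key order), with 'None' values rewritten to NULL.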
def populate_table(self, data, table, pri_key, verbose=None, show_sql=None, show_all_sql=None):
if verbose == None: verbose = self.verbose
if show_sql == None: show_sql = self.show_sql
if show_all_sql == None: show_all_sql = self.show_all_sql
if verbose: print "Populating [ %s ]" % table
if len(data)==0: print "No data, skipping"; return
columns = data[0].keys()
non_key_columns = [ col for col in columns if col != pri_key ]
batch = 0
for i in xrange(0, len(data), self.insert_size):
batch += 1
for attempt in xrange(1,self.max_attempts+1):
try:
if verbose: print "Inserting batch [%d], attempt [%d]..." % (batch, attempt),
# Subset the insert values for this chunk
values = [ "('%s')" % ("','".join([ str(entry[col]) for col in columns ])) for entry in data[i:i+self.insert_size] ]
syntax = "INSERT INTO `%s` (`%s`) VALUES %s " % (table, "`,`".join(columns), ",".join(values))
syntax += "ON DUPLICATE KEY UPDATE " + ", ".join(["`%s`=VALUES(`%s`)" %(col, col) for col in non_key_columns]) + ";"
syntax = re.sub("'None'",'NULL',syntax, flags=re.MULTILINE)
#if show_sql: print "\t"+syntax[0:200]+" ...... \t"+syntax[-50:]
if show_all_sql: print syntax
#if verbose: print "\tAttempting to execute"
self.safe_query(syntax)
if verbose: print "(OK)"
break
except Exception as e:
if verbose: print "(Failed)"
if attempt >= self.max_attempts:
print e.message, e.args
sys.exit(0)
continue
# metakey: 'counts', 'distincts', or other
# metavalue: JSONified string of values
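    # Illustration: populate_mutations() below records per-table row counts via
    #   self.update_metadata(metakey="%s_count" % mut_table, metavalue=mutation_count)
    # so downstream code can read totals from the metadata table instead of
    # running COUNT(*) queries; existing *count entries are accumulated rather
    # than overwritten.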
def update_metadata(self, metakey, metavalue):
# Check if an entry exists
syntax = "SELECT * FROM metadata WHERE `metakey`='%s';" % (metakey)
result = self.safe_query(syntax)
if len(result)>0:
# Update
id_ = result[0]['metadata_id']
            if metakey == 'count' or metakey.endswith('_count'):
                metavalue = json.dumps(int(result[0]['metavalue']) + int(metavalue))
data = [{'metadata_id':id_, 'metakey':metakey, 'metavalue':metavalue}]
self.populate_table(data, 'metadata', 'metadata_id', verbose=False)
else:
# Insert
data = [{'metakey':metakey, 'metavalue':metavalue}]
self.populate_table(data, 'metadata', 'metadata_id', verbose=False)
# Fasta file should look like:
## >primary_key
## sequence
# The 'processor_*' arguments are optional but otherwise should be lambda functions to apply to the description (primary key) and sequence fields
# Note that the '>' of the description field is parsed out
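    # Illustration (hypothetical file and values): for records such as
    ##   >MU12345
    ##   exonic
    # a call like
    #   self.parse_fasta('context.fa', acceptable=set(['exonic', 'intronic']),
    #                    id_processor=lambda key: key.lower())
    # returns [{'key': 'mu12345', 'val': 'exonic'}, ...], which can be handed
    # straight to populate_annotations().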
def parse_fasta(self, source_file, acceptable=None, id_processor=None, seq_processor=None):
if self.verbose: print "Parsing fasta...",
if source_file == None:
sys.exit("You must specify a source file to parse as the first argument!")
# Get the data
data = []
with open(source_file, 'r') as ifh:
for line in ifh:
pri_key = line.replace(">","").strip()
pri_key = id_processor(pri_key) if (id_processor!=None) else pri_key
seq = ifh.next().strip()
seq = seq_processor(seq) if (seq_processor!=None) else seq
if acceptable and (seq not in acceptable): continue
data.append({'key':pri_key, 'val':seq})
#print pri_key, seq
if self.verbose: print "done."
return data
def parse_flatfile(self, source_file, acceptable=None, id_col=0, data_col=1, delimiter="\t", id_processor=None, data_processor=None):
if self.verbose: print "Parsing flatfile...",
if source_file == None:
sys.exit("You must specify a source file to parse as the first argument!")
data = []
with open(source_file, 'r') as ifh:
for line in ifh:
line = line.strip().split(delimiter)
pri_key = line[id_col]
pri_key = id_processor(pri_key) if (id_processor!=None) else pri_key
val = line[data_col]
val = data_processor(val) if (data_processor!=None) else val
if acceptable and (val not in acceptable): continue
data.append({'key':pri_key, 'val':val})
if self.verbose: print "done."
return data
def populate_annotations(self, data, dst_tbl, dst_val_col, dst_val_sql="VARCHAR(255)", dst_id_col=None, dst_id_sql="VARCHAR(255)", ):
# Try to figure out the primary key if not provided
dst_id_col = dst_id_col if dst_id_col!=None else "%s_id" % (dst_tbl)
# Create the table and column (if needed)
self.create_if_needed(dst_tbl, dst_id_col, dst_id_sql, dst_val_col, dst_val_sql)
# Don't do anything if data wasn't provided
if data==None or len(data)==0:
print "No data to insert"
return
# Determine whether or not to collect distinct categories
collect = False if dst_val_sql[0:3]=='INT' or dst_val_sql[0:5]=='FLOAT' else True
# Do the inserts (populator method)
import_data = []
distincts = set()
feature_type = None
for insert in data:
# Prepare this data for import
import_data.append({ dst_id_col: insert['key'], dst_val_col: insert['val'] })
# Determine if the value is not numeric
try:
float(insert['val'])
feature_type = 'numeric'
except:
# Only for non numeric types
distincts = distincts.union(set([insert['val']]))
feature_type = 'categorical'
self.populate_table(import_data, dst_tbl, dst_id_col)
if self.verbose: print "Update successful"
def populate_mutations(self, infile, chunk_size, simulated, mut_types, mut_table, cons_table, acceptable):
num_lines = sum(1 for line in open(infile,'r'))
infile = open(infile,'r')
## Store all relational data into these three structures, corresponding to the tables
## Donor has many mutations (1 to many)
## Mutation (SSM) has many consequences (1 to many)
## Store the primary key of one side of relationship as
## the foreign key in table on many side of relationship
mutations = []
consequences = []
last_donor = None
last_mutation = None
line_no = 0
mutation_count = 0
consequence_count = 0
distincts = {}
for line in infile:
line_no += 1
# Ignore header lines
if line.startswith("#") or line.startswith("\n"):
if self.verbose: self.progress_bar(line_no,num_lines,"Skipped header line...")
continue
# Show the progress bar if in verbose mode
if (line_no % 100 == 0) and self.verbose:
self.progress_bar(line_no,num_lines)
# Give friendly names to all columns, trying to get the etcetera column if it exists
# The workflow will add this column, which is the FORMAT column in VCF, but use for a custom purpose here
entry = line.strip("\n").split("\t")
etcetera = None
try:
chromosome, start, mutation_id, from_allele, to_allele, quality, filt, info, etcetera = entry
except ValueError:
chromosome, start, mutation_id, from_allele, to_allele, quality, filt, info = entry
# Parse etcetera information
ref_allele, donor_id, mutation_type = (None, None, None)
if etcetera!=None and etcetera!='.':
ref_allele = re.match(r'^.*REF_ALLELE=([\w-]+);.*$', etcetera, re.M)
if ref_allele: ref_allele = ref_allele.group(1)
donor_id = re.match(r'^.*DONOR_ID=([^;]+);.*$', etcetera, re.M)
if donor_id: donor_id = donor_id.group(1)
mutation_type = re.match(r'^.*TYPE=([\w_]+);.*$', etcetera, re.M)
if mutation_type: mutation_type = mutation_type.group(1)
# Try sensible defaults if there are any issues
ref_allele = ref_allele or from_allele
donor_id = donor_id or "DOXXXXXX"
mutation_type = mutation_type or None
if mutation_type == None:
if from_allele == '-':
mutation_type = 'INS'
elif to_allele == '-':
mutation_type = 'DEL'
elif len(from_allele)==1 and len(to_allele)==1:
mutation_type = 'SBS'
elif len(from_allele)>1 or len(to_allele)>1:
mutation_type = 'MBS'
# Skip non-requested mutation types
if mutation_type and mut_types and mutation_type not in mut_types:
if self.verbose: self.progress_bar(line_no,num_lines,"Skipped non requested mutation type (%s)..." % mutation_type)
continue
# Skip all non standard chromosomes
chromosome = chromosome.strip('chr')
if chromosome not in map(str,range(1,23))+['X','Y']:
if self.verbose: self.progress_bar(line_no,num_lines,"Skipped invalid chromosome (%s)..." % chromosome)
continue
# Calculate the end position (not available from VCF)
end = start
if mutation_type in ("MBS", "DEL"):
end = int(start) + len(ref_allele)-1
# Reduce indel size to 200 (as per ICGC claims)
ref_allele = ref_allele[0:200]
from_allele = from_allele[0:200]
to_allele = to_allele[0:200]
# Make up a mutation id if needed
if mutation_id=='.':
mutation_id = str(chromosome) + '_' + str(start) + '_' + str(end)
# Add the mutation to the population list!
if not (mutation_id == last_mutation):
mutations.append({
'is_simulated' : (1 if simulated else 0),
'mutation_id' : mutation_id,
'donor_id' : donor_id,
'chromosome' : chromosome,
'start' : int(start),
'end' : int(end),
'mutation_type' : mutation_type,
'reference_genome_allele' : ref_allele,
'mutated_from_allele' : from_allele,
'mutated_to_allele' : to_allele,
})
mutation_count += 1
# Parse the info consequences
for shard in info.split(";"):
if shard[0:4]=='ANN=':
for splinter in shard.split(','):
splinter = splinter.replace('ANN=','')
allele, consequence, impact, name, gene_id, feature_type, feature_id, \
transcript_biotype, rank_total, dna_position, aa_position, cDNA_position_len, \
CDS_position_len, protein_position_len, distance, errors = splinter.split('|')
# Skip entries with consequences that aren't considered acceptable
if acceptable and (consequence not in acceptable):
continue
# Truncate the consequence_type if very long (database doesn't like this)
if len(consequence) >= 32: consequence=consequence[0:31]
if consequence == 'intergenic_region':
gene_id = None
name = None
# Amino acid information
aa_pos, aa_from, aa_to, aa_from_class, aa_to_class, aa_class_change = (None, None, None, None, None, None)
aa_info = re.match(r'^p\.([a-zA-Z\*]+)(\d+)([a-zA-Z\*\?]*)$', aa_position, re.M)
if aa_info:
aa_pos = aa_info.group(2)
aa_from = aa_info.group(1)
aa_to = aa_info.group(3)
try:
aa_from_class = aa_classes[aa_three2one[aa_from]]
except:
pass
try:
aa_to_class = aa_classes[aa_three2one[aa_to]]
except:
pass
try:
if aa_from_class != None and aa_to_class != None:
aa_class_change = aa_from_class + " => " + aa_to_class
if aa_from_class == aa_to_class: aa_class_change = "Unchanged"
except:
pass
# The CDS position
cds_position = None if CDS_position_len==None else CDS_position_len.split("/")[0]
# Add the consequence to the population list!
this = {
'mutation_id' : mutation_id,
'impact' : impact or None,
'gene_id' : gene_id or None,
'gene_name' : name or None,
#'feature_type' : feature_type or None,
#'feature_id' : feature_id or None,
'transcript_biotype' : transcript_biotype or None,
'consequence_type' : consequence or None,
'cds_position' : cds_position or None,
'aa_position' : aa_pos or None,
'aa_from_allele' : aa_from or None,
'aa_to_allele' : aa_to or None,
'aa_from_class' : aa_from_class or None,
'aa_to_class' : aa_to_class or None,
'aa_class_change' : aa_class_change or None,
}
if this not in consequences:
consequences.append(this)
consequence_count +=1
# Do a bulk insert if we've collect at least chunk_size lines and have finished a mutation
if ((last_mutation != mutation_id) and (len(consequences) >= chunk_size)) or (line_no>=num_lines-1):
#print "Populating... %s" % line_no
if self.verbose: self.progress_bar(line_no,num_lines,"Populating Mutations...")
self.populate_table(mutations, mut_table, "%s_id"%mut_table, verbose=False)
if self.verbose: self.progress_bar(line_no,num_lines,"Populating consequences...")
self.populate_table(consequences, cons_table, "%s_id"%cons_table, verbose=False)
# Reset chunk variables
mutations = []
consequences = []
last_mutation = mutation_id
last_donor = donor_id
if self.verbose: self.progress_bar(num_lines,num_lines)
print "\nAdding metadata table..."
data = []
# Update table counts
self.update_metadata(
metakey = "%s_count" % mut_table,
metavalue = mutation_count
)
self.update_metadata(
metakey = "%s_count" % cons_table,
metavalue = consequence_count
)
print "Completed Successfully."
# Close everything
infile.close()
if __name__ == "__main__":
# =============================
# Parameter Definition
# ---------
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(help='commands', dest="command")
#### The populate command
populate = subparsers.add_parser('populate', help='This command takes a SNPEff VCF file and populates a database.')
populate.add_argument('-x', '--connection', action='store', dest='db_uri', help='A database URI connection string (e.g. mysql://user:pass@host:port/DB) if $DATABASE is not defined.')
populate.add_argument('-i', '--input-file', action='store', dest='infile', help='A tab delimited file of SNPEff VCF or simulated mutations.')
    populate.add_argument('-k', '--chunk-size', action='store', dest='chunk_size', help='Approximately how many mutation consequences to collect before a database insert (the actual insert happens once the current mutation finishes after this limit is reached).')
    populate.add_argument('-s', '--simulated', action='store_true', dest='simulated', help='Whether the imported data is simulated (sets the simulated flag in the database and ignores metadata fields).')
populate.add_argument('-t', '--mutation-types', action='store', dest='mut_types', help='The type of mutation(s) to populate (space-delimit list items; default: accept all).', nargs='+')
populate.add_argument('-m', '--mutation-table', action='store', dest='mut_table', help='The name of the table to store mutations of type [mutation-type] (default: ssm).')
populate.add_argument('-c', '--consequence-table', action='store', dest='cons_table', help='The name of the table to store mutation consequences (default: consequence).')
populate.add_argument('-A', '--acceptable-consequences', action='store', dest='acceptable', help='A JSON string containing a list of acceptable consequences.')
populate.add_argument('-v', '--verbose', action='store_true', dest='verbose', help='Whether to be verbose and display status on the command line.', default=True)
populate.add_argument('-vv', '--show-sql', action='store_true', dest='show_sql', help='Whether to show sql statements, excluding insert/update queries.', default=True)
    populate.add_argument('-vvv', '--show-all-sql', action='store_true', dest='show_all_sql', help='Whether to show sql statements, including insert/update queries.', default=False)
#### The annotate command
# Input file options
annotate = subparsers.add_parser('annotate', help='Populate the database with feature information.')
annotate.add_argument('-i', '--source-file', action='store', dest='src_file', help='The name of the source file used to annotate the destination table.')
annotate.add_argument('-t', '--source-type', action='store', dest='src_type', help='The source file type: \'flatfile\' or \'fasta\'.', choices=['fasta', 'flatfile'])
annotate.add_argument('-sic', '--source-id-column', action='store', dest='src_id_col', help='The column in the source file that corresponds to the mutation id. 0-indexed, defaults to 0.')
annotate.add_argument('-svc', '--source-value-column', action='store', dest='src_val_col', help='The column in the source data file that corresponds to the value to insert. 0-indexed, defaults to 1.')
annotate.add_argument('-D', '--delimiter', action='store', dest='delimiter', help='The field delimiter for the source file. Defaults to tab.')
# Database table options
annotate.add_argument('-x', '--connection', action='store', dest='connection', help='A database URI connection string (e.g. mysql://user:pass@host:port/DB) if $DATABASE is not defined.')
annotate.add_argument('-d', '--destination-table', action='store', dest='dst_tbl', help='The name of the destination table. Defaults to \'ssm\'.')
annotate.add_argument('-dic', '--destination-id-column', action='store', dest='dst_id_col', help='The primary key column name of [destination-table]. Defaults to [destination-table]_id.')
annotate.add_argument('-dis', '--destination-id-sql', action='store', dest='dst_id_sql', help='The SQL type of id (primary key). Defaults to \'BIGINT\'.')
annotate.add_argument('-c', '--destination-value-column', action='store', dest='dst_val_col', help='The column name in [destination-table] for the inserted values. Defaults to \'values\'.')
annotate.add_argument('-dvs', '--destination-value-sql', action='store', dest='dst_val_sql', help='The SQL type of inserted values. Defaults to \'VARCHAR(255)\'.')
# Processing options
annotate.add_argument('-A', '--acceptable', action='store', dest='acceptable', help='A list of acceptable values for the value column in the destination table (space-delimit list items; default: accept all).', nargs='+')
annotate.add_argument('-I', '--id-processor', action='store', dest='id_processor', help='Python lambda function as a string that will be applied to the id column before inserting into the destination table. Defaults to None.')
annotate.add_argument('-S', '--sequence-processor', action='store', dest='seq_processor', help='Python lambda function as a string that will be applied to fasta sequence entries before inserting into the destination table. Defaults to None.')
annotate.add_argument('-V', '--value-processor', action='store', dest='val_processor', help='Python lambda function as a string that will be applied to value column entries before inserting into the destination table. Defaults to None.')
# Debug options
annotate.add_argument('-v', '--verbose', action='store_true', dest='verbose', help='Whether to be verbose and display status on the command line.')
annotate.add_argument('-vv', '--show-sql', action='store_true', dest='show_sql', help='Whether to show sql statements, excluding insert/update queries.')
    annotate.add_argument('-vvv', '--show-all-sql', action='store_true', dest='show_all_sql', help='Whether to show sql statements, including insert/update queries.')
# JSON list of options
annotate.add_argument('-j', '--json', action='store', dest='json', help='A JSON string of command arguments as a map')
args = parser.parse_args()
# =============================
# POPULATE MUTATIONS
# ---------
if args.command=='populate':
        # Parse parameters from the command line or as given in the passed JSON string (preferring command line arguments)
        extra = json.loads(args.json) if getattr(args, 'json', None) else {}
        db_uri = os.environ.get('DATABASE') or args.db_uri or extra.get('connection')
infile = args.infile
chunk_size = args.chunk_size or 20000
simulated = args.simulated or False
mut_types = args.mut_types or None
mut_table = args.mut_table or 'ssm'
cons_table = args.cons_table or 'consequence'
acceptable = args.acceptable
verbose = args.verbose
show_sql = args.show_sql
show_all_sql = args.show_all_sql
# Create a manager to parse the data ...
manager = Manager(db_uri, verbose, show_sql, show_all_sql)
print "Creating database tables..."
manager.create_tables(mut_table, cons_table)
print "Importing mutations..."
manager.populate_mutations(infile, chunk_size, simulated, mut_types, mut_table, cons_table, acceptable)
# =============================
# ANNOTATE MUTATIONS
# ---------
if args.command=='annotate':
extra = json.loads(args.json) if args.json else {}
# Input file options
src_file = args.src_file or extra.get('source_file')
src_type = args.src_type or extra.get('source_type') or 'flatfile'
src_id_col = args.src_id_col or extra.get('source_id_column') or 0
src_val_col = args.src_val_col or extra.get('source_value_column') or 1
delimiter = args.delimiter or extra.get('delimiter') or "\t"
# Database table options
db_uri = os.environ.get('DATABASE') or args.connection or extra.get('connection')
dst_tbl = args.dst_tbl or extra.get('destination_table') or 'ssm'
dst_id_col = args.dst_id_col or extra.get('destination_id_column') or dst_tbl+'_id'
dst_id_sql = args.dst_id_sql or extra.get('destination_id_sql') or 'BIGINT'
dst_val_col = args.dst_val_col or extra.get('destination_value_column') or 'values'
dst_val_sql = args.dst_val_sql or extra.get('destination_value_sql') or 'VARCHAR(255)'
# Processing options
acceptable = args.acceptable or extra.get('acceptable') or None
id_processor = args.id_processor or extra.get('id_processor') or None
seq_processor = args.seq_processor or extra.get('sequence_processor') or None
val_processor = args.val_processor or extra.get('value_processor') or None
# Debug options
verbose = args.verbose or extra.get('verbose') or False
show_all_sql = args.show_all_sql or extra.get('show_all_sql') or False
show_sql = args.show_sql or extra.get('show_sql') or False or show_all_sql
# Convert data processor functions into python functions proper
# These processors are not sanitized.
# ASSUMES CODE PASSED ON THE COMMAND LINE IS TRUSTED!
try:
seq_processor = eval(seq_processor)
except:
pass
try:
id_processor = eval(id_processor)
except:
pass
try:
val_processor = eval(val_processor)
except:
pass
# Create a manager to parse the data ...
manager = Manager(db_uri, verbose, show_sql, show_all_sql)
data = []
if src_type=="fasta":
print "Parsing fasta file: %s..." % (src_file)
data = manager.parse_fasta(src_file, acceptable, id_processor, seq_processor)
elif src_type=="flatfile":
print "Parsing flatfile: %s..." % (src_file)
data = manager.parse_flatfile(src_file, acceptable, src_id_col, src_val_col, delimiter, id_processor, val_processor)
# ... and then insert it into the database
print "Importing data to: %s.%s" % (dst_tbl, dst_val_col)
manager.populate_annotations(data, dst_tbl, dst_val_col, dst_val_sql, dst_id_col, dst_id_sql)
|
import networkx as nx
import os
from diagram import Diagram
from spf import spf
class lfa:
"""This class provides RFC5286 lfa calculations"""
def __init__(self, debug=0):
"""
Init the lfa class.
:param int debug: debug level, 0 is disabled.
:return None: __init__ shouldn't return anything
:rtype: None
"""
self.debug = debug
self.diagram = Diagram(debug=self.debug)
self.path_types = ["lfas_dstream", "lfas_link", "lfas_node"]
self.spf = spf(debug=self.debug)
def draw(self, graph, outdir, topology):
"""
Loop over the generated topologies and render them as diagram files.
:param networkx.Graph graph: NetworkX graph object
:param str outdir: string of the root output directory path
:param dict topology: topology paths dict
        :return None:
        :rtype: None
"""
self.diagram.gen_sub_dirs(graph, outdir, self.path_types, topology)
for src, dst in [
(s, d) for d in graph.nodes for s in graph.nodes if s != d
]:
for path_type in self.path_types:
if path_type not in topology[src][dst]:
continue
if len(topology[src][dst][path_type]) > 0:
frr_graph = graph.copy()
# Highlight the failed first-hop link(s) as red
for path in topology[src][dst]["spf_metric"]:
frr_graph = self.diagram.highlight_fh_link(
"red",
frr_graph,
path,
)
# Highlight the failed first-hop node(s) as red
if path_type == "lfas_dstream":
for path in topology[src][dst]["spf_metric"]:
frr_graph = self.diagram.highlight_fh_node(
"red",
frr_graph,
path,
)
elif path_type == "lfas_node":
for path in topology[src][dst]["spf_metric"]:
frr_graph = self.diagram.highlight_fh_node(
"red",
frr_graph,
path,
)
for path in topology[src][dst][path_type]:
frr_graph = self.diagram.highlight_links(
"green",
frr_graph,
path,
)
frr_graph = self.diagram.highlight_nodes(
"green",
frr_graph,
path,
)
frr_graph = self.diagram.highlight_src_dst(
"lightblue", dst, frr_graph, src
)
# Add labels to links showing their cost
frr_graph = self.diagram.label_link_weights(frr_graph)
self.diagram.gen_diagram(
(src + "_" + dst + "_" + path_type),
frr_graph,
os.path.join(outdir, src, path_type),
)
def gen_metric_paths(self, dst, graph, src):
"""
        Return all lfa paths between the src and dst nodes in graph, based on
        link metric (not hop count), which provide link, downstream, or node
        protection. The alternate paths are returned in a dict of lists keyed
        by lfa path protection type. A minimal usage sketch is provided at the
        end of this module.
:param str dst: Destination node name in graph
:param networkx.Graph graph: NetworkX graph object
:param str src: Source node name in graph
:return lfas: dict of lists keyed by lfa type
:rtype: dict
"""
lfas = {"lfas_link": [], "lfas_dstream": [], "lfas_node": []}
if self.debug > 0:
print(f"Calculating for lfa paths from {src} to {dst}")
s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
# There are no paths between this src,dst pair
if not s_d_paths:
return lfas
# Loop over each neighbour to check if each one is an lfa candidate
for nei in graph.neighbors(src):
            # If dst is directly connected
if nei == dst:
continue
if self.debug > 1:
print(f"Checking for lfa paths via {nei}")
# This nei is the next-hop for the current best path(s)
if nei in [path[1] for path in s_d_paths]:
if self.debug > 1:
print(
f"Rejected lfas via next-hop {nei}, it is a next-hop "
f"in the current best path(s): {s_d_paths}"
)
continue
"""
            ECMP may be used, meaning src has multiple equal-cost best paths to
            dst. And/or, nei may have multiple equal-cost best paths to dst.
            Regardless of the number of paths, they are all the same cost, so
            only check the cost of the first best path of src against the first
            best path of nei.
"""
nh = s_d_paths[0][1]
try:
n_d_cost = nx.dijkstra_path_length(graph, source=nei, target=dst)
n_s_cost = nx.dijkstra_path_length(graph, source=nei, target=src)
s_d_cost = nx.dijkstra_path_length(graph, source=src, target=dst)
n_nh_cost = nx.dijkstra_path_length(graph, source=nei, target=nh)
nh_d_cost = nx.dijkstra_path_length(graph, source=nh, target=dst)
except nx.exception.NetworkXNoPath:
# There isn't connectivity between the nodes; src, dst, nh, nei
continue
if self.debug > 1:
print(
f"{nei} -> {dst}: {n_d_cost}\n"
f"{nei} -> {src}: {n_s_cost}\n"
f"{src} -> {dst}: {s_d_cost}\n"
f"{nei} -> {nh}: {n_nh_cost}\n"
f"{nh} -> {dst}: {nh_d_cost}"
)
link_prot = False
down_prot = False
node_prot = False
"""
RFC5286:
Inequality 1: Loop-Free Criterion
A neighbor N of source S can provide a loop-free alternate (lfa)
toward destination D, that is link protecting, iff:
Distance_opt(N, D) < Distance_opt(N, S) + Distance_opt(S, D)
In this scenario, N's cost to D is lower than N's cost to S + S's
cost to D, so N must have an alternative path to D not via S, but
S and N might be sharing the same next-hop router, and N simply
has another link to that shared next-hop router, so it is link
            protecting only, for S's link to its next-hop.
"""
if n_d_cost < (n_s_cost + s_d_cost):
if self.debug > 1:
print(
f"{nei} to {dst} < ({nei} to {src} + {src} to {dst}), "
f"{n_d_cost} < {n_s_cost+s_d_cost}"
)
# nei protects src against link failure to next-hop toward dst
link_prot = True
"""
RFC5286:
Inequality 2: Downstream Path Criterion
A neighbor N of source S can provide a loop-free alternate (lfa)
to downstream paths of D, which could be link or node protecting,
iff:
Distance_opt(N, D) < Distance_opt(S, D)
            In this scenario, N's cost to D is lower than S's, so N won't route
            back to S. This guarantees basic loop avoidance, but it doesn't
            restrict the lfa path to being link protecting or node protecting.
This scenario is usually used to provide protection for a specific
downstream prefix of node D rather than S's next-hop node or link
toward D.
"""
if n_d_cost < (s_d_cost):
if self.debug > 1:
print(
f"{nei} to {dst} < {src} to {dst}: "
f"{n_d_cost} < {n_s_cost}"
)
# nei protects src against failure of link or node toward dst
down_prot = True
"""
RFC5286:
Inequality 3: Criteria for a Node-Protecting Loop-Free Alternate
For an alternate next-hop N to protect against node failure of a
primary neighbor E for destination D, N must be loop-free with
respect to both E and D.
Distance_opt(N, D) < Distance_opt(N, E) + Distance_opt(E, D)
            In this scenario, neighbour N of source router S uses a different
            next-hop router toward destination D than router E, which is S's
            next-hop router toward D. This provides node protection against S's
next-hop router E.
"""
if n_d_cost < (n_nh_cost + nh_d_cost):
if self.debug > 1:
print(
f"{nei} to {dst} < ({nei} to {nh} + {nh} to {dst}), "
f"{n_d_cost} < {n_nh_cost+nh_d_cost}"
)
# nei protects src against next-hop node failure toward dst
node_prot = True
# nei might have multiple equal-cost best paths to dst
n_d_paths = self.spf.gen_metric_paths(
dst=dst, graph=graph, src=nei
)
for n_d_path in n_d_paths:
if link_prot:
                    # Prepend src to n_d_path because it starts from nei
if n_d_path[0] != src:
n_d_path.insert(0, src)
lfas["lfas_link"].append(n_d_path)
if self.debug > 1:
print(
f"New link protecting lfa from {src} to "
f"{dst} via {nei}, protects against link "
f"{src}-{nh}: {n_d_path}"
)
if down_prot:
                    # Prepend src to n_d_path because it starts from nei
if n_d_path[0] != src:
n_d_path.insert(0, src)
lfas["lfas_dstream"].append(n_d_path)
if self.debug > 1:
print(f"New downstream protecting lfa: {n_d_path}")
if node_prot:
"""
In order to protect pre-failure ECMP best-paths, check that
this node protecting path doesn't overlap with any of the
ECMP next-hop nodes
"""
s_d_fhs = [path[1] for path in s_d_paths]
overlap = [
fh
for fh in s_d_fhs
for n_d_path in n_d_paths
if fh in n_d_path
]
if overlap:
if self.debug > 1:
print(
f"lfa path {n_d_path} is not node protecting "
f"against {overlap} from {src} to {dst}"
)
continue
lfas["lfas_node"].append(n_d_path)
if self.debug > 1:
print(
f"New node protecting path from {src} to {dst} "
f"via {nei}, protects against node {nh}: "
f"{n_d_path}"
)
return lfas
def init_topo(self, graph, topo):
"""
        Create empty dict keys for all possible paths this class can generate.
        :param networkx.Graph graph: NetworkX graph object
        :param dict topo: topology paths dict
        :return None:
        :rtype: None
"""
for src in graph.nodes:
for dst in graph.nodes:
if src == dst:
continue
for path_type in self.path_types:
if path_type not in topo[src][dst]:
topo[src][dst][path_type] = []
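# ------------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module; the node names, metrics
# and the "weight" edge attribute are illustrative assumptions, and it relies on
# the sibling spf/diagram modules imported above). It builds a small weighted
# triangle and asks for the lfa paths from R1 to R3. With these metrics the best
# path is the direct R1-R3 link (cost 4), and R2 satisfies inequality 1
# (3 < 2 + 4) and inequality 2 (3 < 4), so it should be reported as a
# link-protecting and downstream-protecting alternate.
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    example_graph = nx.Graph()
    example_graph.add_edge("R1", "R2", weight=2)
    example_graph.add_edge("R2", "R3", weight=3)
    example_graph.add_edge("R1", "R3", weight=4)
    example_lfa = lfa(debug=1)
    print(example_lfa.gen_metric_paths(dst="R3", graph=example_graph, src="R1"))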
|
#!/usr/bin/env python3
"""
Functions for Dataset Caching
=============================
"""
import os
import pickle
def cached_to_file(filename):
"""Decorator to cache the output of a function to a file
Sometimes your workflow will contain functions that are executed once but
    take a lot of time (typically data preprocessing). This can be annoying
    when e.g. running multiple experiments with different parameters. This
    decorator provides a solution by running the function once, then saving its
    output to a file. The next time you call this function, unless the file in
    question has been deleted, it will just read its result from the file
    instead of recomputing everything.
Caveats:
- By default if you call the decorated function with different arguments,
this will still load the cached output from the first function call with
the *original arguments*. You need to add the `update_cache=True`
keyword argument to force the function to be rerun. Incidentally the
decorated function should not have an argument named `update_cache`.
- The serialization is done with pickle, so:
1. it isn't super secure (if you care about these things)
2. it only handles functions where the outputs can be pickled
(for now). Typically this wouldn't work for dynet objects.
Example usage:
.. code-block:: python
@cached_to_file("preprocessed_data.bin")
def preprocess(raw_data):
# do a lot of preprocessing
# [...] do something else
# This first call will run the function and pickle its output to
# "preprocessed_data.bin" (and return the output)
data = preprocess(raw_data)
# [...] do something else, or maybe rerun the program
# This will just load the output from "preprocessed_data.bin"
data = preprocess(raw_data)
# [...] do something else, or maybe rerun the program
# This will force the function to be rerun and the cached output to be
        # updated. You should do that if, for example, the arguments of
# `preprocess` are expected to change
data = preprocess(raw_data, update_cache=True)
Args:
filename (str): Name of the file where the cached output should
be saved to.
"""
def _load_cached_output(func):
def wrapped_func(*args, update_cache=False, **kwargs):
if not os.path.isfile(filename) or update_cache:
# If the cached output doesn't exist, do all the processing
output = func(*args, **kwargs)
with open(filename, "wb") as f:
pickle.dump(output, f)
else:
                # Otherwise unpickle the preprocessed output
print(f"Loading cached output of {func.__name__} from "
f"{filename}")
with open(filename, "rb") as f:
output = pickle.load(f)
return output
return wrapped_func
return _load_cached_output
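# ------------------------------------------------------------------------------
# Minimal self-contained demo (illustrative only; the cache file name and the
# toy "preprocessing" below are assumptions, not part of the original module).
# Running the script twice shows the later calls loading the pickled result
# instead of recomputing it.
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    @cached_to_file("squares_cache.bin")
    def compute_squares(n):
        # stands in for an expensive preprocessing step
        return [i * i for i in range(n)]

    print(compute_squares(5))                     # computed and cached
    print(compute_squares(5))                     # loaded from squares_cache.bin
    print(compute_squares(5, update_cache=True))  # forced recomputation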
|
import subprocess
import pytest
class TestCli:
@pytest.fixture
def example_files(self, tmp_path):
tmp_dir = tmp_path / 'example'
tmp_dir.mkdir()
return tmp_dir / 'data.json', tmp_dir / 'schema.json'
def test_cli(self, example_files):
data_file, schema_file = example_files
data_file.write_text("\"hello\"")
schema_file.write_text("\"string\"")
command = ["jsonvl", data_file, schema_file]
pipes = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, std_err = pipes.communicate()
if pipes.returncode != 0:
message = std_err.strip()
raise Exception(message)
    def test_cli_fail(self, example_files):
        data_file, schema_file = example_files
        data_file.write_text("\"hello\"")
        schema_file.write_text("\"number\"")
        command = ["jsonvl", data_file, schema_file]
        pipes = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        _, std_err = pipes.communicate()
        # Validation is expected to fail, so a zero exit code is itself an error
        assert pipes.returncode != 0
        message = std_err.decode().strip()
        expected = "Value hello is not of expected type number"
        assert expected in message
|
# Problem: Third Maximum Number
#
# Given a non-empty array of integers, return the third maximum number in this array.
# If it does not exist, return the maximum number. The time complexity must be in O(n).
#
# Example 1:
# Input: [3, 2, 1]
# Output: 1
# Explanation: The third maximum is 1.
#
# Example 2:
# Input: [1, 2]
# Output: 2
# Explanation: The third maximum does not exist, so the maximum (2) is returned instead.
#
# Example 3:
# Input: [2, 2, 3, 1]
# Output: 1
# Explanation: Note that the third maximum here means the third maximum distinct number.
#              Both numbers with value 2 are considered the second maximum.
#
################################################################################
class Solution(object):
def thirdMax(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
top = [float('-inf')] * 3
count = 0
for val in nums:
if val > top[0]:
top[0], top[1], top[2] = val, top[0], top[1]
count += 1
elif val != top[0] and val > top[1]:
top[1], top[2] = val, top[1]
count += 1
elif val != top[0] and val != top[1] and val > top[2]:
top[2] = val
count += 1
if count < 3:
return top[0]
else:
return top[2]
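# ------------------------------------------------------------------------------
# Quick self-check against the three examples above (added for illustration;
# not part of the original solution file).
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    solution = Solution()
    assert solution.thirdMax([3, 2, 1]) == 1
    assert solution.thirdMax([1, 2]) == 2
    assert solution.thirdMax([2, 2, 3, 1]) == 1
    print("all examples pass")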
|
import os
import json
import tqdm
import dataset_maker
from argparse import ArgumentParser
# TODO
# 1. rename ids
# 2. upload make_dataset code
# 3. write readme.md file for constructing dataset
# 4. erase other stuff
def arg_parse():
parser = ArgumentParser()
parser.add_argument('--dataset', type=str, default='COCOseq')
parser.add_argument('--phase', type=str, default='all')
parser.add_argument('--data_source', '-src', type=str, default='data_source')
parser.add_argument('--dest', '-dst', type=str, default='data')
parser.add_argument('--download_thread', '-dth', type=int, default=8)
args = parser.parse_args()
args.data_source = os.path.join(args.data_source, args.dataset)
args.dest = os.path.join(args.dest, args.dataset)
return args
if __name__ == '__main__':
args = arg_parse()
maker = dataset_maker.maker[args.dataset](args.data_source, args.download_thread)
if not os.path.exists(args.dest):
os.makedirs(args.dest)
phase_token = args.phase
if args.phase == 'all':
phase_token = ''
id_fnames = [fname for fname in os.listdir(os.path.join(args.data_source, 'ids')) \
if phase_token in fname]
id_fnames.sort()
for id_fname in tqdm.tqdm(id_fnames, desc='Total'):
with open(os.path.join(args.data_source, 'ids', id_fname), 'r') as f:
ids = json.load(f)
data_fname = id_fname.replace('_id_', '_{data}_').replace('.json', '.{ext}')
maker.make(ids, os.path.join(args.dest, data_fname))
maker.save_multihotdict(os.path.join(args.dest, 'multi_hot_dict_{dataset_name}.json'))
|
# Print a sorted list of entities and their kind
# Todo: Don't print entities from the Ada Standard library
# Hint: See sample 3
import understand
import sys
def sortedEntities(db):
for ent in sorted(db.ents(),key= lambda ent: ent.name()):
print (ent.name()," [",ent.kindname(),"]",sep="",end="\n")
if __name__ == '__main__':
# Open Database
args = sys.argv
db = understand.open(args[1])
sortedEntities(db)
|
from .tsv_utils import baseline_df
import shutil
from sklearn.model_selection import StratifiedKFold
sex_dict = {'M': 0, 'F': 1}
if __name__ == "__main__":
import argparse
import pandas as pd
import os
from os import path
import numpy as np
parser = argparse.ArgumentParser(description="Argparser for data formatting")
# Mandatory arguments
parser.add_argument("formatted_data_path", type=str,
help="Path to the folder containing formatted data.")
    # Split options
    parser.add_argument("--n_splits", type=int, default=5,
                        help="Number of splits used for the stratified k-fold cross-validation.")
parser.add_argument("--MCI_sub_categories", action="store_true", default=False,
help="Manage MCI sub-categories to avoid data leakage")
parser.add_argument("--subset_name", type=str, default="validation",
help="Name of the subset that is complementary to train.")
args = parser.parse_args()
# Read files
results_path = args.formatted_data_path
train_path = path.join(results_path, 'train_splits-' + str(args.n_splits))
if path.exists(train_path):
shutil.rmtree(train_path)
os.makedirs(train_path)
for i in range(args.n_splits):
os.mkdir(path.join(train_path, 'split-' + str(i)))
test_path = path.join(results_path, args.subset_name + '_splits-' + str(args.n_splits))
if path.exists(test_path):
shutil.rmtree(test_path)
os.makedirs(test_path)
for i in range(args.n_splits):
os.mkdir(path.join(test_path, 'split-' + str(i)))
diagnosis_df_paths = os.listdir(results_path)
diagnosis_df_paths = [x for x in diagnosis_df_paths if x.endswith('.tsv')]
diagnosis_df_paths = [x for x in diagnosis_df_paths if not x.endswith('_baseline.tsv')]
MCI_special_treatment = False
if args.MCI_sub_categories and 'MCI.tsv' in diagnosis_df_paths:
diagnosis_df_paths.remove('MCI.tsv')
MCI_special_treatment = True
    # The split must be done on baseline sessions, otherwise all sessions would be mixed between train and test
for diagnosis_df_path in diagnosis_df_paths:
print(diagnosis_df_path)
diagnosis = diagnosis_df_path.split('.')[0]
print(diagnosis)
diagnosis_df = pd.read_csv(path.join(results_path, diagnosis_df_path), sep='\t')
diagnosis_baseline_df = baseline_df(diagnosis_df, diagnosis)
diagnoses_list = list(diagnosis_baseline_df.diagnosis)
unique = list(set(diagnoses_list))
y = np.array(
[unique.index(x) for x in diagnoses_list]) # There is one label per diagnosis depending on the order
splits = StratifiedKFold(n_splits=args.n_splits, shuffle=True, random_state=2)
for i, indices in enumerate(splits.split(np.zeros(len(y)), y)):
train_index, test_index = indices
test_df = diagnosis_baseline_df.iloc[test_index]
train_df = diagnosis_baseline_df.iloc[train_index]
# Retrieve all sessions for the training set
complete_train_df = pd.DataFrame()
for idx in train_df.index.values:
subject = train_df.loc[idx, 'participant_id']
subject_df = diagnosis_df[diagnosis_df.participant_id == subject]
complete_train_df = pd.concat([complete_train_df, subject_df])
complete_train_df.to_csv(path.join(train_path, 'split-' + str(i), str(diagnosis) + '.tsv'),
sep='\t', index=False)
train_df.to_csv(
path.join(train_path, 'split-' + str(i), str(diagnosis) + '_baseline.tsv'), sep='\t', index=False)
test_df.to_csv(
path.join(test_path, 'split-' + str(i), str(diagnosis) + '_baseline.tsv'), sep='\t', index=False)
if MCI_special_treatment:
# Extraction of MCI subjects without intersection with the sMCI / pMCI train
diagnosis_df = pd.read_csv(path.join(results_path, 'MCI.tsv'), sep='\t')
MCI_df = diagnosis_df.set_index(['participant_id', 'session_id'])
supplementary_diagnoses = []
print('Before subjects removal')
sub_df = diagnosis_df.reset_index().groupby('participant_id')['session_id'].nunique()
print('%i subjects, %i scans' % (len(sub_df), len(diagnosis_df)))
if 'sMCI.tsv' in diagnosis_df_paths:
sMCI_baseline_df = pd.read_csv(path.join(results_path, 'sMCI_baseline.tsv'), sep='\t')
for idx in sMCI_baseline_df.index.values:
subject = sMCI_baseline_df.loc[idx, 'participant_id']
MCI_df.drop(subject, inplace=True, level=0)
supplementary_diagnoses.append('sMCI')
print('Removed %i subjects' % len(sMCI_baseline_df))
sub_df = MCI_df.reset_index().groupby('participant_id')['session_id'].nunique()
print('%i subjects, %i scans' % (len(sub_df), len(MCI_df)))
if 'pMCI.tsv' in diagnosis_df_paths:
pMCI_baseline_df = pd.read_csv(path.join(results_path, 'pMCI_baseline.tsv'), sep='\t')
for idx in pMCI_baseline_df.index.values:
subject = pMCI_baseline_df.loc[idx, 'participant_id']
MCI_df.drop(subject, inplace=True, level=0)
supplementary_diagnoses.append('pMCI')
print('Removed %i subjects' % len(pMCI_baseline_df))
sub_df = MCI_df.reset_index().groupby('participant_id')['session_id'].nunique()
print('%i subjects, %i scans' % (len(sub_df), len(MCI_df)))
if len(supplementary_diagnoses) == 0:
            raise ValueError('The MCI_sub_categories flag is not needed as there are no intersections with '
                             'MCI subcategories.')
diagnosis_baseline_df = baseline_df(MCI_df, 'MCI', False)
diagnoses_list = list(diagnosis_baseline_df.diagnosis)
unique = list(set(diagnoses_list))
y = np.array(
[unique.index(x) for x in diagnoses_list]) # There is one label per diagnosis depending on the order
splits = StratifiedKFold(n_splits=args.n_splits, shuffle=True, random_state=2)
for i, indices in enumerate(splits.split(np.zeros(len(y)), y)):
train_index, test_index = indices
test_df = diagnosis_baseline_df.iloc[test_index]
train_df = diagnosis_baseline_df.iloc[train_index]
# Add the sub categories
for diagnosis in supplementary_diagnoses:
sup_train_df = pd.read_csv(path.join(train_path, 'split-' + str(i), str(diagnosis) + '_baseline.tsv'),
sep='\t')
train_df = pd.concat([train_df, sup_train_df])
sup_test_df = pd.read_csv(path.join(test_path, 'split-' + str(i), str(diagnosis) + '_baseline.tsv'),
sep='\t')
test_df = pd.concat([test_df, sup_test_df])
train_df.reset_index(inplace=True, drop=True)
test_df.reset_index(inplace=True, drop=True)
train_df.diagnosis = ['MCI'] * len(train_df)
test_df.diagnosis = ['MCI'] * len(test_df)
# Retrieve all sessions for the training set
complete_train_df = pd.DataFrame()
for idx in train_df.index.values:
subject = train_df.loc[idx, 'participant_id']
subject_df = diagnosis_df[diagnosis_df.participant_id == subject]
complete_train_df = pd.concat([complete_train_df, subject_df])
complete_train_df.to_csv(path.join(train_path, 'split-' + str(i), 'MCI.tsv'),
sep='\t', index=False)
train_df.to_csv(
path.join(train_path, 'split-' + str(i), 'MCI_baseline.tsv'), sep='\t', index=False)
test_df.to_csv(
path.join(test_path, 'split-' + str(i), 'MCI_baseline.tsv'), sep='\t', index=False)
|
import pytest
from CreeDictionary.API.models import Wordform
from CreeDictionary.CreeDictionary.paradigm.filler import (
EmptyRowType,
TitleRow,
InflectionCell,
Layout,
)
from CreeDictionary.utils import ParadigmSize
from CreeDictionary.CreeDictionary.paradigm.generation import generate_paradigm
@pytest.mark.parametrize(
"lemma,examples",
[
# VTA
("wâpamêw", ["wâpamêw", "niwâpamâw", "kiwâpamitin", "ê-wâpamât"]),
# VAI
("nipâw", ["nipâw", "ninipân", "kinipân", "ninipânân"]),
# VTI
("mîcisow", ["mîcisow", "nimîcison", "kimîcison", "ê-mîcisoyit"]),
# VII
("nîpin", ["nîpin", "nîpin", "ê-nîpihk"]),
# NAD
("nôhkom", ["nôhkom", "kôhkom", "ohkoma"]),
# NID
("mîpit", ["mîpit", "nîpit", "kîpit", "wîpit"]),
# NA
("minôs", ["minôs", "minôsak", "minôsa"]),
# NI
("nipiy", ["nipiy", "nipîhk", "ninipiy", "kinipiy"]),
],
)
@pytest.mark.django_db
def test_paradigm(lemma: str, examples: list[str]):
"""
Test we can generate a paradigm from a given lemma.
"""
wordform = Wordform.objects.get(text=lemma, is_lemma=True)
paradigms = generate_paradigm(wordform, ParadigmSize.BASIC)
for inflection in examples:
assert paradigms_contain_inflection(paradigms, inflection)
def paradigms_contain_inflection(paradigms: list[Layout], inflection: str) -> bool:
for paradigm in paradigms:
for row in paradigm:
if isinstance(row, (EmptyRowType, TitleRow)):
continue
for cell in row:
if isinstance(cell, InflectionCell) and cell.inflection == inflection:
return True
return False
|
import logging
import struct
import six
from voltron.view import *
from voltron.plugin import *
from voltron.api import *
log = logging.getLogger("view")
class MemoryView (TerminalView):
printable_filter = ''.join([(len(repr(chr(x))) == 3) and chr(x) or '.' for x in range(256)])
async = True
@classmethod
def configure_subparser(cls, subparsers):
sp = subparsers.add_parser('memory', help='display a chunk of memory', aliases=('m', 'mem'))
VoltronView.add_generic_arguments(sp)
group = sp.add_mutually_exclusive_group(required=False)
group.add_argument('--deref', '-d', action='store_true', help=('display the data in a column one CPU word wide '
'and dereference any valid pointers'), default=False)
group.add_argument('--bytes', '-b', action='store', type=int, help='bytes per line (default 16)', default=16)
sp.add_argument('--reverse', '-v', action='store_true', help='reverse the output', default=False)
group = sp.add_mutually_exclusive_group(required=True)
group.add_argument('--address', '-a', action='store', help='address (in hex) from which to start reading memory')
group.add_argument('--command', '-c', action='store', help=('command to execute resulting in the address from '
'which to start reading memory. voltron will do his almighty best to find an address in the output by '
'splitting it on whitespace and searching from the end of the list of tokens. e.g. "print \$rip + 0x1234"'),
default=None)
group.add_argument('--register', '-r', action='store', help='register containing the address from which to start reading memory', default=None)
sp.set_defaults(func=MemoryView)
def render(self):
height, width = self.window_size()
target = None
# check args
if self.args.register:
args = {'register': self.args.register}
elif self.args.command:
args = {'command': self.args.command}
else:
args = {'address': self.args.address}
if self.args.deref:
args['words'] = height
else:
args['length'] = height*self.args.bytes
# get memory and target info
m_res, t_res = self.client.send_requests(
api_request('memory', block=self.block, deref=True, **args),
api_request('targets', block=self.block))
# don't render if it timed out, probably haven't stepped the debugger again
if t_res.timed_out:
return
if t_res and t_res.is_success and len(t_res.targets) > 0:
target = t_res.targets[0]
if self.args.deref:
self.args.bytes = target['addr_size']
if m_res and m_res.is_success:
lines = []
for c in range(0, m_res.bytes, self.args.bytes):
chunk = m_res.memory[c:c+self.args.bytes]
addr_str = self.colour(self.format_address(m_res.address + c, size=target['addr_size'], pad=False),
self.config.format.addr_colour)
if self.args.deref:
fmt = ('<' if target['byte_order'] == 'little' else '>') + \
{2: 'H', 4: 'L', 8: 'Q'}[target['addr_size']]
info_str = ''
if len(chunk) == target['addr_size']:
pointer = list(struct.unpack(fmt, chunk))[0]
memory_str = ' '.join(["%02X" % x for x in six.iterbytes(chunk)])
info_str = self.format_deref(m_res.deref.pop(0))
else:
memory_str = ' '.join(["%02X" % x for x in six.iterbytes(chunk)])
info_str = ''
ascii_str = ''.join(["%s" % ((x <= 127 and self.printable_filter[x]) or '.') for x in six.iterbytes(chunk)])
divider = self.colour('|', self.config.format.divider_colour)
lines.append('{}: {} {} {} {} {}'.format(addr_str, memory_str, divider, ascii_str, divider, info_str))
self.body = '\n'.join(reversed(lines)).strip() if self.args.reverse else '\n'.join(lines)
self.info = '[0x{0:0=4x}:'.format(len(m_res.memory)) + self.config.format.addr_format.format(m_res.address) + ']'
else:
log.error("Error reading memory: {}".format(m_res.message))
self.body = self.colour(m_res.message, 'red')
self.info = ''
else:
self.body = self.colour("Failed to get targets", 'red')
if not self.title:
self.title = "[memory]"
super(MemoryView, self).render()
def format_address(self, address, size=8, pad=True, prefix='0x'):
fmt = '{:' + ('0=' + str(size*2) if pad else '') + 'X}'
addr_str = fmt.format(address)
if prefix:
addr_str = prefix + addr_str
return addr_str
def format_deref(self, deref, size=8):
fmtd = []
for t,item in deref:
if t == "pointer":
fmtd.append(self.format_address(item, size=size, pad=False))
elif t == "string":
item = item.replace('\n', '\\n')
fmtd.append(self.colour('"' + item + '"', self.config.format.string_colour))
elif t == "symbol":
fmtd.append(self.colour('`' + item + '`', self.config.format.symbol_colour))
elif t == "circular":
fmtd.append(self.colour('(circular)', self.config.format.divider_colour))
return self.colour(' => ', self.config.format.divider_colour).join(fmtd)
class MemoryViewPlugin(ViewPlugin):
plugin_type = 'view'
name = 'memory'
view_class = MemoryView
class StackView(MemoryView):
@classmethod
def configure_subparser(cls, subparsers):
sp = subparsers.add_parser('stack', help='display a chunk of stack memory', aliases=('s', 'st'))
VoltronView.add_generic_arguments(sp)
sp.set_defaults(func=StackView)
def render(self):
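        # Reuse MemoryView with settings fixed for a stack view: dereference
        # CPU-word-sized values starting at the stack pointer register and
        # render the lines in reverse order.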
self.args.reverse = True
self.args.deref = True
self.args.register = 'sp'
self.args.command = None
self.args.address = None
self.args.bytes = None
self.title = '[stack]'
super(StackView, self).render()
class StackViewPlugin(ViewPlugin):
plugin_type = 'view'
name = 'stack'
view_class = StackView
|
#!/usr/bin/env python3
# ------------------------------------------------------------------------------
# stress test waflz_server
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Imports
# ------------------------------------------------------------------------------
import sys
import argparse
import time
import signal
import json
import random
import base64
import datetime
from urllib.request import urlopen
from urllib.request import Request
# ------------------------------------------------------------------------------
# Globals
# ------------------------------------------------------------------------------
g_run = True
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def signal_handler(signal, frame):
global g_run
g_run = False
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def print_banner():
print('+-----------------------------------------------------------------------------+')
print('| W A F L Z S E R V E R S T R E S S T E S T E R |')
print('+------------+------------+------------+------------+------------+------------+')
print('| Req/s | 200s | 300s | 400s | 500s | Confs/s |')
print('+------------+------------+------------+------------+------------+------------+')
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def print_stats_line(a_time_delta_ms, a_num_reqs, a_num_configs, a_results):
if '200' not in a_results:
a_results['200'] = 0
if '300' not in a_results:
a_results['300'] = 0
if '400' not in a_results:
a_results['400'] = 0
if '500' not in a_results:
a_results['500'] = 0
print('| %10.2f | %10d | %10d | %10d | %10d | %10d |' % (
(a_num_reqs*1000/a_time_delta_ms),
a_results['200'],
a_results['300'],
a_results['400'],
a_results['500'],
(a_num_configs*1000/a_time_delta_ms)))
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def get_rqst(a_host, a_id, a_vectors, a_idx, a_results):
# print('get_rqst: a_idx: %d' % (a_idx))
# time.sleep(1.0)
l_url = a_host
l_v = a_vectors[a_idx]
l_headers = {'x-ec-waf-instance-id': str(a_id)}
l_body = ''
#l_headers = {'x-ec-waf-instance-id': str(1)}
if 'uri' in l_v:
l_url = '%s/%s'%(a_host, l_v['uri'])
if 'query_string' in l_v:
l_url += '?'
l_url += l_v['query_string']
if 'headers' in l_v and len(l_v['headers']):
l_headers.update(l_v['headers'])
if 'body' in l_v:
l_body = base64.b64decode(l_v['body'])
else:
l_body = l_body.encode()
l_r = None
try:
# print('l_url: %s'%(l_url))
# print('l_headers: %s'%(l_headers))
l_rq = Request(url=l_url,
data=l_body,
headers=l_headers)
l_r = urlopen(l_rq, timeout=20.0)
except Exception as l_e:
print('error requesting. Reason: %s error: %s, doc: %s' % (
type(l_e), l_e, l_e.__doc__))
pass
# ------------------------------------------------------
# codes
# ------------------------------------------------------
if not l_r:
if '500' in a_results:
a_results['500'] += 1
else:
a_results['500'] = 1
else:
l_code = l_r.getcode()
if l_code >= 200 and l_code < 300:
if '200' in a_results:
a_results['200'] += 1
else:
a_results['200'] = 1
if l_code >= 300 and l_code < 400:
if '300' in a_results:
a_results['300'] += 1
else:
a_results['300'] = 1
if l_code >= 400 and l_code < 500:
if '400' in a_results:
a_results['400'] += 1
else:
a_results['400'] = 1
if l_code >= 500 and l_code < 600:
if '500' in a_results:
a_results['500'] += 1
else:
a_results['500'] = 1
#if l_r.getcode() != 200:
# print('error: performing GET to %s -status: %d. Response: %s' % (l_url, l_r.getcode(), l_body))
# sys.exit(1)
#try:
# l_body = l_r.read()
# l_r_json = json.loads(l_body)
#except:
# print('error: performing POST to %s Response not json' % (l_url))
# sys.exit(1)
#print(l_r_json)
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def post_config(a_host, a_template, a_idx):
# print('post_config: a_idx: %s' % (a_idx)
# print(json.dumps(a_template)
# if 'id' in a_template:
# a_template['id'] = str(a_idx)
# else:
# a_template['id'] = '1'
if isinstance(a_template, list):
for l_instance in a_template:
if 'last_modified_date' in l_instance:
l_instance['last_modified_date'] = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
if 'id' in l_instance:
l_instance['id'] = str(a_idx)
else:
l_instance['id'] = '1'
else:
if "last_modified_date" in a_template:
a_template['last_modified_date'] = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
if 'id' in a_template:
a_template['id'] = str(a_idx)
else:
a_template['id'] = '1'
l_headers = {}
l_headers['Content-type'] = 'application/json'
l_url = '%s/update_instance'%(a_host)
l_body = json.dumps(a_template)
# print(l_body)
# ------------------------------------------------------
# urlopen (POST)
# ------------------------------------------------------
try:
l_rq = Request(l_url, l_body.encode(), l_headers)
l_r = urlopen(l_rq, timeout=20.0)
except Exception as l_e:
print('error: performing POST to %s. Exception: %s' % (l_url, l_e))
sys.exit(1)
l_body = l_r.read()
if l_r.getcode() != 200:
print('error: performing POST to %s -status: %d. Response: %s' % (l_url, l_r.getcode(), l_body))
sys.exit(1)
# try:
# l_body = l_r.read()
# l_r_json = json.loads(l_body)
# except:
# print('error: performing POST to %s Response not json' % (l_url))
# sys.exit(1)
# print(l_r_json)
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def waflz_server_stress(a_verbose,
a_port,
a_template,
a_vector_file,
a_num_ids):
global g_run
l_host = 'http://127.0.0.1:%d'%(a_port)
# ------------------------------------------------------
# read template
# ------------------------------------------------------
l_template = []
try:
with open(a_template) as l_f:
l_template = json.load(l_f)
except Exception as l_e:
        print('error opening template file: %s. Reason: %s error: %s, doc: %s' % (
            a_template, type(l_e), l_e, l_e.__doc__))
sys.exit(-1)
print('preloading configs')
l_time_ms_last = time.time()*1000
i_c = 0
while g_run:
i_c += 1
post_config(l_host, l_template, i_c)
l_time_ms_cur = time.time()*1000
if l_time_ms_cur > (l_time_ms_last + 100):
l_time_ms_last = time.time()*1000
l_time_ms_next = l_time_ms_last + 100
print('%6.2f done'%((((float(i_c))) / (a_num_ids)) *100.0))
if i_c == a_num_ids:
break
if not g_run:
return
print_banner()
# ------------------------------------------------------
# read vector file
# ------------------------------------------------------
l_vectors = []
try:
with open(a_vector_file) as l_f:
l_vectors = json.load(l_f)
except Exception as l_e:
print('error opening vector file: %s. Reason: %s error: %s, doc: %s' % (
a_vector_file, type(l_e), l_e, l_e.__doc__))
sys.exit(-1)
# print(l_vectors)
# ------------------------------------------------------
# setup
# ------------------------------------------------------
l_v_size = len(l_vectors)
l_v_idx = 0
l_time_ms_last = time.time()*1000
l_num_reqs = 0
l_num_reqs_total = 0
l_num_confs = 0
l_num_confs_total = 0
l_results = {}
# ------------------------------------------------------
# run...
# ------------------------------------------------------
while g_run:
l_id = random.randint(1, a_num_ids)
get_rqst(l_host, l_id, l_vectors, l_v_idx, l_results)
l_v_idx += 1
if l_v_idx >= l_v_size:
l_v_idx = 0
l_num_reqs += 1
l_num_reqs_total += 1
if l_num_reqs_total % 100 == 0:
post_config(l_host, l_template, int(l_id))
l_num_confs += 1
l_num_confs_total += 1
l_time_ms_cur = time.time()*1000
if l_time_ms_cur > (l_time_ms_last + 100):
print_stats_line(l_time_ms_cur - l_time_ms_last, l_num_reqs, l_num_confs, l_results)
l_time_ms_last = time.time()*1000
l_time_ms_next = l_time_ms_last + 100
l_num_reqs = 0
l_num_confs = 0
l_results = {}
# ------------------------------------------------------
# done...
# ------------------------------------------------------
print('...shutting down...')
# ------------------------------------------------------------------------------
# main
# ------------------------------------------------------------------------------
def main(argv):
l_arg_parser = argparse.ArgumentParser(
description='waflz_server stress tester',
usage='%(prog)s -t <instance_template>',
epilog='')
l_arg_parser.add_argument(
'-v',
'--verbose',
dest='verbose',
help='Verbosity.',
action='store_true',
default=False,
required=False)
l_arg_parser.add_argument(
'-t',
'--template',
dest='template',
help='instance template (REQUIRED).',
required=True)
l_arg_parser.add_argument(
'-x',
'--vectors',
dest='vector_file',
help='request vector file (REQUIRED).',
required=True)
l_arg_parser.add_argument(
'-p',
'--port',
dest='port',
help='waflz_server port (default: 12345).',
default=12345,
type=int,
required=False)
l_arg_parser.add_argument(
'-n',
'--num_ids',
dest='num_ids',
        help='number of account ids to cycle through (default: 1).',
type=int,
default=1,
required=False)
l_args = l_arg_parser.parse_args()
signal.signal(signal.SIGINT, signal_handler)
waflz_server_stress(a_verbose=l_args.verbose,
a_port=l_args.port,
a_template=l_args.template,
a_vector_file=l_args.vector_file,
a_num_ids=l_args.num_ids)
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
if __name__ == "__main__":
main(sys.argv[1:])
|
# -*- coding: utf-8 -*-
"""Tests for :mod:`~astronat.utils.table.utils`."""
__all__ = [
"test_rename_columns",
"test_cast_columns",
]
##############################################################################
# IMPORTS
import astropy.units as u
from astropy.table import QTable
from astronat.utils.table import utils
##############################################################################
# PARAMETERS
tbl = QTable([[2.0, 5.0], ["x", "y"]], names=("a", "b"))
##############################################################################
# CODE
##############################################################################
def test_rename_columns():
"""Test `~astronat.utils.table.utils.rename_columns`."""
utils.rename_columns(table=tbl, rename={"a": "A"})
assert tbl.colnames == ["A", "b"]
utils.rename_columns(table=tbl, rename={"A": "a"})
assert tbl.colnames == ["a", "b"]
# /def
# -------------------------------------------------------------------
def test_cast_columns():
"""Test `~astronat.utils.table.utils.rename_columns`."""
utils.cast_columns(table=tbl, recast={"a": lambda x: x * u.km})
assert all(tbl["a"] == [2.0, 5.0] * u.km)
# /def
##############################################################################
# END
|
import idiokit
class Counter(object):
def __init__(self):
self.keys = dict()
def get(self, key):
return self.keys.get(key, ())
def contains(self, key, value=None):
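        # inc() followed by dec() leaves the stored counts unchanged; dec()
        # returns True only when the (key, value) pair was not being tracked,
        # so negating it tells us whether the pair is currently present.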
self.inc(key, value)
return not self.dec(key, value)
def inc(self, key, value=None):
if key not in self.keys:
self.keys[key] = dict()
if value not in self.keys[key]:
self.keys[key][value] = 1
return True
self.keys[key][value] += 1
return False
def dec(self, key, value=None):
if key not in self.keys:
return True
if value not in self.keys[key]:
return True
self.keys[key][value] -= 1
if self.keys[key][value] <= 0:
del self.keys[key][value]
if not self.keys[key]:
del self.keys[key]
return True
return False
def __nonzero__(self):
return not not self.keys
def __iter__(self):
for key, values in self.keys.iteritems():
yield key, values
class TaskStopped(Exception):
pass
class TaskFarm(object):
def __init__(self, task, signal=TaskStopped(), grace_period=1.0):
self.task = task
self.signal = signal
self.grace_period = grace_period
self.tasks = dict()
self.counter = Counter()
def _key(self, *args, **keys):
return tuple(args), frozenset(keys.items())
@idiokit.stream
def _cleanup(self, key):
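        # Consume until the pipe is torn down, then release this reference to
        # the task. If it was the last reference, wait for the grace period and
        # stop the task only if nobody has re-acquired it in the meantime.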
try:
yield idiokit.consume()
finally:
if self.counter.dec(key):
yield idiokit.sleep(self.grace_period)
if not self.counter.contains(key) and key in self.tasks:
task = self.tasks.pop(key)
task.throw(self.signal)
def inc(self, *args, **keys):
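        # Acquire a reference to the task for these arguments, creating and
        # starting it on first use. The returned fork is wired to _cleanup so
        # the reference is released when the fork is torn down.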
key = self._key(*args, **keys)
if self.counter.inc(key) and key not in self.tasks:
self.tasks[key] = self.task(*args, **keys)
task = self.tasks[key]
fork = task.fork()
idiokit.pipe(self._cleanup(key), fork)
return fork
def get(self, *args, **keys):
key = self._key(*args, **keys)
if key not in self.tasks:
return None
return self.tasks[key]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
monday = pd.read_csv('data/monday.csv', delimiter =';')
tuesday = pd.read_csv('data/tuesday.csv', delimiter =';' )
wednesday = pd.read_csv('data/wednesday.csv', delimiter =';')
thursday = pd.read_csv('data/thursday.csv', delimiter =';')
friday = pd.read_csv('data/friday.csv', delimiter =';')
df = pd.concat([monday, tuesday, wednesday, thursday, friday], axis = 0)
df['timestamp'] = pd.to_datetime(df['timestamp'])
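# Map each section to its revenue-per-minute figure; any location not matched by
# a condition falls back to np.select's default value of 0.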
conditions = [
(df['location'] == "fruit"),
(df['location'] == "spices"),
(df['location'] == "dairy"),
(df['location'] == "drinks"),
]
values = [4, 3, 5, 6]
df['revenue_per_minute'] = np.select(conditions, values)
df["unq_id"] = 'Cust_' + df["timestamp"].astype(str) + '_no_' + df["customer_no"].astype(str)
df = df[['timestamp', 'unq_id', 'customer_no', 'location' , 'revenue_per_minute']]
#######################
# transition probabilities
########################
df_tp = df.sort_values(['customer_no', 'timestamp'])
df_tp.set_index('timestamp', inplace=True)
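# Resample each customer's visit to a 1-minute grid and forward-fill the last
# known location, so the shift/crosstab below yields the minute-to-minute
# transition probabilities between sections.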
df_tp = df_tp.groupby('customer_no').resample('1min').fillna('ffill')
df_tp['before'] = df_tp['location'].shift(1)
trans_prob = pd.crosstab(df_tp['location'], df_tp['before'], normalize=0)
trans_prob.to_csv('data/trans_matrix_prob.csv', index=True)
#######################
# prepped data
######################
df_prep = df.sort_values(['customer_no', 'timestamp'])
df_prep.set_index('timestamp', inplace=True)
df_prep = df_prep.groupby('unq_id').resample('1min').fillna('ffill')
df_prep['before'] = df_prep['location'].shift(1)
df_prep = df_prep.drop(columns=['unq_id'])
df_prep.to_csv('data/df_prepped.csv', index=True)
|
from vyked import Bus
from ..golem.golem import Golem
REGISTRY_HOST = '127.0.0.1'
REGISTRY_PORT = 4500
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
if __name__ == '__main__':
bus = Bus()
Article, tcp_service, http_service = Golem.generate("Article", [('username', 'id'), ('email', str)])
tcp_service.ronin = True
http_service.ronin = True
bus.serve_tcp(tcp_service)
bus.serve_http(http_service)
bus.start(REGISTRY_HOST, REGISTRY_PORT, REDIS_HOST, REDIS_PORT)
|
import base64
import json
import os
import requests
from apmserver import ServerBaseTest, ElasticTest
from apmserver import TimeoutError, integration_test
from test_apikey_cmd import APIKeyHelper
from helper import wait_until
def headers(auth=None, content_type='application/x-ndjson'):
h = {'content-type': content_type}
if auth is not None:
auth_headers = {'Authorization': auth}
auth_headers.update(h)
return auth_headers
return h
class TestAccessDefault(ServerBaseTest):
"""
Unsecured endpoints
"""
def test_full_access(self):
"""
        Test that requests are accepted with or without credentials when API Key usage is disabled
"""
events = self.get_event_payload()
# access without token allowed
resp = requests.post(self.intake_url, data=events, headers=headers())
assert resp.status_code == 202, resp.status_code
# access with any Bearer token allowed
resp = requests.post(self.intake_url, data=events, headers=headers(auth="Bearer 1234"))
assert resp.status_code == 202, resp.status_code
# access with any API Key allowed
resp = requests.post(self.intake_url, data=events, headers=headers(auth=""))
assert resp.status_code == 202, resp.status_code
@integration_test
class APIKeyBaseTest(ElasticTest):
def setUp(self):
# application
self.application = "apm"
# apm privileges
self.privilege_agent_config = "config_agent:read"
self.privilege_event = "event:write"
self.privilege_sourcemap = "sourcemap:write"
self.privileges = {
"agentConfig": self.privilege_agent_config,
"event": self.privilege_event,
"sourcemap": self.privilege_sourcemap
}
self.privileges_all = list(self.privileges.values())
self.privilege_any = "*"
# resources
self.resource_any = ["*"]
self.resource_backend = ["-"]
user = os.getenv("ES_USER", "apm_server_user")
password = os.getenv("ES_PASS", "changeme")
self.apikey_name = "apm-systemtest"
self.apikey = APIKeyHelper(self.get_elasticsearch_url(user, password))
# delete all existing api_keys with defined name of current user
self.apikey.invalidate(self.apikey_name)
# delete all existing application privileges to ensure they can be created for current user
for p in self.privileges.keys():
url = "{}/{}/{}".format(self.apikey.privileges_url, self.application, p)
requests.delete(url)
wait_until(lambda: requests.get(url).status_code == 404)
super(APIKeyBaseTest, self).setUp()
def create_api_key_header(self, privileges, resources, application="apm"):
return "ApiKey {}".format(self.create_apm_api_key(privileges, resources, application=application))
def create_apm_api_key(self, privileges, resources, application="apm"):
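        # Create an API key restricted to the given application privileges and
        # resources, and return it encoded as base64("id:api_key"), which is the
        # value expected after the "ApiKey " scheme in the Authorization header.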
payload = json.dumps({
"name": self.apikey_name,
"role_descriptors": {
self.apikey_name + "role_desc": {
"applications": [
{"application": application, "privileges": privileges, "resources": resources}]}}})
resp = self.apikey.create(payload)
enc = "utf-8"
return str(base64.b64encode("{}:{}".format(resp["id"], resp["api_key"]).encode(enc)), enc)
@integration_test
class TestAPIKeyCache(APIKeyBaseTest):
def config(self):
cfg = super(TestAPIKeyCache, self).config()
cfg.update({"api_key_enabled": True, "api_key_limit": 5})
return cfg
def test_cache_full(self):
"""
Test that authorized API Key is not accepted when cache is full
api_key.limit: number of unique API Keys per minute => cache size
"""
key1 = self.create_api_key_header([self.privilege_event], self.resource_any)
key2 = self.create_api_key_header([self.privilege_event], self.resource_any)
def assert_intake(api_key, authorized):
resp = requests.post(self.intake_url, data=self.get_event_payload(), headers=headers(api_key))
if authorized:
assert resp.status_code != 401, "token: {}, status_code: {}".format(api_key, resp.status_code)
else:
assert resp.status_code == 401, "token: {}, status_code: {}".format(api_key, resp.status_code)
# fill cache up until one spot
for i in range(4):
assert_intake("ApiKey xyz{}".format(i), authorized=False)
# allow for authorized api key
assert_intake(key1, True)
# hit cache size
assert_intake(key2, False)
# still allow already cached api key
assert_intake(key1, True)
@integration_test
class TestAPIKeyWithInvalidESConfig(APIKeyBaseTest):
def config(self):
cfg = super(TestAPIKeyWithInvalidESConfig, self).config()
cfg.update({"api_key_enabled": True, "api_key_es": "localhost:9999"})
return cfg
def test_backend_intake(self):
"""
API Key cannot be verified when invalid Elasticsearch instance configured
"""
name = "system_test_invalid_es"
key = self.create_api_key_header([self.privilege_event], self.resource_any)
resp = requests.post(self.intake_url, data=self.get_event_payload(), headers=headers(key))
assert resp.status_code == 401, "token: {}, status_code: {}".format(key, resp.status_code)
@integration_test
class TestAPIKeyWithESConfig(APIKeyBaseTest):
def config(self):
cfg = super(TestAPIKeyWithESConfig, self).config()
cfg.update({"api_key_enabled": True, "api_key_es": self.get_elasticsearch_url()})
return cfg
def test_backend_intake(self):
"""
Use dedicated Elasticsearch configuration for API Key validation
"""
key = self.create_api_key_header([self.privilege_event], self.resource_any)
resp = requests.post(self.intake_url, data=self.get_event_payload(), headers=headers(key))
assert resp.status_code == 202, "token: {}, status_code: {}".format(key, resp.status_code)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-21 22:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('guides', '0002_add_languages'),
]
operations = [
migrations.AddField(
model_name='guide',
name='gender',
field=models.CharField(blank=True, max_length=32, null=True, verbose_name='Gender'),
),
migrations.AlterField(
model_name='guide',
name='additional_info',
field=models.TextField(blank=True, verbose_name='Is there anything else we should know?'),
),
migrations.AlterField(
model_name='guide',
name='arrival_date',
            field=models.CharField(max_length=64, verbose_name='What date are you arriving at the next IETF meeting (MM/DD/YYYY)?'),
),
migrations.AlterField(
model_name='guide',
name='give_intro',
field=models.CharField(choices=[('YES', 'Yes'), ('NO', 'No'), ('MAYBE', 'Maybe')], default='YES', help_text='<em>(Sometimes it is not possible to exactly match guides with participants and their preferred technical areas)</em>', max_length=32, verbose_name='Are you willing to give a general introduction of the IETF to a newcomer program participant?'),
),
migrations.AlterField(
model_name='participant',
name='additional_info',
field=models.TextField(blank=True, verbose_name='Is there anything else you would like to share with us?'),
),
migrations.AlterField(
model_name='participant',
name='areas',
            field=models.CharField(help_text='<p>IETF Areas include:</p>\n<ul>\n<li>ART: Applications and Real-Time</li>\n<li>INT: Internet</li>\n<li>OPS: Operations and Management</li>\n<li>RTG: Routing</li>\n<li>SEC: Security</li>\n<li>TSG: Transport</li>\n<li>UNKNOWN: I don\'t know yet</li>\n</ul><p>Further information <a href="https://www.ietf.org/topics/areas/"> is also available about IETF areas</a>', max_length=64, verbose_name='What IETF area(s) most interest you'),
),
migrations.AlterField(
model_name='participant',
name='attend',
field=models.CharField(choices=[('None', 'This is my first IETF meeting'), ('One', 'This is my second IETF meeting'), ('Two', 'This is my third IETF meeting'), ('Three', 'This is my fourth IETF meeting')], default='None', max_length=32, verbose_name='Number of IETFs attended'),
),
migrations.AlterField(
model_name='participant',
name='groups',
            field=models.CharField(help_text='see <a href="https://www.ietf.org/how/wgs">https://www.ietf.org/how/wgs</a>', max_length=256, verbose_name='Which working groups are you most interested in?'),
),
]
|
from rayintegral_kernels.kernel import TrapezoidKernel, RandomKernel, RBF, M52, M32, M12, RQ
import tensorflow as tf
from rayintegral_kernels import float_type
import numpy as np
import pylab as plt
def main():
"""
D(R) = <(phi(r) - phi(r+R))^2>
= <K^2(TEC(r) - TEC(r + R))^2>
= K^2<DTEC(r, -R)^2>
:return:
"""
fig, ax = plt.subplots(1,1,figsize=(6,6))
baselines = 10 ** np.linspace(-1., 2, 100)
k_obs = []
for _ in range(500):
ds = np.random.uniform(5., 30.)
beta = np.random.normal(1.89, 0.1)
k_obs.append((baselines / ds) ** (beta))
ax.plot(baselines, np.mean(k_obs, axis=0), lw=2., color='black', label='Mevius+ 2016')
ax.fill_between(baselines, np.percentile(k_obs, 5, axis=0), np.percentile(k_obs, 95, axis=0), color='yellow',
alpha=0.5)
k_turb = (baselines / 10.) ** (5. / 3.)
ax.plot(baselines, k_turb, color='green', lw=2., label=r'Kolmogorov $5/3$')
with tf.Session() as sess:
x = tf.placeholder(float_type, shape=(3,))
k = tf.placeholder(float_type, shape=(3,))
khat = k/tf.linalg.norm(k)
X = tf.concat([khat[None,:],x[None,:]],axis=1)#1,6
theta = tf.constant([6., 14., 1./3.], float_type)
int_kern = M32(theta)# + RBF(theta/200.)
kern = TrapezoidKernel(int_kern,
20,
tf.constant(250., float_type),
tf.constant(100., float_type),
obs_type='DTEC')
K = kern.K(X,X)
xy = []
z = []
for b in baselines:
xy.append([np.concatenate([[b, 0.,0.]], axis=0),
[0.,0.,1.]])
# np.concatenate([0.0 * np.random.normal(size=2), [1.]],axis=0)
xy[-1][1] /= np.linalg.norm(xy[-1][1])
np_K = sess.run(K,{x:xy[-1][0],
k:xy[-1][1]})
z.append(np.sqrt(np_K[0,0]))
xy = np.array(xy)
z = 8.448e6*np.array(z)/150e6
ax.plot(xy[:,0,0],z**2, ls='dotted', lw=2., color='blue', label='dawn')
with tf.Session() as sess:
x = tf.placeholder(float_type, shape=(3,))
k = tf.placeholder(float_type, shape=(3,))
khat = k / tf.linalg.norm(k)
X = tf.concat([khat[None, :], x[None, :]], axis=1) # 1,6
theta = tf.constant([3., 17., 1. / 3.], float_type)
int_kern = RBF(theta) # + RBF(theta/200.)
kern = TrapezoidKernel(int_kern,
20,
tf.constant(350., float_type),
tf.constant(200., float_type),
obs_type='DTEC')
K = kern.K(X, X)
xy = []
z = []
for b in baselines:
xy.append([np.concatenate([[b, 0., 0.]], axis=0),
[0., 0., 1.]])
# np.concatenate([0.0 * np.random.normal(size=2), [1.]],axis=0)
xy[-1][1] /= np.linalg.norm(xy[-1][1])
np_K = sess.run(K, {x: xy[-1][0],
k: xy[-1][1]})
z.append(np.sqrt(np_K[0, 0]))
xy = np.array(xy)
z = 8.448e6 * np.array(z) / 150e6
ax.plot(xy[:, 0, 0], z ** 2, ls='dashed', lw=2., color='pink', label='dusk')
print(z)
ax.grid()
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim(0.1,100.)
ax.set_ylim(1e-4,1e2)
ax.set_ylabel(r"$\mathrm{Var}[\phi_{\rm 150}]$ [rad$^2$]")
ax.set_xlabel("Baseline [km]")
# plt.tricontour(xy[:,0,0],xy[:,1,0], z,levels=10)
# plt.title("Over x")
ax.legend()
plt.savefig("/home/albert/Documents/structure_function.pdf")
plt.show()
plt.subplot(projection='3d')
plt.scatter(xy[:, 1, 0], xy[:, 1, 1], c=z, alpha=0.5, marker='+')
plt.tricontour(xy[:, 1, 0], xy[:, 1, 1], z, levels=10)
plt.title("Over k")
plt.show()
diff_scale = []
thetas = np.linspace(0., np.pi/6., 100)
with tf.Session() as sess:
x = tf.placeholder(float_type, shape=(3,))
k = tf.placeholder(float_type, shape=(3,))
khat = k/tf.linalg.norm(k)
X = tf.concat([khat[None,:],x[None,:]],axis=1)#1,6
theta = tf.constant([10., 14., 1./3.], float_type)
int_kern = M32(theta)# + RBF(theta/200.)
kern = TrapezoidKernel(int_kern,
20,
tf.constant(250., float_type),
tf.constant(100., float_type),
obs_type='DTEC')
K = kern.K(X,X)
for theta in thetas:
xy = []
z = []
for b in baselines:
xy.append([np.concatenate([[b, 0.,0.]], axis=0),
[0.,np.sin(theta),np.cos(theta)]])
# np.concatenate([0.0 * np.random.normal(size=2), [1.]],axis=0)
xy[-1][1] /= np.linalg.norm(xy[-1][1])
np_K = sess.run(K,{x:xy[-1][0],
k:xy[-1][1]})
z.append(np.sqrt(np_K[0,0]))
xy = np.array(xy)
z = 8.448e6*np.array(z)/150e6
ds = np.interp(1., z, xy[:,0,0])
diff_scale.append(ds)
fig, ax = plt.subplots(1,1, figsize=(6,4))
ax.plot(np.array(thetas)*180/np.pi, diff_scale, lw=2., color='blue')
ax.grid()
ax.set_ylabel(r'$r_{\rm scale}$ [km]')
ax.set_xlabel('Zenith angle [degrees]')
plt.savefig('/home/albert/Documents/diffractive_scale.pdf')
plt.show()
if __name__ == '__main__':
main()
|
"""
graph.py
An ad-hoc implementation of a graph. NetworkX was inefficient for what I was
trying to do: matching many small graphs against a small set of target graphs.
Graph bags are sets of graphs that support fast containment tests.
>>> g1 = Graph()
>>> g1.add_edge("1", "2")
>>> g1.add_edge("3", "2")
>>>
>>> g2 = Graph()
>>> g2.add_edge(9, 2)
>>> g2.add_edge(3, 2)
>>>
>>> gb = GraphBag()
>>> gb.add(g1)
>>> g2 in gb
True
>>> Graph() in gb
False
"""
import collections
import itertools
class Graph:
"""
Undirected graph with loops and parallel edges.
"""
def __init__(self, vec=None):
self.m = collections.Counter()
self.edges = []
def add_node(self, v):
        # Counter lookups do not store missing keys; assign so isolated vertices appear
        self.m[v, v] += 0
def add_edge(self, v, w):
self.edges.append((v, w))
self.m[v, w] += 1
self.m[w, v] += 1
def vertices(self):
"""
The vertices in the graph.
"""
return list(set(v for v,w in self.m))
def adj_mat(self, vs):
"""
Adjacency matrix as tuple.
Basis is chosen wrt ordering vs.
"""
m = []
for v in vs:
row = []
for w in vs:
row.append(self.m[v, w])
m.append(tuple(row))
return tuple(m)
class GraphBag(dict):
def __init__(self):
super().__init__()
self.size = 0
self.members = []
def add(self, g):
if g in self:
return
self.members.append(g)
vs = g.vertices()
for vs_perm in itertools.permutations(vs):
adj_mat = g.adj_mat(vs_perm)
super().__setitem__(adj_mat, self.size)
self.size += 1
def __getitem__(self, g):
return super().__getitem__(g.adj_mat(g.vertices()))
def __contains__(self, g):
return super().__contains__(g.adj_mat(g.vertices()))
def make_target(filename):
"""
    Given a file with a list of graphs, each given by a list of edges, produce a
    GraphBag whose keys are all the possible adjacency matrices that can
    represent a graph from the file.
"""
targets = GraphBag()
edges = []
f = open(filename)
lines = f.readlines()
f.close()
lines.append("")
for i, line in enumerate(lines):
line = line.strip()
if len(line) == 0:
if len(edges) == 0:
continue
assert len(edges) == 12, str(edges)
g = Graph()
for e in edges:
[v, w] = [int(x) for x in e.split()]
g.add_edge(v,w)
targets.add(g)
edges = []
else:
edges.append(line)
if edges:
assert False
return targets
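# Expected input format for make_target (an inferred example, based on the
# parser above; the concrete numbers are made up): each graph is a block of
# twelve "v w" edge lines, and blocks are separated by blank lines, e.g.
#
#     0 1
#     1 2
#     ...   (twelve edge lines per graph in total)
#
#     3 4
#     ...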
|
from django.apps import AppConfig
class JacApiConfig(AppConfig):
name = "jaseci_serv.jac_api"
|
from django.urls import include, path
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('home.urls')),
path('calendar/', include('events.urls')),
path('resources/', include('resources.urls')),
path('recruiters/', include('recruiters.urls')),
path('membership/', include('membership.urls')),
path('checkin/', include('attendance.urls')),
path('accounts/', include('allauth.urls')),
path('admin/', admin.site.urls),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) \
+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Factorial
"""
# end_pymotw_header
import math
for i in [0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6]:
try:
print("{:2.1f} {:6.2f}".format(i, math.gamma(i)))
except ValueError as err:
print("Error computing gamma({}): {}".format(i, err))
|
import threading
import time
import datetime
from config import Page_URL
from config import ANALYSER
from spider import Spider
def go():
spider = Spider(Page_URL, ANALYSER)
spider.go()
print(str(datetime.datetime.now())+'------------------------------------------------')
time.sleep(60)
timer = threading.Timer(0, go)
timer.start()
timer = threading.Timer(0, go)
timer.start()
|
'''
https://leetcode.com/problems/generate-parentheses/
Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses.
'''
'''
Accepted
'''
class Solution:
def generateParenthesisHelper(self, stack_open, stack_closed, result, solutions):
if len(stack_open) == 0:
# we need to empty the stack closed and return the string
while len(stack_closed) > 0:
result = result + stack_closed.pop()
solutions.append(result)
else:
if len(stack_open) == len(stack_closed):
                # no option but to pop from stack_open
result = result + stack_open.pop()
self.generateParenthesisHelper(stack_open.copy(), stack_closed.copy(), result, solutions)
else:
# we can either pop from closed or from open
stack_open_copy = stack_open.copy()
stack_closed_copy = stack_closed.copy()
result_open = result + stack_open_copy.pop()
result_closed = result + stack_closed_copy.pop()
# pop from open
self.generateParenthesisHelper(stack_open_copy, stack_closed.copy(), result_open, solutions)
# pop from closed
self.generateParenthesisHelper(stack_open.copy(), stack_closed_copy, result_closed, solutions)
def generateParenthesis(self, n: int) -> []:
stack_open = []
stack_closed = []
solutions = []
for i in range(0, n):
stack_open.append("(")
stack_closed.append(")")
self.generateParenthesisHelper(stack_open, stack_closed, "", solutions)
return solutions
print(Solution().generateParenthesis(8))
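# Added sanity check (illustrative, not part of the original submission):
# for n == 3 the five well-formed combinations are known exactly.
expected_n3 = ["((()))", "(()())", "(())()", "()(())", "()()()"]
assert sorted(Solution().generateParenthesis(3)) == sorted(expected_n3)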
|
import numpy as np
from pyglet.gl import *
from fos import Actor
from fos.data import get_sphere
class SphereCloud(Actor):
def __init__(self, positions,
radii = None,
colors = None,
force_centering = False,
affine = None,
*args, **kwargs):
""" Draw a set of spheres in 3D """
super(SphereCloud, self).__init__()
        if affine is None:
# create a default affine
self.affine = np.eye(4, dtype = np.float32)
else:
self.affine = affine
self._update_glaffine()
self.positions = positions
n = len(positions)
        if colors is None:
# default colors
self.colors = np.array( [[1,1,1,1]], dtype = np.float32).repeat(len(self.positions),axis=0)
else:
self.colors = colors.astype( np.ubyte )
        if radii is None:
            # default radii
self.radii = np.array( [1.0], dtype = np.float32).repeat(len(self.positions),axis=0)
else:
assert(len(positions) == len(radii))
self.radii = radii
# create vertices / faces for the primitive
# which is a sphere in this actor
mys = np.load(get_sphere())
v = mys['vertices'].astype(np.float32)
f = mys['faces'].astype(np.uint32)
#v=octahedron_vertices
#f=octahedron_triangles
lv = v.shape[0]
lf = f.shape[0]
# allocate memory
vertices = np.zeros( (lv * n, 3), np.float32 )
faces = np.zeros( (lf * n, 3), np.uint32 )
vertexcolors = np.zeros( (lv * n, 4), np.float32 )
        for i in range(n):
# scale the unit-sphere by the radius
vertices[i*lv:((i+1)*lv),:] = v * self.radii[i] + self.positions[i,:]
# add the number of vertices
faces[i*lf:((i+1)*lf),:] = f + (i * lv)
# coloring the vertices
vertexcolors[i*lv:((i+1)*lv),:] = self.colors[i,:].reshape( (1, 4) ).repeat(lv, axis=0)
self.vertices = vertices
self.faces = faces.astype(np.uint32)
self.colors = vertexcolors.astype(np.float32)
self.show_aabb = True
self.make_aabb(margin = 0)
# bind the vertex arrays
self.vert_ptr = self.vertices.ctypes.data
self.face_ptr = self.faces.ctypes.data
self.color_ptr = self.colors.ctypes.data
self.el_count = len(self.faces) * 3
def update(self, dt):
pass
def draw(self):
glPushMatrix()
glMultMatrixf(self.glaffine)
glEnable (GL_BLEND)
#glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_COLOR_ARRAY)
glVertexPointer(3, GL_FLOAT, 0, self.vert_ptr)
glColorPointer(4, GL_FLOAT, 0, self.color_ptr)
glDrawElements(GL_TRIANGLES, self.el_count, GL_UNSIGNED_INT, self.face_ptr)
glDisableClientState(GL_COLOR_ARRAY)
glDisableClientState(GL_VERTEX_ARRAY)
if self.show_aabb:
self.draw_aabb()
glPopMatrix()
|
"""
File : MSRuleCleanerWflow.py
Description: Provides a document Template for the MSRuleCleaner MicroServices
"""
# futures
from __future__ import division, print_function
from copy import deepcopy
class MSRuleCleanerWflow(dict):
"""
A minimal workflow and transfer information representation to serve the needs
of the MSRuleCleaner Micro Service.
"""
def __init__(self, doc, **kwargs):
super(MSRuleCleanerWflow, self).__init__(**kwargs)
# Search for all the keys we need from the ReqManager workflow description
myDoc = {}
for tup in self.docSchema():
if tup[0] in doc:
myDoc[tup[0]] = deepcopy(doc[tup[0]])
else:
myDoc.update({tup[0]: tup[1]})
self.update(myDoc)
def docSchema(self):
"""
Return the data schema for the document.
        Each entry of the schema is a tuple where:
* 1st element: is the key name / attribute in the request
* 2nd element: is the default value
* 3rd element: is the expected data type
Document format:
{
"RequestName": "ReqName",
"RequestType": "Type",
"RequestStatus": "Status",
"OutputDatasets": [],
'RulesToClean': {'plineMSTrCont': [],
'plineMSTrBlock': [],
'plineAgentCont': [],
'plineAgentBlock': []},
'CleanupStatus': {'plineMSTrCont': False,
'plineMSTrBlock': False,
'plineAgentCont': False,
'plineAgentBlock': False},
"TransferDone": False # information - returned by the MSOutput REST call.
'TargetStatus': 'normal-archived' || 'rejected-achived' || 'aborted-archived',
'ParentageResolved': Bool,
'PlineMarkers': None,
'IsClean': False
'ForceArchive', False]
}
:return: a list of tuples
"""
docTemplate = [
('RequestName', None, (str, unicode)),
('RequestType', None, (str, unicode)),
('RequestStatus', None, (str, unicode)),
('OutputDatasets', [], list),
('RulesToClean', {}, dict),
('CleanupStatus', {}, dict),
('TransferDone', False, bool),
('TargetStatus', None, (str, unicode)),
('ParentageResolved', True, bool),
('PlineMarkers', None, list),
('IsClean', False, bool),
('ForceArchive', False, bool)]
# NOTE: ParentageResolved is set by default to True it will be False only if:
# - RequestType is StepChain
# - The parent workflow is still in a transient status
# this should be one of the flags to be used to estimate if
# the workflow is good for archival
return docTemplate
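# Illustrative usage sketch (not part of the original module; the request
# values below are made up): keys present in the input document are
# deep-copied, every other key falls back to the docSchema() defaults.
#
#     reqDoc = {'RequestName': 'TestWf', 'RequestStatus': 'announced'}
#     wflow = MSRuleCleanerWflow(reqDoc)
#     wflow['RequestName']   # -> 'TestWf'
#     wflow['IsClean']       # -> False (default)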
|
from .atss import ATSS
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .fsaf import FSAF
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .nasfcos import NASFCOS
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .single_stage import SingleStageDetector
from .two_stage import TwoStageDetector
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade',
'RetinaNet', 'FCOS', 'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector',
'FOVEA', 'FSAF', 'NASFCOS'
]
|
# Copyright 2019 AUI, Inc. Washington DC, USA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##################################################
def uvcontsub(xds, field=None, fitspw=None, combine=None, solint='int', fitorder=0):
"""
.. todo::
This function is not yet implemented
Estimate continuum emission and subtract it from visibilities
Parameters
----------
xds : xarray.core.dataset.Dataset
input Visibility Dataset
field : int
field selection. If None, use all fields
fitspw : int
spw:channel selection for fitting
combine : str
data axis to combine for the continuum
solint : str
continuum fit timescale
fitorder : int
polynomial order for the fits
Returns
-------
xarray.core.dataset.Dataset
New Visibility Dataset with updated data
"""
return {}
|
"""Library for implementing cascade (sequences) of different neural modules.
Authors
* Anonymous
"""
import torch
import inspect
import logging
import operator
import functools
from speechbrain.nnet.linear import Linear
from speechbrain.utils.callchains import lengths_arg_exists
logger = logging.getLogger(__name__)
class Sequential(torch.nn.ModuleDict):
"""A sequence of modules with potentially inferring shape on construction.
If layers are passed with names, these can be referenced with dot notation.
Arguments
---------
input_shape : iterable
A list or tuple of ints or None, representing the expected shape of an
input tensor. None represents a variable-length dimension. If no
``input_shape`` is passed, no shape inference will be performed.
*layers, **named_layers
The inputs are treated as a list of layers to be
applied in sequence. The output shape of each layer is used to
infer the shape of the following layer. If a tuple is returned,
only the shape of the first element is used to determine input
shape of the next layer (e.g. RNN returns output, hidden).
Example
-------
>>> inputs = torch.rand(10, 40, 50)
>>> model = Sequential(input_shape=inputs.shape)
>>> model.append(Linear, n_neurons=100, layer_name="layer1")
>>> model.append(Linear, n_neurons=200, layer_name="layer2")
>>> outputs = model(inputs)
>>> outputs.shape
torch.Size([10, 40, 200])
>>> outputs = model.layer1(inputs)
>>> outputs.shape
torch.Size([10, 40, 100])
"""
def __init__(self, *layers, input_shape=None, **named_layers):
super().__init__()
# Make sure either layers or input_shape is passed
if not layers and input_shape is None and not named_layers:
raise ValueError("Must pass either layers or input shape")
# Keep track of what layers need "lengths" passed
self.length_layers = []
# Replace None dimensions with arbitrary value
self.input_shape = input_shape
if input_shape and None in input_shape:
self.input_shape = list(input_shape)
for i, dim in enumerate(self.input_shape):
# To reduce size of dummy tensors, use 1 for batch dim
if i == 0 and dim is None:
dim = 1
# Use 64 as nice round arbitrary value, big enough that
# halving this dimension a few times doesn't reach 1
self.input_shape[i] = dim or 64
# Append non-named layers
for layer in layers:
self.append(layer)
# Append named layers
for name, layer in named_layers.items():
self.append(layer, layer_name=name)
def append(self, layer, *args, layer_name=None, **kwargs):
"""Add a layer to the list of layers, inferring shape if necessary.
Arguments
---------
layer : A torch.nn.Module class or object
If the layer is a class, it should accept an argument called
``input_shape`` which will be inferred and passed. If the layer
is a module object, it is added as-is.
layer_name : str
The name of the layer, for reference. If the name is in use,
``_{count}`` will be appended.
*args, **kwargs
These are passed to the layer if it is constructed.
"""
# Compute layer_name
if layer_name is None:
layer_name = str(len(self))
elif layer_name in self:
index = 0
while f"{layer_name}_{index}" in self:
index += 1
layer_name = f"{layer_name}_{index}"
# Check if it needs to be constructed with input shape
if self.input_shape:
argspec = inspect.getfullargspec(layer)
if "input_shape" in argspec.args + argspec.kwonlyargs:
input_shape = self.get_output_shape()
layer = layer(*args, input_shape=input_shape, **kwargs)
# Finally, append the layer.
try:
self.add_module(layer_name, layer)
except TypeError:
raise ValueError(
"Must pass `input_shape` at initialization and use "
"modules that take `input_shape` to infer shape when "
"using `append()`."
)
def get_output_shape(self):
"""Returns expected shape of the output.
Computed by passing dummy input constructed with the
``self.input_shape`` attribute.
"""
with torch.no_grad():
dummy_input = torch.zeros(self.input_shape)
dummy_output = self(dummy_input)
return dummy_output.shape
def forward(self, x):
"""Applies layers in sequence, passing only the first element of tuples.
Arguments
---------
x : torch.Tensor
The input tensor to run through the network.
"""
for layer in self.values():
x = layer(x)
if isinstance(x, tuple):
x = x[0]
return x
class LengthsCapableSequential(Sequential):
"""Sequential model that can take ``lengths`` in the forward method.
This is useful for Sequential models that include RNNs where it is
important to avoid padding, or for some feature normalization layers.
Unfortunately, this module is not jit-able because the compiler doesn't
know ahead of time if the length will be passed, and some layers don't
accept the length parameter.
"""
def __init__(self, *args, **kwargs):
# Add takes_lengths list here.
self.takes_lengths = []
super().__init__(*args, **kwargs)
def append(self, *args, **kwargs):
# Add lengths arg inference here.
super().append(*args, **kwargs)
latest_forward_method = list(self.values())[-1].forward
self.takes_lengths.append(lengths_arg_exists(latest_forward_method))
def forward(self, x, lengths=None):
"""Applies layers in sequence, passing only the first element of tuples.
In addition, forward the ``lengths`` argument to all layers that accept
a ``lengths`` argument in their ``forward()`` method (e.g. RNNs).
Arguments
---------
x : torch.Tensor
The input tensor to run through the network.
lengths : torch.Tensor
The relative lengths of each signal in the tensor.
"""
for layer, give_lengths in zip(self.values(), self.takes_lengths):
if give_lengths:
x = layer(x, lengths=lengths)
else:
x = layer(x)
if isinstance(x, tuple):
x = x[0]
return x
class ModuleList(torch.nn.Module):
"""This class implements a wrapper to torch.nn.ModuleList with a forward()
method to forward all the layers sequentially.
    For models pretrained with the older SpeechBrain implementation of the
    Sequential class, users can use this class to load those pretrained models.
Arguments
---------
*layers : torch class
Torch objects to be put in a ModuleList.
"""
def __init__(self, *layers):
super().__init__()
self.layers = torch.nn.ModuleList(layers)
def forward(self, x):
for layer in self.layers:
x = layer(x)
if isinstance(x, tuple):
x = x[0]
return x
def append(self, module):
self.layers.append(module)
def extend(self, modules):
self.layers.extend(modules)
def insert(self, index, module):
        self.layers.insert(index, module)
class ConnectBlocks(torch.nn.Module):
"""Connect a sequence of blocks with shortcut connections.
Note: all shortcuts start from the output of the first block,
since the first block may change the shape significantly.
Arguments
---------
input_shape : tuple
        The shape of the expected input tensor.
shortcut_type : str
One of:
* "residual" - first block output passed to final output,
* "dense" - input of each block is from all previous blocks,
* "skip" - output of each block is passed to final output.
shortcut_projection : bool
Only has an effect if `shortcut_type` is passed. Whether to add a
linear projection layer to the shortcut connection before combining
with the output, to handle different sizes.
shortcut_combine_fn : str or function
Either a pre-defined function (one of "add", "sub", "mul", "div",
"avg", "cat") or a user-defined function that takes the shortcut
and next input, and combines them, as well as `init_params`
in case parameters need to be initialized inside of the function.
Example
-------
>>> inputs = torch.rand(10, 100, 20)
>>> model = ConnectBlocks(
... input_shape=inputs.shape, shortcut_projection=True
... )
>>> model.append(Linear, n_neurons=10)
>>> model.append(Linear, n_neurons=10, end_of_block=True)
>>> model.append(Linear, n_neurons=10)
>>> model.append(Linear, n_neurons=10, end_of_block=True)
>>> outputs = model(inputs)
>>> outputs.shape
torch.Size([10, 100, 10])
"""
def __init__(
self,
input_shape,
shortcut_type="residual",
shortcut_projection=False,
shortcut_combine_fn=torch.add,
):
super().__init__()
self.first_input_shape = input_shape
self.block_input_shape = input_shape
self.new_block = True
self.blocks = torch.nn.ModuleList()
if shortcut_type not in ["residual", "dense", "skip"]:
raise ValueError(
"'shortcuts' must be one of 'residual', 'dense', or 'skip'"
)
self.shortcut_type = shortcut_type
self.shortcut_projection = shortcut_projection
if shortcut_projection:
self.projections = torch.nn.ModuleList()
self.shortcut_combine_fn = shortcut_combine_fn
def append(self, layer, *args, **kwargs):
"""Appends the specified module to the shortcut model.
Arguments
---------
layer : torch.nn.Module class
This layer will get initialized with *args and **kwargs. Also,
the argument ``input_shape`` will be passed if the layer takes it.
*args, **kwargs
Passed unchanged to the layer **EXCEPT** the kwarg ``end_of_block``
which is used to indicate that the shortcut should be added in.
"""
if self.new_block:
self.blocks.append(Sequential(input_shape=self.block_input_shape))
self.new_block = False
end_of_block = False
if "end_of_block" in kwargs:
end_of_block = kwargs["end_of_block"]
del kwargs["end_of_block"]
self.blocks[-1].append(layer, *args, **kwargs)
# When we reach the end of the block, prepare to add shortcut
if end_of_block:
# Use dummy input to find shape of next block
dummy_input = torch.zeros(self.block_input_shape)
dummy_output = self.blocks[-1](dummy_input)
# Initialize projection if necessary
if self.shortcut_projection:
projection_size = functools.reduce(
operator.mul, dummy_output.shape[2:], 1
)
if self.shortcut_type == "residual":
shape = self.first_input_shape
dummy_input = torch.zeros(self.first_input_shape)
else:
shape = self.block_input_shape
self.projections.append(
Linear(
n_neurons=projection_size,
input_shape=shape,
bias=False,
combine_dims=True,
)
)
# Prepare for next block
self.new_block = True
dummy_output = self._combine(dummy_input, dummy_output, -1)
self.block_input_shape = dummy_output.shape
def forward(self, x):
"""
Arguments
---------
x : torch.Tensor
The inputs to the replicated modules.
"""
shortcut = x
for i, block in enumerate(self.blocks):
x = block(x)
if self.shortcut_type == "skip":
shortcut = self._combine(shortcut, x, i)
if self.shortcut_type == "dense":
x = shortcut = self._combine(shortcut, x, i)
if self.shortcut_type == "residual":
x = self._combine(shortcut, x, i)
if self.shortcut_type == "skip":
return shortcut
else:
return x
def _combine(self, shortcut, x, block_index=0):
"""Handle combining shortcut with outputs."""
# Apply projection
if self.shortcut_projection:
shortcut = self.projections[block_index](shortcut)
shortcut = shortcut.reshape(x.shape)
return self.shortcut_combine_fn(shortcut, x)
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import platform
import re
from oslo.config import cfg
from cloudbaseinit.openstack.common import log as logging
from cloudbaseinit.osutils import factory as osutils_factory
from cloudbaseinit.plugins import base
opts = [
cfg.BoolOpt('netbios_host_name_compatibility', default=True,
help='Truncates the hostname to 15 characters for Netbios '
'compatibility'),
]
CONF = cfg.CONF
CONF.register_opts(opts)
LOG = logging.getLogger(__name__)
NETBIOS_HOST_NAME_MAX_LEN = 15
class SetHostNamePlugin(base.BasePlugin):
def execute(self, service, shared_data):
osutils = osutils_factory.get_os_utils()
metadata_host_name = service.get_host_name()
if not metadata_host_name:
LOG.debug('Hostname not found in metadata')
return (base.PLUGIN_EXECUTION_DONE, False)
metadata_host_name = metadata_host_name.split('.', 1)[0]
if (len(metadata_host_name) > NETBIOS_HOST_NAME_MAX_LEN and
CONF.netbios_host_name_compatibility):
new_host_name = metadata_host_name[:NETBIOS_HOST_NAME_MAX_LEN]
LOG.warn('Truncating host name for Netbios compatibility. '
'Old name: %(metadata_host_name)s, new name: '
'%(new_host_name)s' %
{'metadata_host_name': metadata_host_name,
'new_host_name': new_host_name})
else:
new_host_name = metadata_host_name
new_host_name = re.sub(r'-$', '0', new_host_name)
if platform.node().lower() == new_host_name.lower():
LOG.debug("Hostname already set to: %s" % new_host_name)
reboot_required = False
else:
LOG.info("Setting hostname: %s" % new_host_name)
osutils.set_host_name(new_host_name)
reboot_required = True
return (base.PLUGIN_EXECUTION_DONE, reboot_required)
|
from __future__ import division, print_function, absolute_import
__author__ = 'Alex Rogozhnikov'
import functools
import nose
def known_failure(test):
"""
Decorator to mark known failures in tests
"""
@functools.wraps(test)
def inner(*args, **kwargs):
try:
test(*args, **kwargs)
except Exception:
raise nose.SkipTest
return inner
def retry_if_fails(test):
"""
Decorator to mark tests which can frequently fail.
"""
@functools.wraps(test)
def inner(*args, **kwargs):
try:
test(*args, **kwargs)
except AssertionError:
test(*args, **kwargs)
return inner
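# Illustrative usage (not part of the original module; `compute_statistic` and
# `expected_value` are hypothetical names):
#
#     @known_failure
#     def test_unfinished_feature():
#         assert compute_statistic() == expected_value   # any exception -> SkipTest
#
#     @retry_if_fails
#     def test_flaky_statistic():
#         # an AssertionError triggers exactly one retry before failing
#         assert abs(compute_statistic() - expected_value) < 0.1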
|
# -*- coding: utf-8 -*-
"""
Template to generate the input files for the FEM code solids_ISO.
The script uses module meshio.py to read a GMSH mesh and produce
text files nodes.txt, eles.txt , mater.txt and loads.txt
@authors: Juan Gomez
Nicolas Guarin-Zapata
"""
from __future__ import division, print_function
import meshio
import solidspy.preprocesor as msh
import numpy as np
#
points, cells, point_data, cell_data, field_data = \
meshio.read("BoussiR.msh")
#
nodes_array = msh.node_writer(points , point_data)
nf , els1_array = msh.ele_writer(cells , cell_data , "triangle" , 10000 , 3 , 0 , 0)
#
nodes_array = msh.boundary_conditions(cells , cell_data , 300 , nodes_array , -1 , 0)
nodes_array = msh.boundary_conditions(cells , cell_data , 400 , nodes_array , 0 , -1)
cargas = msh.loading(cells , cell_data , 200 , 0.0 , -1.0)
#
np.savetxt("eles.txt" , els1_array , fmt="%d")
np.savetxt("loads.txt", cargas, fmt=("%d", "%.6f", "%.6f"))
np.savetxt("nodes.txt", nodes_array , fmt=("%d", "%.4f", "%.4f", "%d", "%d"))
|
def statistical_error(actual, predicted):
error = ((abs(predicted - actual))/actual)
return error
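# Illustrative example (not part of the original module): a prediction of 110
# against an actual value of 100 gives a relative error of 0.1.
#
#     statistical_error(100.0, 110.0)  # -> 0.1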
|
from PyQt5.QtCore import QThread, QObject, pyqtSignal, pyqtSlot, QRunnable
import numpy as np
class WorkerSignals(QObject):
finished = pyqtSignal()
output = pyqtSignal(np.ndarray)
class GoL_Worker(QRunnable):
def __init__(self, input_array, height, width):
super(GoL_Worker, self).__init__()
self.input_array = input_array
self.height = height
self.width = width
self.signals = WorkerSignals()
@pyqtSlot()
def run(self):
result = self.create_new_array()
self.signals.output.emit(result)
self.signals.finished.emit()
@pyqtSlot()
def create_new_array(self):
return np.random.randint(2, size=(self.height, self.width))
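# Illustrative usage sketch (not part of the original module; handler names are
# placeholders and QThreadPool would need to be imported from PyQt5.QtCore):
# inside a Qt application with a running event loop, the worker is typically
# dispatched on the global thread pool and its signals connected to slots.
#
#     worker = GoL_Worker(current_array, height=64, width=64)
#     worker.signals.output.connect(on_new_array)    # receives the np.ndarray
#     worker.signals.finished.connect(on_finished)
#     QThreadPool.globalInstance().start(worker)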
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''
Test case for AC training.
'''
#pylint: disable=C0413
#pylint: disable=C0411
#pylint: disable=W0611
import os
import sys
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
MODEL_PATH = os.path.join(ROOT_PATH, 'example')
sys.path.insert(0, MODEL_PATH)
import pytest
from ac.src import config
from ac.src.ac_trainer import ACTrainer
from mindspore import context
from mindspore_rl.core import Session
from mindspore_rl.utils.callback import LossCallback
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_train_ac():
'''
Train the AC.
'''
context.set_context(mode=context.GRAPH_MODE)
loss_cb = LossCallback()
ac_session = Session(config.algorithm_config)
ac_session.run(class_type=ACTrainer, episode=5, params=config.trainer_params, callbacks=[loss_cb])
assert True
|
import os
import queue
import threading
from time import sleep
import numpy as np
import tensorflow as tf
from tensorflow.python.training.adam import AdamOptimizer
from env import MultiArmTorqueEnvironment
from models import autoencoder_seq
N_ITERATIONS = 10000
N_JOINTS = 2
SEQ_LEN = 16
BATCH_SIZE = 1024 * 16
MOTION_SELECTION = 4 * 4
LSTM_SIZE = MOTION_SELECTION + 2 ** N_JOINTS
MOTIONS = np.identity(MOTION_SELECTION)
# BATCH_SIZE, MOTION_SELECTION
selected_gesture = tf.placeholder(tf.float32, [None, MOTION_SELECTION], 'selected_gesture')
batch_sz = tf.shape(selected_gesture)[0]
noise_op = tf.random_uniform([batch_sz, SEQ_LEN], -1, 1, tf.float32, None, 'noise_sequence')
with tf.variable_scope('noisy_initial_state'):
x = lambda: tf.random_uniform([batch_sz, LSTM_SIZE], -1, 1, tf.float32)
initial_state_op = [[x(), x()], [x(), x()]]
with tf.variable_scope('autoencoder'):
# [BATCH_SIZE, MOTION_SELECTION] , [BATCH_SIZE, SEQ_LEN, N_JOINTS]
softmax_class_op, pred_states_op, _ = autoencoder_seq(selected_gesture, noise_op, initial_state_op, SEQ_LEN,
N_JOINTS,
LSTM_SIZE)
with tf.variable_scope('eval'):
pred_class, true_class = tf.argmax(softmax_class_op, axis=1), tf.argmax(selected_gesture, axis=1)
accuracy = tf.divide(tf.count_nonzero(tf.equal(pred_class, true_class), dtype=tf.int32), batch_sz, name='accuracy')
tf.summary.scalar('accuracy', accuracy)
from tflearn.objectives import categorical_crossentropy
loss = categorical_crossentropy(softmax_class_op, selected_gesture)
tf.summary.scalar('classification_loss', loss)
with tf.variable_scope('optimize'):
lr_op = tf.Variable(5e-4, False, dtype=tf.float32)
decay_lr_op = tf.assign(lr_op, lr_op * (1 - 1e-4))
tf.summary.scalar('learning_rate', lr_op)
with tf.control_dependencies([decay_lr_op]):
train_step = AdamOptimizer(learning_rate=lr_op).minimize(loss)
display_q = queue.Queue(10)
def display():
while True:
softmax_class, display_states = display_q.get()
print("Prediction: ", np.max(softmax_class, axis=1))
for states in np.transpose(display_states, axes=[1, 0, 2]):
env.step(states)
env.render()
sleep(.2 / (display_q.qsize() + 1))
env.reset()
threading.Thread(target=display).start()
summaries_op = tf.summary.merge_all()
writer = tf.summary.FileWriter(os.environ['logdir'] + '/creative_autoencoder/', tf.get_default_graph())
env = MultiArmTorqueEnvironment(n_arms=MOTION_SELECTION, n_joints=N_JOINTS, time_lim=SEQ_LEN)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
for iteration in range(N_ITERATIONS):
batch = MOTIONS[np.random.randint(0, MOTION_SELECTION, BATCH_SIZE)]
_, summaries, _ = sess.run([train_step, summaries_op, decay_lr_op], feed_dict={selected_gesture: batch})
writer.add_summary(summaries)
if iteration % 40 == 0:
display_q.put(sess.run([softmax_class_op, pred_states_op], feed_dict={selected_gesture: MOTIONS}))
writer.flush()
env.reset()
|
from .schema import *
import pyarrow
def read_type(doc):
t = doc[TYPE]
if PARAM in doc:
tp = doc[PARAM]
else:
tp = None
if t == 'null':
return pyarrow.null()
if t == 'bool':
return pyarrow.bool_()
if t == 'int8':
return pyarrow.int8()
if t == 'int16':
return pyarrow.int16()
if t == 'int32':
return pyarrow.int32()
if t == 'int64':
return pyarrow.int64()
if t == 'uint8':
return pyarrow.uint8()
if t == 'uint16':
return pyarrow.uint16()
if t == 'uint32':
return pyarrow.uint32()
if t == 'uint64':
return pyarrow.uint64()
if t == 'float16':
return pyarrow.float16()
if t == 'float32':
return pyarrow.float32()
if t == 'float64':
return pyarrow.float64()
if t == 'date[d]':
return pyarrow.date32()
if t == 'date[ms]':
return pyarrow.date64()
if t == 'timestamp[s]':
return pyarrow.timestamp('s')
if t == 'timestamp[ms]':
return pyarrow.timestamp('ms')
if t == 'timestamp[us]':
return pyarrow.timestamp('us')
if t == 'timestamp[ns]':
return pyarrow.timestamp('ns')
if t == 'time[s]':
return pyarrow.time32('s')
if t == 'time[ms]':
return pyarrow.time32('ms')
if t == 'time[us]':
return pyarrow.time64('us')
if t == 'time[ns]':
return pyarrow.time64('ns')
if t == 'utf8':
return pyarrow.utf8()
if t == 'bytes':
return pyarrow.binary()
if t == 'factor':
if tp is None:
index_type = pyarrow.int32()
dict_type = pyarrow.utf8()
else:
index_type = read_type(tp[INDEX])
dict_type = read_type(tp[DICT])
return pyarrow.dictionary(index_type, dict_type, False)
if t == 'ordered':
if tp is None:
index_type = pyarrow.int32()
dict_type = pyarrow.utf8()
else:
index_type = read_type(tp[INDEX])
dict_type = read_type(tp[DICT])
return pyarrow.dictionary(index_type, dict_type, True)
if t == 'opaque':
return pyarrow.binary(tp)
if t == 'list':
return pyarrow.list_(read_type(tp))
if t == 'struct':
return pyarrow.struct(
[pyarrow.field(f[NAME], read_type(f)) for f in tp])
    raise ValueError(f'{t} is not a supported BSON DataFrame type')
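# Illustrative sketch (not part of the original module): a nested type such as
# list<int32> is described with the schema constants imported above, e.g.
#
#     doc = {TYPE: 'list', PARAM: {TYPE: 'int32'}}
#     read_type(doc)   # -> pyarrow.list_(pyarrow.int32())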
|
# Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import TYPE_CHECKING, Any, ClassVar, List, Optional
from tfx.proto.orchestration.pipeline_pb2 import Pipeline as Pb2Pipeline
from zenml.orchestrators.base_orchestrator import BaseOrchestrator
from zenml.steps import BaseStep
if TYPE_CHECKING:
from zenml.pipelines import BasePipeline
from zenml.runtime_configuration import RuntimeConfiguration
from zenml.stack import Stack
class AriaOrchestrator(BaseOrchestrator):
FLAVOR: ClassVar[str] = "aria"
favorite_orchestration_language: str
favorite_orchestration_language_version: Optional[str] = "1.0"
def prepare_or_run_pipeline(
self,
sorted_steps: List[BaseStep],
pipeline: "BasePipeline",
pb2_pipeline: Pb2Pipeline,
stack: "Stack",
runtime_configuration: "RuntimeConfiguration",
) -> Any:
"""Mock function."""
def run_pipeline(
self,
pipeline: "BasePipeline",
stack: "Stack",
runtime_configuration: "RuntimeConfiguration",
) -> Any:
"""Mock function."""
|
#!/usr/bin/python3
import sys, os
filename = sys.argv[1];
output = filename + ".c";
try:
compiler = sys.argv[2]
except:
compiler = "/usr/bin/gcc"
f = open(filename, "r");
content = f.read();
f.close();
del f;
if os.path.exists(output):
os.remove(output);
def fileappend(filename, content):
f = open(filename, "a");
f.write(content);
f.close();
del f;
def appendcode(content, tab = True):
if tab:
fileappend(output, ("\t" * tab) + content + "\n");
else:
fileappend(output, content + "\n");
# beginning of file
appendcode("#include <stdio.h>");
appendcode("int main() {", tab = False);
appendcode("char array[30000] = {0}; char *ptr = array;");
# body of file
tabcount = 0;
for i in content:
if i == ">":
appendcode("++ptr;", tab = tabcount + 1);
elif i == "<":
appendcode("--ptr;", tab = tabcount + 1);
elif i == "+":
appendcode("++*ptr;", tab = tabcount + 1);
elif i == "-":
appendcode("--*ptr;", tab = tabcount + 1);
elif i == ".":
appendcode("putchar(*ptr);", tab = tabcount + 1);
elif i == ",":
appendcode("*ptr = getchar();", tab = tabcount + 1);
elif i == "[":
appendcode("while (*ptr) {", tab = tabcount + 1);
tabcount += 1;
elif i == "]":
tabcount -= 1;
appendcode("}", tab = tabcount + 1);
else:
continue;
if tabcount >= 1:
print("error: found an unexpected '['", file = sys.stderr);
os.remove(output);
exit(1);
if tabcount < 0:
print("error: found an unexpected ']'", file = sys.stderr);
os.remove(output);
exit(1);
# end of file
appendcode("}", tab = False);
print("finished part 1 with no errors");
print("starting compilation");
# compiling
ret = os.system(compiler + " -o " + filename + ".exe " + output); # os.execvp("/usr/bin/gcc", ["/usr/bin/gcc", output]);
if ret > 0:
print("error: compilation error", file = sys.stderr);
# cleanup
os.remove(output)
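# Character-to-C translation used above, for reference:
#   >  ++ptr;              <  --ptr;
#   +  ++*ptr;             -  --*ptr;
#   .  putchar(*ptr);      ,  *ptr = getchar();
#   [  while (*ptr) {      ]  }
# every other character is ignored.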
|
#from __future__ import print_function
import ast
import os
import sys
import codecs
import subprocess
from fnmatch import fnmatchcase
from distutils.util import convert_path
from setuptools import setup, find_packages
def find_version(*parts):
version_py = os.path.join(os.path.dirname(__file__), 'version.py')
try:
version_git = subprocess.check_output(["git", "tag"]).rstrip().splitlines()[-1]
except:
        # fall back to the version already recorded in version.py
        with open(version_py, 'r') as fh:
            version_git = fh.read().strip().split('=')[-1].replace('"', '').strip()
    version_msg = "# Do not edit this file, pipeline versioning is governed by git tags" + os.linesep + "# following PEP 386"
    with open(version_py, 'w') as fh:
        fh.write(version_msg + os.linesep + '__version__ = "%s"' % version_git)
return "{ver}".format(ver=version_git)
def read(*parts):
filename = os.path.join(os.path.dirname(__file__), *parts)
with codecs.open(filename, encoding='utf-8') as fp:
return fp.read()
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ('*.py', '*.pyc', '*$py.class', '*~', '.*', '*.bak')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
'./dist', 'EGG-INFO', '*.egg-info')
setup(
name='django-fontforge-watcher',
version=find_version(),
description='Django fontforge watchers and auto compilers',
long_description=read('README.rst'),
author='Autrusseau Damien',
author_email='autrusseau.damien@gmail.com',
url='http://github.com/dalou/django-fontforge-watcher',
packages=find_packages(),
zip_safe=False,
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
test_suite='runtests.runtests',
install_requires=[
'django >= 1.8.4, <= 1.9',
'watchdog >= 0.8.3',
'stylus >= 0.1.1',
],
)
|
from pypy.module.micronumpy.interp_iter import ViewIterator
class TestIterDirect(object):
def test_C_viewiterator(self):
#Let's get started, simple iteration in C order with
#contiguous layout => strides[-1] is 1
start = 0
shape = [3, 5]
strides = [5, 1]
backstrides = [x * (y - 1) for x,y in zip(strides, shape)]
assert backstrides == [10, 4]
i = ViewIterator(start, strides, backstrides, shape)
i = i.next(2)
i = i.next(2)
i = i.next(2)
assert i.offset == 3
assert not i.done()
assert i.indices == [0,3]
#cause a dimension overflow
i = i.next(2)
i = i.next(2)
assert i.offset == 5
assert i.indices == [1,0]
#Now what happens if the array is transposed? strides[-1] != 1
# therefore layout is non-contiguous
strides = [1, 3]
backstrides = [x * (y - 1) for x,y in zip(strides, shape)]
assert backstrides == [2, 12]
i = ViewIterator(start, strides, backstrides, shape)
i = i.next(2)
i = i.next(2)
i = i.next(2)
assert i.offset == 9
assert not i.done()
assert i.indices == [0,3]
#cause a dimension overflow
i = i.next(2)
i = i.next(2)
assert i.offset == 1
assert i.indices == [1,0]
def test_C_viewiterator_step(self):
#iteration in C order with #contiguous layout => strides[-1] is 1
#skip less than the shape
start = 0
shape = [3, 5]
strides = [5, 1]
backstrides = [x * (y - 1) for x,y in zip(strides, shape)]
assert backstrides == [10, 4]
i = ViewIterator(start, strides, backstrides, shape)
i = i.next_skip_x(2,2)
i = i.next_skip_x(2,2)
i = i.next_skip_x(2,2)
assert i.offset == 6
assert not i.done()
assert i.indices == [1,1]
#And for some big skips
i = i.next_skip_x(2,5)
assert i.offset == 11
assert i.indices == [2,1]
i = i.next_skip_x(2,5)
# Note: the offset does not overflow but recycles,
# this is good for broadcast
assert i.offset == 1
assert i.indices == [0,1]
assert i.done()
#Now what happens if the array is transposed? strides[-1] != 1
# therefore layout is non-contiguous
strides = [1, 3]
backstrides = [x * (y - 1) for x,y in zip(strides, shape)]
assert backstrides == [2, 12]
i = ViewIterator(start, strides, backstrides, shape)
i = i.next_skip_x(2,2)
i = i.next_skip_x(2,2)
i = i.next_skip_x(2,2)
assert i.offset == 4
assert i.indices == [1,1]
assert not i.done()
i = i.next_skip_x(2,5)
assert i.offset == 5
assert i.indices == [2,1]
assert not i.done()
i = i.next_skip_x(2,5)
assert i.indices == [0,1]
assert i.offset == 3
assert i.done()
|
"""
This is a place to create a python wrapper for the BASGRA fortran model in fortran_BASGRA_NZ
Author: Matt Hanson
Created: 12/08/2020 9:32 AM
"""
import os
import ctypes as ct
import numpy as np
import pandas as pd
from subprocess import Popen
from copy import deepcopy
from input_output_keys import param_keys, out_cols, days_harvest_keys, matrix_weather_keys_pet, \
matrix_weather_keys_penman
from warnings import warn
# compiled with gfortran 64,
# https://sourceforge.net/projects/mingwbuilds/files/host-windows/releases/4.8.1/64-bit/threads-posix/seh/x64-4.8.1-release-posix-seh-rev5.7z/download
# compilation code: compile_basgra_gfortran.bat
# define the dll library path
_libpath_pet = os.path.join(os.path.dirname(__file__), 'fortran_BASGRA_NZ/BASGRA_pet.DLL')
_libpath_peyman = os.path.join(os.path.dirname(__file__), 'fortran_BASGRA_NZ/BASGRA_peyman.DLL')
_bat_path = os.path.join(os.path.dirname(__file__), 'fortran_BASGRA_NZ\\compile_BASGRA_gfortran.bat')
# this is the maximum number of weather days,
# it is hard coded into fortran_BASGRA_NZ/environment.f95 line 9
_max_weather_size = 36600
def run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=False,
dll_path='default', supply_pet=True, auto_harvest=False, run_365_calendar=False):
"""
python wrapper for the fortran BASGRA code
changes to the fortran code may require changes to this function
runs the model for the period of the weather data
:param params: dictionary, see input_output_keys.py, README.md, or
                   https://github.com/Komanawa-Solutions-Ltd/BASGRA_NZ_PY for more details
:param matrix_weather: pandas dataframe of weather data, maximum entries set in _max_weather_size in line 24
of this file (currently 36600)
see documentation for input columns at https://github.com/Komanawa-Solutions-Ltd/BASGRA_NZ_PY
                           or README.md, note expected DOY will change depending on run_365_calendar
:param days_harvest: days harvest dataframe must be same length as matrix_weather entries
see documentation for input columns at https://github.com/Komanawa-Solutions-Ltd/BASGRA_NZ_PY
                         or README.md, note expected DOY will change depending on run_365_calendar
:param doy_irr: a list of the days of year to irrigate on, must be integers acceptable values: (0-366)
:param verbose: boolean, if True the fortran function prints a number of statements for debugging purposes
                    (deprecated)
:param dll_path: path to the compiled fortran DLL to use, default was made on windows 10 64 bit, if the path does
not exist, this function will try to run the bat file to re-make the dll.
:param supply_pet: boolean, if True BASGRA expects pet to be supplied, if False the parameters required to
calculate pet from the peyman equation are expected,
the version must match the DLL if dll_path != 'default'
    :param auto_harvest: boolean, if True then assumes data is formatted correctly for auto harvesting, if False, then
assumes data is formatted for manual harvesting (e.g. previous version) and re-formats
internally
    :param run_365_calendar: boolean, if True then run on a 365 day calendar
This expects that all leap days will be removed from matrix_weather and
days_harvest. DOY is expected to be between 1 and 365. This means that datetime
objects defined by year and doy will be incorrect. instead use
get_month_day_to_nonleap_doy to map DOY to datetime via month and day. This is how
the index of the returned datetime will be passed. For example for date 2024-03-01
(2024 is a leap year) the dayofyear via a datetime object will be 61, but if
                             run_365_calendar=True basgra expects day of year to be 60. the index of the
                             results will be a datetime object equivalent to 2024-03-01, so the output doy
                             will not match the index doy and there will be no value on 2024-02-29.
default False
:return: pd.DataFrame(index=datetime index, columns = out_cols)
"""
assert isinstance(supply_pet, bool), 'supply_pet param must be boolean'
assert isinstance(auto_harvest, bool), 'auto_harvest param must be boolean'
    assert isinstance(run_365_calendar, bool), 'run_365_calendar param must be boolean'
# define DLL library path
use_default_lib = False
if dll_path == 'default':
use_default_lib = True
if supply_pet:
dll_path = _libpath_pet
else:
dll_path = _libpath_peyman
# check that library path exists
if not os.path.exists(dll_path):
if use_default_lib:
# try to run the bat file
print('dll not found, trying to run bat to create DLL:\n{}'.format(_bat_path))
p = Popen(os.path.basename(_bat_path), cwd=os.path.dirname(_bat_path), shell=True)
stdout, stderr = p.communicate()
print('output of bat:\n{}\n{}'.format(stdout, stderr))
if not os.path.exists(dll_path):
raise EnvironmentError('default DLL path not found:\n'
'{}\n'
'see readme for more details:\n'
'{}'.format(dll_path, os.path.dirname(__file__) + 'README.md'))
else:
raise EnvironmentError('DLL path not found:\n{}'.format(dll_path))
# define expected weather keys
if supply_pet:
_matrix_weather_keys = matrix_weather_keys_pet
else:
_matrix_weather_keys = matrix_weather_keys_penman
doy_irr = np.atleast_1d(doy_irr)
# test the input variables
_test_basgra_inputs(params, matrix_weather, days_harvest, verbose, _matrix_weather_keys,
auto_harvest, doy_irr, run_365_calendar=run_365_calendar)
nout = len(out_cols)
ndays = len(matrix_weather)
nirr = len(doy_irr)
# define output indexes before data manipulation
out_index = matrix_weather.index
# copy everything and ensure order is correct
params = deepcopy(params)
matrix_weather = deepcopy(matrix_weather.loc[:, _matrix_weather_keys])
days_harvest = deepcopy(days_harvest.loc[:, days_harvest_keys])
# translate manual harvest inputs into fortran format
if not auto_harvest:
days_harvest = _trans_manual_harv(days_harvest, matrix_weather)
# get variables into right python types
params = np.array([params[e] for e in param_keys]).astype(float)
matrix_weather = matrix_weather.values.astype(float)
days_harvest = days_harvest.values.astype(float)
doy_irr = doy_irr.astype(np.int32)
# manage weather size,
weather_size = len(matrix_weather)
if weather_size < _max_weather_size:
temp = np.zeros((_max_weather_size - weather_size, matrix_weather.shape[1]), float)
matrix_weather = np.concatenate((matrix_weather, temp), 0)
y = np.zeros((ndays, nout), float) # cannot set these to nan's or it breaks fortran
# make pointers
# arrays # 99% sure this works
params_p = np.asfortranarray(params).ctypes.data_as(ct.POINTER(ct.c_double)) # 1d array, float
matrix_weather_p = np.asfortranarray(matrix_weather).ctypes.data_as(ct.POINTER(ct.c_double)) # 2d array, float
days_harvest_p = np.asfortranarray(days_harvest).ctypes.data_as(ct.POINTER(ct.c_double)) # 2d array, float
y_p = np.asfortranarray(y).ctypes.data_as(ct.POINTER(ct.c_double)) # 2d array, float
doy_irr_p = np.asfortranarray(doy_irr).ctypes.data_as(ct.POINTER(ct.c_long))
# integers
ndays_p = ct.pointer(ct.c_int(ndays))
nirr_p = ct.pointer(ct.c_int(nirr))
nout_p = ct.pointer(ct.c_int(nout))
verb_p = ct.pointer(ct.c_bool(verbose))
# load DLL
for_basgra = ct.CDLL(dll_path)
# run BASGRA
for_basgra.BASGRA_(params_p, matrix_weather_p, days_harvest_p, ndays_p, nout_p, nirr_p, doy_irr_p, y_p, verb_p)
# format results
y_p = np.ctypeslib.as_array(y_p, (ndays, nout))
y_p = y_p.flatten(order='C').reshape((ndays, nout), order='F')
y_p = pd.DataFrame(y_p, out_index, out_cols)
if run_365_calendar:
mapper = get_month_day_to_nonleap_doy(key_doy=True)
strs = [f'{y}-{mapper[doy][0]:02d}-{mapper[doy][1]:02d}' for y, doy in zip(y_p.year.values.astype(int),
y_p.doy.values.astype(int))]
y_p.loc[:, 'date'] = pd.to_datetime(strs)
else:
strs = ['{}-{:03d}'.format(int(e), int(f)) for e, f in y_p[['year', 'doy']].itertuples(False, None)]
y_p.loc[:, 'date'] = pd.to_datetime(strs, format='%Y-%j')
y_p.set_index('date', inplace=True)
return y_p
def _trans_manual_harv(days_harvest, matrix_weather):
"""
translates manual harvest data to the format expected by fortran, check the details of the data in here.
:param days_harvest: manual harvest data
:param matrix_weather: weather data, mostly to get the right size
:return: days_harvest (correct format for fortran code)
"""
days_harvest = days_harvest.set_index(['year', 'doy'])
days_harvest_out = pd.DataFrame({'year': matrix_weather.loc[:, 'year'],
'doy': matrix_weather.loc[:, 'doy'],
'frac_harv': np.zeros(len(matrix_weather)), # set filler values
'harv_trig': np.zeros(len(matrix_weather)) - 1, # set flag to not harvest
'harv_targ': np.zeros(len(matrix_weather)), # set filler values
'weed_dm_frac': np.zeros(len(matrix_weather)) * np.nan, # set nas, filled later
'reseed_trig': np.zeros(len(matrix_weather)) - 1, # set flag to not reseed
'reseed_basal': np.zeros(len(matrix_weather)), # set filler values
})
days_harvest_out = days_harvest_out.set_index(['year', 'doy'])
for k in set(days_harvest_keys) - {'year', 'doy'}:
days_harvest_out.loc[days_harvest.index, k] = days_harvest.loc[:, k]
days_harvest_out = days_harvest_out.reset_index()
# fill the weed fraction so that DMH_WEED is always calculated
if pd.isna(days_harvest_out.weed_dm_frac).iloc[0]:
warn('weed_dm_frac is na for the first day of simulation, setting to first valid weed_dm_frac\n'
'this does not affect the harvesting only the calculation of the DMH_weed variable.')
idx = np.where(pd.notna(days_harvest_out.weed_dm_frac))[0][0] # get first non-nan value
id_val = pd.Series(days_harvest_out.index).iloc[0]
days_harvest_out.loc[id_val, 'weed_dm_frac'] = days_harvest_out.loc[:, 'weed_dm_frac'].iloc[idx]
days_harvest_out.loc[:, 'weed_dm_frac'] = days_harvest_out.loc[:, 'weed_dm_frac'].fillna(method='ffill')
return days_harvest_out
def _test_basgra_inputs(params, matrix_weather, days_harvest, verbose, _matrix_weather_keys,
auto_harvest, doy_irr, run_365_calendar):
# check parameters
assert isinstance(verbose, bool), 'verbose must be boolean'
assert isinstance(params, dict)
assert set(params.keys()) == set(param_keys), 'incorrect params keys'
assert not any([np.isnan(e) for e in params.values()]), 'params cannot have na data'
assert params['reseed_harv_delay'] >= 1, 'harvest delay must be >=1'
    assert params['reseed_harv_delay'] % 1 < 1e-5, 'harvest delay must effectively be an integer'
# check matrix weather
assert isinstance(matrix_weather, pd.DataFrame)
assert set(matrix_weather.keys()) == set(_matrix_weather_keys), 'incorrect keys for matrix_weather'
assert pd.api.types.is_integer_dtype(matrix_weather.doy), 'doy must be an integer datatype in matrix_weather'
assert pd.api.types.is_integer_dtype(matrix_weather.year), 'year must be an integer datatype in matrix_weather'
assert len(matrix_weather) <= _max_weather_size, 'maximum run size is {} days'.format(_max_weather_size)
assert not matrix_weather.isna().any().any(), 'matrix_weather cannot have na values'
# check to make sure there are no missing days in matrix_weather
start_year = matrix_weather['year'].min()
start_day = matrix_weather.loc[matrix_weather.year == start_year, 'doy'].min()
stop_year = matrix_weather['year'].max()
stop_day = matrix_weather.loc[matrix_weather.year == stop_year, 'doy'].max()
if run_365_calendar:
assert matrix_weather.doy.max() <= 365, 'expected to have leap days removed, and all doy between 1-365'
doy_day_mapper = get_month_day_to_nonleap_doy()
inv_doy_mapper = get_month_day_to_nonleap_doy(key_doy=True)
start_mon, start_dom = inv_doy_mapper[start_day]
stop_mon, stop_dom = inv_doy_mapper[stop_day]
expected_datetimes = pd.date_range(start=f'{start_year}-{start_mon:02d}-{start_dom:02d}',
end=f'{stop_year}-{stop_mon:02d}-{stop_dom:02d}')
expected_datetimes = expected_datetimes[~((expected_datetimes.month == 2) & (expected_datetimes.day == 29))]
expected_years = expected_datetimes.year.values
expected_days = np.array(
[doy_day_mapper[(m, d)] for m, d in zip(expected_datetimes.month, expected_datetimes.day)])
addmess = ' note that leap days are expected to have been removed from matrix weather'
else:
expected_datetimes = pd.date_range(start=pd.to_datetime('{}-{}'.format(start_year, start_day), format='%Y-%j'),
end=pd.to_datetime('{}-{}'.format(stop_year, stop_day), format='%Y-%j'))
expected_years = expected_datetimes.year.values
expected_days = expected_datetimes.dayofyear.values
addmess = ''
check = ((matrix_weather['year'].values == expected_years).all() and
(matrix_weather['doy'].values == expected_days).all())
assert check, 'the date range of matrix_weather contains missing or duplicate days' + addmess
# check harvest data
assert isinstance(days_harvest, pd.DataFrame)
assert set(days_harvest.keys()) == set(days_harvest_keys), 'incorrect keys for days_harvest'
assert pd.api.types.is_integer_dtype(days_harvest.doy), 'doy must be an integer datatype in days_harvest'
assert pd.api.types.is_integer_dtype(days_harvest.year), 'year must be an integer datatype in days_harvest'
assert not days_harvest.isna().any().any(), 'days_harvest cannot have na data'
assert (days_harvest['frac_harv'] <= 1).all(), 'frac_harv cannot be greater than 1'
if run_365_calendar:
assert days_harvest.doy.max() <= 365
if params['fixed_removal'] > 0.9:
        assert (days_harvest['harv_trig'] >=
                days_harvest['harv_targ']).all(), 'when using fixed harvest mode, harv_trig must be >= harv_targ'
if auto_harvest:
assert len(matrix_weather) == len(
days_harvest), 'days_harvest and matrix_weather must be the same length(ndays)'
check = (days_harvest['year'].values == matrix_weather.year.values).all() and (
days_harvest['doy'].values == matrix_weather.doy.values).all()
assert check, 'the date range of days_harvest does not match matrix_weather' + addmess
else:
if run_365_calendar:
mapper = get_month_day_to_nonleap_doy(key_doy=True)
strs = [f'{y}-{mapper[doy][0]:02d}-{mapper[doy][1]:02d}' for y, doy in zip(days_harvest.year.values,
days_harvest.doy.values)]
harvest_dt = pd.to_datetime(strs)
else:
strs = ['{}-{:03d}'.format(int(e), int(f)) for e, f in
days_harvest[['year', 'doy']].itertuples(False, None)]
harvest_dt = pd.to_datetime(strs, format='%Y-%j')
assert harvest_dt.min() >= expected_datetimes.min(), 'days_harvest must start at or after first day of simulation'
assert harvest_dt.max() <= expected_datetimes.max(), 'days_harvest must stop at or before last day of simulation'
# doy_irr tests
    assert isinstance(doy_irr, np.ndarray), 'doy_irr must be a numpy array'
assert doy_irr.ndim == 1, 'doy_irr must be 1d'
assert pd.api.types.is_integer_dtype(doy_irr), 'doy_irr must be integers'
assert doy_irr.max() <= 366, 'entries doy_irr must not be greater than 366'
assert doy_irr.min() >= 0, 'entries doy_irr must not be less than 0'
# pass a warning if max_irr is greater than abs_max_irr
if matrix_weather.loc[:, 'max_irr'].max() > params['abs_max_irr']:
warn(f'maximum weather_matrix max_irr ({matrix_weather.loc[:, "max_irr"].max()}) > absolute maximum '
f'irrigation {params["abs_max_irr"]}. The extra irrigation can never be applied but may be available for '
f'storage.')
def get_month_day_to_nonleap_doy(key_doy=False):
"""
:param key_doy: bool, if true the keys are doy, else keys are (month, dayofmonth)
    :return: dict; if key_doy is False: {(month, day): doy}, if key_doy is True: {doy: (month, day)}
"""
temp = pd.date_range('2025-01-01', '2025-12-31') # a random non leap year
day = temp.day
month = temp.month
doy = temp.dayofyear
if key_doy:
out = {dd: (m, d) for m, d, dd in zip(month, day, doy)}
else:
out = {(m, d): dd for m, d, dd in zip(month, day, doy)}
return out
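# Example of the mapping above (illustrative, using the non-leap 2025 calendar defined there):
#   get_month_day_to_nonleap_doy()[(3, 1)] == 60            # 31 (Jan) + 28 (Feb) + 1
#   get_month_day_to_nonleap_doy(key_doy=True)[60] == (3, 1)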
if __name__ == '__main__':
pass
|
from django.urls import (
path,
)
from .views import (
proxy_document,
proxy_pdf,
)
app_name = 'django_simple_file_handler'
urlpatterns = [
path(
'documents/<proxy_slug>',
proxy_document,
name='proxy_document',
),
path(
'pdf/<proxy_slug>',
proxy_pdf,
name='proxy_pdf',
),
]
|
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# This modules allows for querying and inserting perf result data into the
# perf datastore. Currently it has very basic functionality supported with
# little error handling.
# TODO: Make this more robust, add better logging
#
import MySQLdb
import os
import sys
from datetime import datetime
from functools import wraps
# Class that allows for interaction with the perf backend.
class PerfResultDataStore(object):
def __init__(self, host, username, password, database_name):
print 'Database Connection Info -> %s:%s@%s/%s' % \
(username, password, host, database_name)
self.connection = MySQLdb.connect(host, username, password, database_name)
def get_file_format_id(self, file_format, compression):
""" Gets the file_format_id for the fiven file_format/compression codec"""
return self.__get_file_format_id(file_format, compression)
def get_query_id(self, query_name, query):
""" Gets the query_id for the given query name and query text """
return self.__get_query_id(query_name, query)
def get_workload_id(self, workload, scale_factor):
""" Gets the workload_id for the given workload / scale factor """
return self.__get_workload_id(workload, scale_factor)
def insert_query_info(self, query_name, query_string):
""" Inserts a new record into the Query table and returns the ID """
return self.__insert_query_info(query_name, query_string)
def insert_run_info(self, run_info):
""" Inserts a new record into the run_info table and returns the ID """
return self.__insert_run_info(run_info)
def insert_workload_info(self, workload_name, scale_factor):
""" Inserts a new record into the Workload table and returns the ID """
return self.__insert_workload_info(workload_name, scale_factor)
def insert_execution_result(self, query_id, workload_id, file_type_id, num_clients,
cluster_name, executor_name, avg_time, stddev, run_date, version, notes,
run_info_id, num_iterations, runtime_profile, is_official=False):
""" Inserts a perf execution result record """
return self.__insert_execution_result(query_id, workload_id, file_type_id,
num_clients, cluster_name, executor_name, avg_time, stddev, run_date, version,
notes, run_info_id, num_iterations, runtime_profile, is_official)
def print_execution_results(self, run_info_id):
""" Prints results that were inserted for the given run_info_id """
self.__print_execution_results(run_info_id)
def cursor_wrapper(function):
""" Handles the common initialize/close pattern for cursor objects """
@wraps(function)
def wrapper(*args, **kwargs):
      # args[0] should be "self" -> PerfResultDataStore.
# TODO: Is there a better way to get at 'self' from here?
cursor = args[0].connection.cursor()
result = function(*args, cursor=cursor)
cursor.close()
return result
return wrapper
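  # Illustrative expansion of the decorator above (added for clarity, not part of the
  # original code): a method decorated with @cursor_wrapper behaves roughly like
  #
  #   def method(self, ...):
  #     cursor = self.connection.cursor()
  #     result = original_body(self, ..., cursor=cursor)
  #     cursor.close()
  #     return result
  #
  # Note that the cursor is only closed when the wrapped function returns normally.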
# Internal methods
@cursor_wrapper
def __get_file_format_id(self, file_format, compression, cursor):
""" Gets the file_format_id for the fiven file_format/compression codec"""
if compression == 'none':
compression_codec, compression_type = ['none', 'none']
else:
compression_codec, compression_type = compression.split('/')
result = cursor.execute("select file_type_id from FileType where format=%s and "\
"compression_codec=%s and compression_type=%s",
(file_format, compression_codec, compression_type))
file_format_id = cursor.fetchone()
return file_format_id[0] if file_format_id else None
@cursor_wrapper
def __get_query_id(self, query_name, query, cursor):
result = cursor.execute("select query_id from Query where name=%s", query_name)
query_id = cursor.fetchone()
return query_id[0] if query_id else None
@cursor_wrapper
def __get_workload_id(self, workload, scale_factor, cursor):
result = cursor.execute("select workload_id from Workload where name=%s and "\
"scale_factor=%s", (workload, scale_factor))
workload_id = cursor.fetchone()
return workload_id[0] if workload_id else None
@cursor_wrapper
def __insert_run_info(self, run_info, cursor):
cursor.execute("insert into RunInfo (run_info) values (%s)", run_info)
result = cursor.execute("SELECT LAST_INSERT_ID()")
run_info_id = cursor.fetchone()
return run_info_id[0] if run_info_id else None
@cursor_wrapper
def __insert_execution_result(self, query_id, workload_id, file_type_id, num_clients,
cluster_name, executor_name, avg_time, stddev, run_date, version, notes,
run_info_id, num_iterations, runtime_profile, is_official, cursor):
result = cursor.execute("insert into ExecutionResults (run_info_id, query_id, "\
"workload_id, file_type_id, num_clients, cluster_name, executor_name, avg_time,"\
" stddev, run_date, version, notes, num_iterations, profile, is_official) values"\
"(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)", \
(run_info_id, query_id, workload_id, file_type_id, num_clients, cluster_name,
executor_name, avg_time, stddev, run_date, version, notes, num_iterations,
runtime_profile, is_official))
@cursor_wrapper
def __insert_query_info(self, name, query, cursor):
cursor.execute("insert into Query (name, query) values (%s, %s)", (name, query))
result = cursor.execute("SELECT LAST_INSERT_ID()")
query_id = cursor.fetchone()
return query_id[0] if query_id else None
@cursor_wrapper
def __insert_workload_info(self, name, scale_factor, cursor):
cursor.execute("insert into Workload (name, scale_factor) "\
"values(%s, %s)", (name, scale_factor))
result = cursor.execute("SELECT LAST_INSERT_ID()")
workload_id = cursor.fetchone()
return workload_id[0] if workload_id else None
@cursor_wrapper
def __print_execution_results(self, run_info_id, cursor):
result = cursor.execute("select e.executor_name, e.run_date, q.name, w.name, "\
"f.format, f.compression_codec, f.compression_type, "\
"e.avg_time, e.cluster_name, e.notes, r.run_info, "\
"r.run_info_id "\
"from ExecutionResults e "\
"join RunInfo r on (e.run_info_id = r.run_info_id) "\
"join Query q on (e.query_id = q.query_id) "\
"join Workload w on (e.workload_id = w.workload_id) "\
"join FileType f on (e.file_type_id = f.file_type_id) "\
"where e.run_info_id=%d", run_info_id)
results = cursor.fetchall()
for row in results:
print row
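# Illustrative usage sketch (hypothetical connection values, not part of the original module):
#   store = PerfResultDataStore('localhost', 'perf_user', 'perf_pass', 'perf_db')
#   run_info_id = store.insert_run_info('nightly perf run')
#   store.print_execution_results(run_info_id)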
|
import requests
from .config import SUPERADMIN, KEEHOST_URL, KEEHOST_APIKEY
def create_super_admin():
""" Create a base super user based on the configuration """
r = requests.post(KEEHOST_URL + '/accounts',
json=SUPERADMIN,
headers={'Authorization': KEEHOST_APIKEY})
if r.status_code == 422:
print("Super admin already exists")
elif r.status_code == 201:
print("Super admin created ! %s" % r.json())
else:
print("Uknown error when creating super admin: %s" % r.json())
def fixtures():
""" Fixtures entry point """
create_super_admin()
|
import unittest.mock
from typing import cast, Any, List, Dict
import pykube.exceptions
import pytest
from pykube import HTTPClient
from pykube.objects import NamespacedAPIObject
from pytest_mock import MockerFixture, MockFixture
from pytest_helm_charts.utils import wait_for_namespaced_objects_condition
MockCR = NamespacedAPIObject
def get_ready_objects_filter_mock(mocker: MockerFixture, k8s_api_call_results: List[Any]) -> unittest.mock.Mock:
objects_mock = mocker.Mock(name="AppCR objects")
filter_mock = mocker.Mock(name="AppCR objects->filter")
objects_mock.filter.return_value = filter_mock
filter_mock.get_by_name.side_effect = k8s_api_call_results
return objects_mock
@pytest.mark.parametrize(
"k8s_api_call_results,missing_ok,expected_result",
[
# One matching app found as expected
([{"status": "expected"}], False, 1),
# One not matching app found and missing is OK
([{"status": "unexpected"}], True, TimeoutError),
# One matching and one not and missing is OK
([{"status": "expected"}, {"status": "unexpected"}], True, TimeoutError),
# One not matching app found and missing is not OK
([pykube.exceptions.ObjectDoesNotExist], False, pykube.exceptions.ObjectDoesNotExist),
# One matching and one not found; missing is OK
([{"status": "expected"}, pykube.exceptions.ObjectDoesNotExist], True, TimeoutError),
# One matching and one not found; missing is not OK
([{"status": "expected"}, pykube.exceptions.ObjectDoesNotExist], False, pykube.exceptions.ObjectDoesNotExist),
],
ids=[
"One matching app found as expected",
"One not matching app found and missing is OK",
"One matching and one not and missing is OK",
"One not matching app found and missing is not OK",
"One matching and one not found; missing is OK",
"One matching and one not found; missing is not OK",
],
)
def test_wait_for_namespaced_objects_condition(
mocker: MockFixture, k8s_api_call_results: List[Any], missing_ok: bool, expected_result: Any
) -> None:
objects_mock = get_ready_objects_filter_mock(mocker, k8s_api_call_results)
mocker.patch("tests.test_utils.MockCR")
cast(unittest.mock.Mock, MockCR).objects.return_value = objects_mock
check_fun_called = False
def check_fun(obj: MockCR) -> bool:
assert obj is not None
nonlocal check_fun_called
check_fun_called = True
hacked_type_dict = cast(Dict[str, Any], obj)
return hacked_type_dict["status"] == "expected"
try:
result = wait_for_namespaced_objects_condition(
cast(HTTPClient, None), MockCR, ["mock_cr"] * len(k8s_api_call_results), "test_ns", check_fun, 1, missing_ok
)
except Exception as e:
if (expected_result is TimeoutError and type(e) is TimeoutError) or (
expected_result is pykube.exceptions.ObjectDoesNotExist and type(e) is pykube.exceptions.ObjectDoesNotExist
):
# we have the expected exception
pass
else:
raise
else:
assert type(expected_result) is int
assert len(result) == expected_result
assert result == k8s_api_call_results
assert check_fun_called
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from functools import singledispatch
import warnings
from jax import config, nn, random, tree_util
import jax.numpy as jnp
try:
# jaxns changes the default precision to double precision
# so here we undo that action
use_x64 = config.jax_enable_x64
from jaxns.nested_sampling import NestedSampler as OrigNestedSampler
from jaxns.plotting import plot_cornerplot, plot_diagnostics
from jaxns.prior_transforms.common import ContinuousPrior
from jaxns.prior_transforms.prior_chain import PriorChain, UniformBase
from jaxns.utils import summary
config.update("jax_enable_x64", use_x64)
except ImportError as e:
raise ImportError(
"To use this module, please install `jaxns` package. It can be"
" installed with `pip install jaxns`"
) from e
import numpyro
import numpyro.distributions as dist
from numpyro.handlers import reparam, seed, trace
from numpyro.infer import Predictive
from numpyro.infer.reparam import Reparam
from numpyro.infer.util import _guess_max_plate_nesting, _validate_model, log_density
__all__ = ["NestedSampler"]
class UniformPrior(ContinuousPrior):
def __init__(self, name, shape):
prior_base = UniformBase(shape, jnp.result_type(float))
super().__init__(name, shape, parents=[], tracked=True, prior_base=prior_base)
def transform_U(self, U, **kwargs):
return U
@singledispatch
def uniform_reparam_transform(d):
"""
A helper for :class:`UniformReparam` to get the transform that transforms
a uniform distribution over a unit hypercube to the target distribution `d`.
"""
if isinstance(d, dist.TransformedDistribution):
outer_transform = dist.transforms.ComposeTransform(d.transforms)
return lambda q: outer_transform(uniform_reparam_transform(d.base_dist)(q))
if isinstance(
d, (dist.Independent, dist.ExpandedDistribution, dist.MaskedDistribution)
):
return lambda q: uniform_reparam_transform(d.base_dist)(q)
return d.icdf
@uniform_reparam_transform.register(dist.MultivariateNormal)
def _(d):
outer_transform = dist.transforms.LowerCholeskyAffine(d.loc, d.scale_tril)
return lambda q: outer_transform(dist.Normal(0, 1).icdf(q))
@uniform_reparam_transform.register(dist.BernoulliLogits)
@uniform_reparam_transform.register(dist.BernoulliProbs)
def _(d):
def transform(q):
x = q < d.probs
return x.astype(jnp.result_type(x, int))
return transform
@uniform_reparam_transform.register(dist.CategoricalLogits)
@uniform_reparam_transform.register(dist.CategoricalProbs)
def _(d):
return lambda q: jnp.sum(jnp.cumsum(d.probs, axis=-1) < q[..., None], axis=-1)
@uniform_reparam_transform.register(dist.Dirichlet)
def _(d):
gamma_dist = dist.Gamma(d.concentration)
def transform_fn(q):
# NB: icdf is not available yet for Gamma distribution
        # so this will raise a NotImplementedError for now.
# We will need scipy.special.gammaincinv, which is not available yet in JAX
# see issue: https://github.com/google/jax/issues/5350
        # TODO: consider wrapping jaxns' GammaPrior transform implementation
gammas = uniform_reparam_transform(gamma_dist)(q)
return gammas / gammas.sum(-1, keepdims=True)
return transform_fn
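# Illustrative examples of the dispatch above (added for clarity; values are approximate):
#   uniform_reparam_transform(dist.Normal(0.0, 1.0))(jnp.array(0.975))
#       # ~1.96, i.e. Normal(0, 1).icdf(0.975)
#   uniform_reparam_transform(dist.CategoricalProbs(jnp.array([0.2, 0.5, 0.3])))(jnp.array(0.65))
#       # 1, because exactly one cumulative probability (0.2) lies below 0.65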
class UniformReparam(Reparam):
"""
Reparameterize a distribution to a Uniform over the unit hypercube.
    Most univariate distributions use the inverse CDF for the reparameterization.
"""
def __call__(self, name, fn, obs):
assert obs is None, "TransformReparam does not support observe statements"
shape = fn.shape()
fn, expand_shape, event_dim = self._unwrap(fn)
transform = uniform_reparam_transform(fn)
tiny = jnp.finfo(jnp.result_type(float)).tiny
x = numpyro.sample(
"{}_base".format(name),
dist.Uniform(tiny, 1).expand(shape).to_event(event_dim).mask(False),
)
# Simulate a numpyro.deterministic() site.
return None, transform(x)
class NestedSampler:
"""
(EXPERIMENTAL) A wrapper for `jaxns`, a nested sampling package based on JAX.
See reference [1] for details on the meaning of each parameter.
Please consider citing this reference if you use the nested sampler in your research.
.. note:: To enumerate over a discrete latent variable, you can add the keyword
`infer={"enumerate": "parallel"}` to the corresponding `sample` statement.
.. note:: To improve the performance, please consider enabling x64 mode at the beginning
of your NumPyro program ``numpyro.enable_x64()``.
**References**
1. *JAXNS: a high-performance nested sampling package based on JAX*,
Joshua G. Albert (https://arxiv.org/abs/2012.15286)
    :param callable model: a callable with NumPyro primitives
:param int num_live_points: the number of live points. As a rule-of-thumb, we should
allocate around 50 live points per possible mode.
:param int max_samples: the maximum number of iterations and samples
:param str sampler_name: either "slice" (default value) or "multi_ellipsoid"
:param int depth: an integer which determines the maximum number of ellipsoids to
construct via hierarchical splitting (typical range: 3 - 9, default to 5)
:param int num_slices: the number of slice sampling proposals at each sampling step
(typical range: 1 - 5, default to 5)
:param float termination_frac: termination condition (typical range: 0.001 - 0.01)
(default to 0.01).
**Example**
.. doctest::
>>> from jax import random
>>> import jax.numpy as jnp
>>> import numpyro
>>> import numpyro.distributions as dist
>>> from numpyro.contrib.nested_sampling import NestedSampler
>>> true_coefs = jnp.array([1., 2., 3.])
>>> data = random.normal(random.PRNGKey(0), (2000, 3))
>>> labels = dist.Bernoulli(logits=(true_coefs * data).sum(-1)).sample(random.PRNGKey(1))
>>>
>>> def model(data, labels):
... coefs = numpyro.sample('coefs', dist.Normal(0, 1).expand([3]))
... intercept = numpyro.sample('intercept', dist.Normal(0., 10.))
... return numpyro.sample('y', dist.Bernoulli(logits=(coefs * data + intercept).sum(-1)),
... obs=labels)
>>>
>>> ns = NestedSampler(model)
>>> ns.run(random.PRNGKey(2), data, labels)
>>> samples = ns.get_samples(random.PRNGKey(3), num_samples=1000)
>>> assert jnp.mean(jnp.abs(samples['intercept'])) < 0.05
>>> print(jnp.mean(samples['coefs'], axis=0)) # doctest: +SKIP
[0.93661342 1.95034876 2.86123884]
"""
def __init__(
self,
model,
*,
num_live_points=1000,
max_samples=100000,
sampler_name="slice",
depth=5,
num_slices=5,
termination_frac=0.01
):
self.model = model
self.num_live_points = num_live_points
self.max_samples = max_samples
self.termination_frac = termination_frac
self.sampler_name = sampler_name
self.depth = depth
self.num_slices = num_slices
self._samples = None
self._log_weights = None
self._results = None
def run(self, rng_key, *args, **kwargs):
"""
Run the nested samplers and collect weighted samples.
:param random.PRNGKey rng_key: Random number generator key to be used for the sampling.
:param args: The arguments needed by the `model`.
:param kwargs: The keyword arguments needed by the `model`.
"""
rng_sampling, rng_predictive = random.split(rng_key)
# reparam the model so that latent sites have Uniform(0, 1) priors
prototype_trace = trace(seed(self.model, rng_key)).get_trace(*args, **kwargs)
param_names = [
site["name"]
for site in prototype_trace.values()
if site["type"] == "sample"
and not site["is_observed"]
and site["infer"].get("enumerate", "") != "parallel"
]
deterministics = [
site["name"]
for site in prototype_trace.values()
if site["type"] == "deterministic"
]
reparam_model = reparam(
self.model, config={k: UniformReparam() for k in param_names}
)
# enable enumerate if needed
has_enum = any(
site["type"] == "sample"
and site["infer"].get("enumerate", "") == "parallel"
for site in prototype_trace.values()
)
if has_enum:
from numpyro.contrib.funsor import enum, log_density as log_density_
max_plate_nesting = _guess_max_plate_nesting(prototype_trace)
_validate_model(prototype_trace)
reparam_model = enum(reparam_model, -max_plate_nesting - 1)
else:
log_density_ = log_density
def loglik_fn(**params):
return log_density_(reparam_model, args, kwargs, params)[0]
# use NestedSampler with identity prior chain
prior_chain = PriorChain()
for name in param_names:
prior = UniformPrior(name + "_base", prototype_trace[name]["fn"].shape())
prior_chain.push(prior)
# XXX: the `marginalised` keyword in jaxns can be used to get expectation of some
# quantity over posterior samples; it can be helpful to expose it in this wrapper
ns = OrigNestedSampler(
loglik_fn,
prior_chain,
sampler_name=self.sampler_name,
sampler_kwargs={"depth": self.depth, "num_slices": self.num_slices},
max_samples=self.max_samples,
num_live_points=self.num_live_points,
collect_samples=True,
)
        # some places in jaxns use float64 and raise warnings if the default dtype is
        # float32, so we suppress them here to avoid confusion
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=".*will be truncated to dtype float32.*"
)
results = ns(rng_sampling, termination_frac=self.termination_frac)
# transform base samples back to original domains
# Here we only transform the first valid num_samples samples
# NB: the number of weighted samples obtained from jaxns is results.num_samples
# and only the first num_samples values of results.samples are valid.
num_samples = results.num_samples
samples = tree_util.tree_map(lambda x: x[:num_samples], results.samples)
predictive = Predictive(
reparam_model, samples, return_sites=param_names + deterministics
)
samples = predictive(rng_predictive, *args, **kwargs)
# replace base samples in jaxns results by transformed samples
self._results = results._replace(samples=samples)
def get_samples(self, rng_key, num_samples):
"""
Draws samples from the weighted samples collected from the run.
:param random.PRNGKey rng_key: Random number generator key to be used to draw samples.
:param int num_samples: The number of samples.
:return: a dict of posterior samples
"""
if self._results is None:
raise RuntimeError(
"NestedSampler.run(...) method should be called first to obtain results."
)
samples, log_weights = self.get_weighted_samples()
p = nn.softmax(log_weights)
idx = random.choice(rng_key, log_weights.shape[0], (num_samples,), p=p)
return {k: v[idx] for k, v in samples.items()}
def get_weighted_samples(self):
"""
Gets weighted samples and their corresponding log weights.
"""
if self._results is None:
raise RuntimeError(
"NestedSampler.run(...) method should be called first to obtain results."
)
num_samples = self._results.num_samples
return self._results.samples, self._results.log_p[:num_samples]
def print_summary(self):
"""
Print summary of the result. This is a wrapper of :func:`jaxns.utils.summary`.
"""
if self._results is None:
raise RuntimeError(
"NestedSampler.run(...) method should be called first to obtain results."
)
summary(self._results)
def diagnostics(self):
"""
Plot diagnostics of the result. This is a wrapper of :func:`jaxns.plotting.plot_diagnostics`
and :func:`jaxns.plotting.plot_cornerplot`.
"""
if self._results is None:
raise RuntimeError(
"NestedSampler.run(...) method should be called first to obtain results."
)
plot_diagnostics(self._results)
plot_cornerplot(self._results)
|
# Compute x**y
def exp(x: int, y: int) -> int:
a: int = 0
def f(i: int) -> int:
nonlocal a
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
return f(i-1)
a = 1
return f(y)
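# Illustrative trace (added for clarity): exp(2, 3) sets a = 1, then f(3) multiplies
# a by x three times (a: 1 -> 2 -> 4 -> 8) before f(0) returns geta() == 8.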
# Input parameter
# n:int = 42
# # Run [0, n]
# i:int = 0
# # Crunch
# while i <= n:
# print(exp(2, i % 31))
# i = i + 1
# __assert__(exp(2,3) == 8)
# __assert__(exp(3,3) == 27)
# __assert__(exp(3,4) == 81)
# __assert__(exp(4,4) == 256)
# __assert__(exp(5,1) == 5)
# __assert__(exp(1,99) == 1)
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import json
import re
from http import HTTPStatus
import logging
import tarfile
from flask_restful import Resource, reqparse, request
from google.protobuf.json_format import ParseDict, ParseError
from fedlearner_webconsole.workflow_template.models import WorkflowTemplate
from fedlearner_webconsole.proto import workflow_definition_pb2
from fedlearner_webconsole.db import db
from fedlearner_webconsole.exceptions import (
NotFoundException, InvalidArgumentException,
ResourceConflictException)
def _classify_variable(variable):
if variable.value_type == 'CODE':
try:
json.loads(variable.value)
except json.JSONDecodeError as e:
raise InvalidArgumentException(str(e))
return variable
def dict_to_workflow_definition(config):
try:
template_proto = ParseDict(config,
workflow_definition_pb2.WorkflowDefinition())
for variable in template_proto.variables:
_classify_variable(variable)
for job in template_proto.job_definitions:
for variable in job.variables:
_classify_variable(variable)
except ParseError as e:
raise InvalidArgumentException(details={'config': str(e)})
return template_proto
def _dic_without_key(d, key):
result = dict(d)
del result[key]
return result
class WorkflowTemplatesApi(Resource):
def get(self):
templates = WorkflowTemplate.query
if 'group_alias' in request.args:
templates = templates.filter_by(
group_alias=request.args['group_alias'])
if 'is_left' in request.args:
is_left = request.args.get(key='is_left', type=int)
if is_left is None:
raise InvalidArgumentException('is_left must be 0 or 1')
templates = templates.filter_by(is_left=is_left)
# remove config from dicts to reduce the size of the list
return {'data': [_dic_without_key(t.to_dict(),
'config') for t in templates.all()
]}, HTTPStatus.OK
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('name', required=True, help='name is empty')
parser.add_argument('comment')
parser.add_argument('config', type=dict, required=True,
help='config is empty')
data = parser.parse_args()
name = data['name']
comment = data['comment']
config = data['config']
if WorkflowTemplate.query.filter_by(name=name).first() is not None:
raise ResourceConflictException(
'Workflow template {} already exists'.format(name))
template_proto = _check_config(config)
template = WorkflowTemplate(name=name,
comment=comment,
group_alias=template_proto.group_alias,
is_left=template_proto.is_left)
template.set_config(template_proto)
db.session.add(template)
db.session.commit()
logging.info('Inserted a workflow_template to db')
return {'data': template.to_dict()}, HTTPStatus.CREATED
class WorkflowTemplateApi(Resource):
def get(self, template_id):
result = WorkflowTemplate.query.filter_by(id=template_id).first()
if result is None:
raise NotFoundException()
return {'data': result.to_dict()}, HTTPStatus.OK
def delete(self, template_id):
result = WorkflowTemplate.query.filter_by(id=template_id)
if result.first() is None:
raise NotFoundException()
result.delete()
db.session.commit()
return {'data': {}}, HTTPStatus.OK
def put(self, template_id):
parser = reqparse.RequestParser()
parser.add_argument('name', required=True, help='name is empty')
parser.add_argument('comment')
parser.add_argument('config', type=dict, required=True,
help='config is empty')
data = parser.parse_args()
name = data['name']
comment = data['comment']
config = data['config']
tmp = WorkflowTemplate.query.filter_by(name=name).first()
if tmp is not None and tmp.id != template_id:
raise ResourceConflictException(
'Workflow template {} already exists'.format(name))
template = WorkflowTemplate.query.filter_by(id=template_id).first()
if template is None:
raise NotFoundException()
template_proto = _check_config(config)
template.set_config(template_proto)
template.name = name
template.comment = comment
template.group_alias = template_proto.group_alias
template.is_left = template_proto.is_left
db.session.commit()
return {'data': template.to_dict()}, HTTPStatus.OK
def _check_config(config):
# TODO: needs tests
if 'group_alias' not in config:
raise InvalidArgumentException(details={
'config.group_alias': 'config.group_alias is required'})
if 'is_left' not in config:
raise InvalidArgumentException(
details={'config.is_left': 'config.is_left is required'})
# form to proto buffer
template_proto = dict_to_workflow_definition(config)
for index, job_def in enumerate(template_proto.job_definitions):
# pod label name must be no more than 63 characters.
# workflow.uuid is 20 characters, pod name suffix such as
# '-follower-master-0' is less than 19 characters, so the
# job name must be no more than 24
if len(job_def.name) > 24:
raise InvalidArgumentException(
details=
{f'config.job_definitions[{index}].job_name'
: 'job_name must be no more than 24 characters'})
# limit from k8s
        if not re.fullmatch('[a-z0-9-]*', job_def.name):
raise InvalidArgumentException(
details=
{f'config.job_definitions[{index}].job_name'
: 'Only letters(a-z), numbers(0-9) '
'and dashes(-) are supported.'})
return template_proto
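# Illustrative length budget behind the job-name check above: Kubernetes labels allow at
# most 63 characters, and 63 - 20 (workflow.uuid) - 19 (pod name suffix) leaves 24 for job_def.name.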
class CodeApi(Resource):
def get(self):
parser = reqparse.RequestParser()
parser.add_argument('code_path', type=str, location='args',
required=True,
help='code_path is required')
data = parser.parse_args()
code_path = data['code_path']
try:
with tarfile.open(code_path) as tar:
code_dict = {}
for file in tar.getmembers():
if tar.extractfile(file) is not None:
if '._' not in file.name and file.isfile():
code_dict[file.name] = str(
tar.extractfile(file).read(),
encoding='utf-8')
return {'data': code_dict}, HTTPStatus.OK
except Exception as e:
logging.error('Get code: %s', repr(e))
raise InvalidArgumentException(details={'code_path': 'wrong path'})
def initialize_workflow_template_apis(api):
api.add_resource(WorkflowTemplatesApi, '/workflow_templates')
api.add_resource(WorkflowTemplateApi,
'/workflow_templates/<int:template_id>')
api.add_resource(CodeApi, '/codes')
|
class SolidCurveIntersection(object,IEnumerable[Curve],IEnumerable,IDisposable):
""" This class represents the results of a calculation of intersection between a solid volume and a curve. """
def Dispose(self):
""" Dispose(self: SolidCurveIntersection) """
pass
def GetCurveSegment(self,index):
"""
GetCurveSegment(self: SolidCurveIntersection,index: int) -> Curve
Gets the curve segment generated by intersection.
index: The index.
Returns: The curve.
"""
pass
def GetCurveSegmentExtents(self,index):
"""
GetCurveSegmentExtents(self: SolidCurveIntersection,index: int) -> CurveExtents
Gets the extents for the given curve segment generated by intersection.
index: The index.
Returns: The curve extents.
"""
pass
def GetEnumerator(self):
"""
GetEnumerator(self: SolidCurveIntersection) -> IEnumerator[Curve]
Returns an enumerator that iterates through a collection.
Returns: An IEnumerator object that can be used to iterate through the collection.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: SolidCurveIntersection,disposing: bool) """
pass
def __contains__(self,*args):
""" __contains__[Curve](enumerable: IEnumerable[Curve],value: Curve) -> bool """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: SolidCurveIntersection) -> bool
"""
ResultType=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The result type used to calculate the intersections.
Get: ResultType(self: SolidCurveIntersection) -> SolidCurveIntersectionMode
"""
SegmentCount=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The number of segments in the results.
Get: SegmentCount(self: SolidCurveIntersection) -> int
"""
|
from xml.etree import ElementTree as ET
class ShardParser:
"""
Parses an XML object from a string.
"""
def __init__(self, xml):
self.xml = xml
def parse(self):
"""
Parses the XML object.
"""
return ET.fromstring(self.xml)
def search_for_element(self, element):
"""
Searches for an element in the XML object.
"""
return self.parse().find(element)
def search_for_tag(self, tag):
"""
Searches for a tag in the XML object.
"""
return self.parse().find(tag)
def jsonify(self):
"""
        Returns the parsed XML serialized as a unicode string.
"""
return ET.tostring(self.parse(), encoding="unicode")
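# Illustrative usage (hypothetical XML, not part of the original module):
#   parser = ShardParser("<shard><name>alpha</name></shard>")
#   parser.search_for_element("name").text   # -> "alpha"
#   parser.jsonify()                          # -> the parsed XML re-serialized as a unicode string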
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def freertos_repos():
http_archive(
name = "freertos",
build_file = Label("//third_party/freertos:BUILD.freertos.bazel"),
sha256 = "c4c29136071b84841c3f00675da35f5e61e83c1fa18ac43841c478f99190dd7d",
strip_prefix = "FreeRTOS-Kernel-0b1e9d79c82c1bf00e93142f9d5b1b7b62446995",
urls = [
"https://github.com/FreeRTOS/FreeRTOS-Kernel/archive/0b1e9d79c82c1bf00e93142f9d5b1b7b62446995.tar.gz",
],
patches = [
Label("//third_party/freertos:0001-Remove-mtime-address-macros.patch"),
Label("//third_party/freertos:0002-Remove-references-to-stdlib.h.patch"),
Label("//third_party/freertos:0003-Replace-string.h-with-references-to-OT-memory.h.patch"),
],
patch_args = ["-p1"],
)
|
from app.libraries.token_generator import TokenGenerator
from flask_jwt_extended.internal_utils import verify_token_type
from flask_jwt_extended.utils import get_jwt, get_jwt_identity
from flask_restful import Resource
from flask import request
from app.models.user import User
from app.response import response
from app.libraries.access_jwt import refresh_jwt_required
from app.core.service.user_service import UserService
class RegisterController(Resource):
def post(self):
try:
user = UserService()
user = user.create(
name=request.json['name'],
email=request.json['email'],
password=request.json['password'],
confirmation_password=request.json['confirmation_password']
)
return response.ok('User Registered!', user)
except Exception as e:
return response.bad_request("{}".format(e), '')
class AuthController(Resource):
def post(self):
try:
email = request.json['email']
password = request.json['password']
user = UserService().auth(email=email, password=password)
            return response.ok(f'Successfully logged in, welcome {user["email"]}!', user)
except Exception as e:
return response.bad_request("{}".format(e), '')
class RefreshTokenController(Resource):
@refresh_jwt_required
def post(self):
try:
token = get_jwt()
            if token.get('type') != "refresh":
                return response.un_authorized("Token is not a refresh token!", "")
jwt_identity = get_jwt_identity()
user = User.objects(id=jwt_identity['id']).first()
if not user:
return response.bad_request("Token is not valid", "")
payload = TokenGenerator(user).generate_access_token()
return response.ok(f'Token refreshed!', payload)
except Exception as e:
return response.bad_request("{}".format(e), '')
|
# coding=utf-8
from __future__ import unicode_literals
import re
import unittest
from ukpostcodeparser.parser import parse_uk_postcode
from faker import Faker
from faker.providers.address.de_AT import Provider as DeAtProvider
from faker.providers.address.de_DE import Provider as DeProvider
from faker.providers.address.fa_IR import Provider as IrProvider
from faker.providers.address.el_GR import Provider as GrProvider
from faker.providers.address.en_AU import Provider as EnAuProvider
from faker.providers.address.en_CA import Provider as EnCaProvider
from faker.providers.address.en_US import Provider as EnUsProvider
from faker.providers.address.fr_FR import Provider as FrFrProvider
from faker.providers.address.fi_FI import Provider as FiProvider
from faker.providers.address.pt_PT import Provider as PtPtProvider
from faker.providers.address.ja_JP import Provider as JaProvider
from faker.providers.address.ne_NP import Provider as NeProvider
from six import string_types
class TestBaseProvider(unittest.TestCase):
""" Tests addresses in the base provider """
def setUp(self):
self.factory = Faker('')
def test_alpha_2_country_codes(self):
country_code = Faker().country_code(representation='alpha-2')
assert len(country_code) == 2
assert country_code.isalpha()
def test_alpha_2_country_codes_as_default(self):
country_code = Faker().country_code()
assert len(country_code) == 2
assert country_code.isalpha()
def test_alpha_3_country_codes(self):
country_code = Faker().country_code(representation='alpha-3')
assert len(country_code) == 3
assert country_code.isalpha()
def test_bad_country_code_representation(self):
with self.assertRaises(ValueError):
Faker().country_code(representation='hello')
class TestAr_AA(unittest.TestCase):
""" Tests addresses in the ar_AA locale """
def setUp(self):
self.factory = Faker('ar_AA')
def test_alpha_2_country_codes(self):
country_code = Faker().country_code(representation='alpha-2')
assert len(country_code) == 2
assert country_code.isalpha()
def test_alpha_2_country_codes_as_default(self):
country_code = Faker().country_code()
assert len(country_code) == 2
assert country_code.isalpha()
def test_alpha_3_country_codes(self):
country_code = Faker().country_code(representation='alpha-3')
assert len(country_code) == 3
assert country_code.isalpha()
def test_bad_country_code_representation(self):
with self.assertRaises(ValueError):
Faker().country_code(representation='hello')
class TestCsCZ(unittest.TestCase):
""" Tests in addresses in the cs_CZ locale """
def setUp(self):
self.factory = Faker('cs_CZ')
def test_street_suffix_short(self):
street_suffix_short = self.factory.street_suffix_short()
assert isinstance(street_suffix_short, string_types)
def test_street_suffix_long(self):
street_suffix_long = self.factory.street_suffix_long()
assert isinstance(street_suffix_long, string_types)
def test_city_name(self):
city = self.factory.city_name()
assert isinstance(city, string_types)
def test_street_name(self):
street_name = self.factory.street_name()
assert isinstance(street_name, string_types)
def test_state(self):
state = self.factory.state()
assert isinstance(state, string_types)
def test_postcode(self):
postcode = self.factory.postcode()
assert isinstance(postcode, string_types)
def test_city_with_postcode(self):
city_with_postcode = self.factory.city_with_postcode()
assert isinstance(city_with_postcode, string_types)
class TestDeAT(unittest.TestCase):
""" Tests in addresses in the de_AT locale """
def setUp(self):
self.factory = Faker('de_AT')
def test_city(self):
city = self.factory.city()
assert isinstance(city, string_types)
assert city in DeAtProvider.cities
def test_state(self):
state = self.factory.state()
assert isinstance(state, string_types)
assert state in DeAtProvider.states
def test_street_suffix_short(self):
street_suffix_short = self.factory.street_suffix_short()
assert isinstance(street_suffix_short, string_types)
assert street_suffix_short in DeAtProvider.street_suffixes_short
def test_street_suffix_long(self):
street_suffix_long = self.factory.street_suffix_long()
assert isinstance(street_suffix_long, string_types)
assert street_suffix_long in DeAtProvider.street_suffixes_long
def test_country(self):
country = self.factory.country()
assert isinstance(country, string_types)
assert country in DeAtProvider.countries
def test_postcode(self):
postcode = self.factory.postcode()
assert re.match(r"\d{4}", postcode)
def test_city_with_postcode(self):
city_with_postcode = self.factory.city_with_postcode()
assert isinstance(city_with_postcode, string_types)
class TestDeDE(unittest.TestCase):
""" Tests in addresses in the de_DE locale """
def setUp(self):
self.factory = Faker('de_DE')
def test_city(self):
city = self.factory.city()
assert isinstance(city, string_types)
assert city in DeProvider.cities
def test_state(self):
state = self.factory.state()
assert isinstance(state, string_types)
assert state in DeProvider.states
def test_street_suffix_short(self):
street_suffix_short = self.factory.street_suffix_short()
assert isinstance(street_suffix_short, string_types)
assert street_suffix_short in DeProvider.street_suffixes_short
def test_street_suffix_long(self):
street_suffix_long = self.factory.street_suffix_long()
assert isinstance(street_suffix_long, string_types)
assert street_suffix_long in DeProvider.street_suffixes_long
def test_country(self):
country = self.factory.country()
assert isinstance(country, string_types)
assert country in DeProvider.countries
def test_city_with_postcode(self):
city_with_postcode = self.factory.city_with_postcode()
assert isinstance(city_with_postcode, string_types)
class TestFaIR(unittest.TestCase):
""" Tests in addresses in the fa_IR locale """
def setUp(self):
self.factory = Faker('fa_IR')
def test_city_prefix(self):
city_prefix = self.factory.city_prefix()
assert isinstance(city_prefix, string_types)
assert city_prefix in IrProvider.city_prefixes
def test_secondary_address(self):
secondary_address = self.factory.secondary_address()
assert isinstance(secondary_address, string_types)
def test_state(self):
state = self.factory.state()
assert isinstance(state, string_types)
assert state in IrProvider.states
class TestFrFR(unittest.TestCase):
""" Tests addresses in the fr_FR locale """
def setUp(self):
self.factory = Faker('fr_FR')
def test_street_prefix(self):
street_prefix = self.factory.street_prefix()
assert isinstance(street_prefix, string_types)
assert street_prefix in FrFrProvider.street_prefixes
def test_city_prefix(self):
city_prefix = self.factory.city_prefix()
assert isinstance(city_prefix, string_types)
assert city_prefix in FrFrProvider.city_prefixes
def test_region(self):
region = self.factory.region()
assert isinstance(region, string_types)
assert region in FrFrProvider.regions
def test_department(self):
department = self.factory.department()
assert isinstance(department, tuple)
assert department in FrFrProvider.departments
def test_department_name(self):
department_name = self.factory.department_name()
assert isinstance(department_name, string_types)
def test_department_number(self):
department_number = self.factory.department_number()
assert isinstance(department_number, string_types)
class TestHeIL(unittest.TestCase):
""" Tests addresses in the he_IL locale """
def setUp(self):
self.factory = Faker('he_IL')
def test_city_name(self):
city_name = self.factory.city_name()
assert isinstance(city_name, string_types)
def test_street_title(self):
street_title = self.factory.street_title()
assert isinstance(street_title, string_types)
class TestFiFI(unittest.TestCase):
""" Tests in addresses in the fi_FI locale """
def setUp(self):
self.factory = Faker('fi_FI')
def test_city(self):
city = self.factory.city()
assert isinstance(city, string_types)
assert city in FiProvider.cities
def test_street_suffix(self):
suffix = self.factory.street_suffix()
assert isinstance(suffix, string_types)
assert suffix in FiProvider.street_suffixes
def test_state(self):
state = self.factory.state()
assert isinstance(state, string_types)
assert state in FiProvider.states
class TestElGR(unittest.TestCase):
""" Tests addresses in the el_GR locale """
def setUp(self):
self.factory = Faker('el_GR')
def test_line_address(self):
address = self.factory.line_address()
assert isinstance(address, string_types)
def test_street_prefix_short(self):
street_prefix_short = self.factory.street_prefix_short()
assert isinstance(street_prefix_short, string_types)
assert street_prefix_short in GrProvider.street_prefixes_short
def test_street_prefix_long(self):
street_prefix_long = self.factory.street_prefix_long()
assert isinstance(street_prefix_long, string_types)
assert street_prefix_long in GrProvider.street_prefixes_long
def test_street(self):
street = self.factory.street()
assert isinstance(street, string_types)
assert street in GrProvider.localities
def test_city(self):
city = self.factory.city()
assert isinstance(city, string_types)
assert city in GrProvider.cities
def test_region(self):
region = self.factory.region()
assert isinstance(region, string_types)
assert region in GrProvider.regions
class TestEnAU(unittest.TestCase):
""" Tests addresses in the en_AU locale """
def setUp(self):
self.factory = Faker('en_AU')
def test_postcode(self):
for _ in range(100):
postcode = self.factory.postcode()
assert re.match(r"\d{4}", postcode)
def test_state(self):
state = self.factory.state()
assert isinstance(state, string_types)
assert state in EnAuProvider.states
def test_city_prefix(self):
city_prefix = self.factory.city_prefix()
assert isinstance(city_prefix, string_types)
assert city_prefix in EnAuProvider.city_prefixes
def test_state_abbr(self):
state_abbr = self.factory.state_abbr()
assert isinstance(state_abbr, string_types)
assert state_abbr in EnAuProvider.states_abbr
assert state_abbr.isupper()
class TestEnNZ(unittest.TestCase):
""" Tests addresses in the en_NZ locale """
def setUp(self):
self.factory = Faker('en_NZ')
def test_state(self):
# No states in New Zealand
state = self.factory.state()
assert state == ''
def test_postcode(self):
for _ in range(100):
postcode = self.factory.postcode()
assert re.match(r"\d{4}", postcode)
class TestEnCA(unittest.TestCase):
""" Tests addresses in en_CA locale """
def setUp(self):
self.factory = Faker('en_CA')
def test_postcode(self):
for _ in range(100):
postcode = self.factory.postcode()
assert re.match(r"[A-Z][0-9][A-Z] ?[0-9][A-Z][0-9]",
postcode)
def test_postalcode(self):
for _ in range(100):
postalcode = self.factory.postalcode()
assert re.match(r"[A-Z][0-9][A-Z] ?[0-9][A-Z][0-9]",
postalcode)
def test_postal_code_letter(self):
postal_code_letter = self.factory.postal_code_letter()
assert re.match(r"[A-Z]", postal_code_letter)
def test_province(self):
province = self.factory.province()
assert isinstance(province, string_types)
assert province in EnCaProvider.provinces
def test_province_abbr(self):
province_abbr = self.factory.province_abbr()
assert isinstance(province_abbr, string_types)
assert province_abbr in EnCaProvider.provinces_abbr
def test_city_prefix(self):
city_prefix = self.factory.city_prefix()
assert isinstance(city_prefix, string_types)
assert city_prefix in EnCaProvider.city_prefixes
def test_secondary_address(self):
secondary_address = self.factory.secondary_address()
assert isinstance(secondary_address, string_types)
class TestEnGB(unittest.TestCase):
""" Tests addresses in the en_GB locale """
def setUp(self):
self.factory = Faker('en_GB')
def test_postcode(self):
for _ in range(100):
assert isinstance(parse_uk_postcode(self.factory.postcode()), tuple)
class TestEnUS(unittest.TestCase):
""" Tests addresses in the en_US locale """
def setUp(self):
self.factory = Faker('en_US')
def test_city_prefix(self):
city_prefix = self.factory.city_prefix()
assert isinstance(city_prefix, string_types)
assert city_prefix in EnUsProvider.city_prefixes
def test_state(self):
state = self.factory.state()
assert isinstance(state, string_types)
assert state in EnUsProvider.states
def test_state_abbr(self):
state_abbr = self.factory.state_abbr()
assert isinstance(state_abbr, string_types)
states_and_territories = EnUsProvider.states_and_territories_abbr
assert state_abbr in states_and_territories
def test_state_abbr_no_territories(self):
state_abbr = self.factory.state_abbr(include_territories=False)
assert isinstance(state_abbr, string_types)
assert state_abbr in EnUsProvider.states_abbr
def test_postcode(self):
for _ in range(100):
postcode = self.factory.postcode()
assert re.match(r"\d{5}", postcode)
def test_zipcode(self):
for _ in range(100):
zipcode = self.factory.zipcode()
assert re.match(r"\d{5}", zipcode)
def test_zipcode_plus4(self):
for _ in range(100):
zipcode_plus4 = self.factory.zipcode_plus4()
assert re.match(r"\d{5}(-\d{4})", zipcode_plus4)
def test_military_ship(self):
military_ship = self.factory.military_ship()
assert isinstance(military_ship, string_types)
assert military_ship in EnUsProvider.military_ship_prefix
assert re.match(r"[A-Z]", military_ship)
def test_military_state(self):
military_state = self.factory.military_state()
assert isinstance(military_state, string_types)
assert military_state in EnUsProvider.military_state_abbr
assert re.match(r"[A-Z]", military_state)
def test_military_apo(self):
military_apo = self.factory.military_apo()
assert isinstance(military_apo, string_types)
def test_military_dpo(self):
military_dpo = self.factory.military_dpo()
assert isinstance(military_dpo, string_types)
class TestHuHU(unittest.TestCase):
""" Tests addresses in the hu_HU locale """
def setUp(self):
self.factory = Faker('hu_HU')
def test_postcode_first_digit(self):
# Hungarian postcodes begin with 'H-' followed by 4 digits.
# The first digit may not begin with a zero.
for _ in range(100):
pcd = self.factory.postcode()
assert pcd[2] > "0"
def test_street_address(self):
"""
Tests street address.
        A street address must consist of a street name, a place type and a number, and end in a period.
"""
address = self.factory.street_address()
assert address[-1] == '.'
# Check for correct capitalisation of place type
assert address.split(" ")[-2][0].islower()
# Check for street number format
assert re.match(r"\d{1,4}\.", address.split(" ")[-1])
def test_street_address_with_county(self):
"""Tests street address with country. A street address must be:
- in three rows,
- starting with a valid street address,
- contain a valid post code,
- contain the place name validly capitalized.
"""
address = self.factory.street_address_with_county()
# Number of rows
assert len(address.split("\n")) == 3
first, second, last = address.split("\n")
# Test street address
assert first[0].isupper()
assert first.split(" ")[-2][0].islower()
assert re.match(r"\d{1,4}\.", first.split(" ")[-1])
# Test county line
assert second.split(" ")[-1][0].islower()
assert second.split(" ")[0][0].isupper()
# Test postcode
assert re.match(r"H-[1-9]\d{3}", last.split(" ")[0])
# Test place name capitalization
assert last.split(" ")[-1][0].isupper()
def test_address(self):
""" Tests the address provider in the hu_HU locale """
address = self.factory.address()
assert isinstance(address, string_types)
address_with_county = self.factory.street_address_with_county()
assert isinstance(address_with_county, string_types)
class TestJaJP(unittest.TestCase):
""" Tests addresses in the ja_JP locale """
def setUp(self):
self.factory = Faker('ja')
def test_address(self):
""" Test"""
country = self.factory.country()
assert isinstance(country, string_types)
assert country in JaProvider.countries
prefecture = self.factory.prefecture()
assert isinstance(prefecture, string_types)
assert prefecture in JaProvider.prefectures
city = self.factory.city()
assert isinstance(city, string_types)
assert city in JaProvider.cities
town = self.factory.town()
assert isinstance(town, string_types)
assert town in JaProvider.towns
chome = self.factory.chome()
assert isinstance(chome, string_types)
assert re.match(r"\d{1,2}丁目", chome)
ban = self.factory.ban()
assert isinstance(ban, string_types)
assert re.match(r"\d{1,2}番", ban)
gou = self.factory.gou()
assert isinstance(gou, string_types)
assert re.match(r"\d{1,2}号", gou)
building_name = self.factory.building_name()
assert isinstance(building_name, string_types)
assert building_name in JaProvider.building_names
postcode = self.factory.postcode()
assert isinstance(postcode, string_types)
assert re.match(r"\d{3}-\d{4}", postcode)
zipcode = self.factory.zipcode()
assert isinstance(zipcode, string_types)
assert re.match(r"\d{3}-\d{4}", zipcode)
address = self.factory.address()
assert isinstance(address, string_types)
class TestKoKR(unittest.TestCase):
""" Tests addresses in the ko_KR locale """
def setUp(self):
self.factory = Faker('ko_KR')
def test_address(self):
postcode = self.factory.postcode()
assert isinstance(postcode, string_types)
assert re.match(r"\d{5}", postcode)
postal_code = self.factory.postal_code()
assert isinstance(postal_code, string_types)
assert re.match(r"\d{5}", postal_code)
old_postal_code = self.factory.old_postal_code()
assert isinstance(old_postal_code, string_types)
assert re.match(r"\d{3}-\d{3}", old_postal_code)
class TestNeNP(unittest.TestCase):
""" Tests addresses in the ne_NP locale """
def setUp(self):
self.factory = Faker('ne_NP')
def test_address(self):
""" Tests the street address in ne_NP locale """
country = self.factory.country()
assert isinstance(country, string_types)
assert country in NeProvider.countries
district = self.factory.district()
assert isinstance(district, string_types)
assert district in NeProvider.districts
city = self.factory.city()
assert isinstance(city, string_types)
assert city in NeProvider.cities
class TestNoNO(unittest.TestCase):
""" Tests the street address in no_NO locale """
def setUp(self):
self.factory = Faker('no_NO')
def test_postcode(self):
for _ in range(100):
assert re.match(r'^[0-9]{4}$', self.factory.postcode())
def test_city_suffix(self):
suffix = self.factory.city_suffix()
assert isinstance(suffix, string_types)
def test_street_suffix(self):
suffix = self.factory.street_suffix()
assert isinstance(suffix, string_types)
def test_address(self):
address = self.factory.address()
assert isinstance(address, string_types)
class TestZhTW(unittest.TestCase):
""" Tests addresses in the zh_tw locale """
def setUp(self):
self.factory = Faker('zh_TW')
def test_address(self):
country = self.factory.country()
assert isinstance(country, string_types)
street = self.factory.street_name()
assert isinstance(street, string_types)
city = self.factory.city()
assert isinstance(city, string_types)
address = self.factory.address()
assert isinstance(address, string_types)
class TestZhCN(unittest.TestCase):
""" Tests addresses in the zh_cn locale """
def setUp(self):
self.factory = Faker('zh_CN')
def test_address(self):
country = self.factory.country()
assert isinstance(country, string_types)
street = self.factory.street_name()
assert isinstance(street, string_types)
city = self.factory.street_address()
assert isinstance(city, string_types)
province = self.factory.province()
assert isinstance(province, string_types)
district = self.factory.district()
assert isinstance(district, string_types)
address = self.factory.address()
assert isinstance(address, string_types)
for _ in range(100):
assert re.match(r'\d{5}', self.factory.postcode())
class TestPtBr(unittest.TestCase):
def setUp(self):
self.factory = Faker('pt_BR')
def test_address(self):
country = self.factory.country()
assert isinstance(country, string_types)
street = self.factory.street_name()
assert isinstance(street, string_types)
        street_address = self.factory.street_address()
        assert isinstance(street_address, string_types)
neighborhood = self.factory.neighborhood()
assert isinstance(neighborhood, string_types)
state = self.factory.state()
assert isinstance(state, string_types)
state_abbr = self.factory.state_abbr()
assert isinstance(state_abbr, string_types)
address = self.factory.address()
assert isinstance(address, string_types)
class TestPtPT(unittest.TestCase):
def setUp(self):
self.factory = Faker('pt_PT')
def test_distrito(self):
distrito = self.factory.distrito()
assert isinstance(distrito, string_types)
assert distrito in PtPtProvider.distritos
def test_freguesia(self):
freguesia = self.factory.freguesia()
assert isinstance(freguesia, string_types)
assert freguesia in PtPtProvider.freguesias
|
from .node import Node
from .problem import Problem
from .search import depth_limited_search
from .search import iterative_deepening_search
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import sys, os, re
import numpy as np
from collections import OrderedDict
import sys, os, re
import numpy as np
from collections import OrderedDict
import pickle
import glob
import datetime, time
import scipy.spatial.distance as distance
from typing import List, Union, Any, Dict, Set
from diff_representation.dataset import DataSet
from diff_representation.change_entry import ChangeExample
from diff_representation.utils.utils import get_entry_str
def get_rank_score(_candidate_scores):
# DCG
cum_score = 0.
score_map = {1: 2, 2: 1, 3: 0}
for i in range(len(_candidate_scores)):
cand_id, cand_score = _candidate_scores[i]
rank = i + 1
rel_score = score_map[cand_score]
cur_score = (np.exp(rel_score) - 1) / float(np.log2(rank + 1))
cum_score += cur_score
return cum_score
def load_query_results(file_path, with_score=True):
f = open(file_path)
line = f.readline()
assert line.startswith('***Seed Query***')
query_id = f.readline().strip()
query_id = query_id[len('Id:'):].strip()
print(f'\tseed query {query_id}', file=sys.stderr)
    while not re.match(r'^\d+ neighbors', line):
line = f.readline().strip()
f.readline()
candidate_scores = []
while True:
line = f.readline()
if not line:
break
e_id = line[len('Id:'):].strip()
while not line.startswith('Score:'):
line = f.readline()
if with_score:
score = int(line[len('Score:'):].strip())
else: score = None
line = f.readline()
assert line.startswith('*****')
candidate_scores.append((e_id, score))
f.close()
return {'seed_change_id': query_id, 'candidate_changes_and_scores': candidate_scores}
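# For reference, the parser above expects an annotation file laid out roughly like
# the files written by generate_top_k_query_results below (illustrative sketch,
# not a formal spec):
#
#   ***Seed Query***
#   Id: <seed change id>
#   ...                      (free-form text describing the seed change)
#   30 neighbors
#
#   Id: <candidate change id>
#   ...                      (free-form text describing the candidate)
#   Score: <1|2|3>           (1 appears to be the most relevant grade, per score_map)
#   *****
#   Id: <next candidate id>
#   ...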
def gather_all_query_results_from_annotations(annotation_folder, with_score=True):
relevance_data = dict()
for annotation_file in glob.glob(annotation_folder + '/*.*', recursive=True):
if os.path.isfile(annotation_file):
print(f'loading annotations from {annotation_file}', file=sys.stderr)
result = load_query_results(annotation_file, with_score=with_score)
seed_change_id = result['seed_change_id']
candidate_changes_and_scores = result['candidate_changes_and_scores']
print(f'\t{len(candidate_changes_and_scores)} entries', file=sys.stderr)
relevance_data.setdefault(seed_change_id, dict()).update({k: v for k, v in candidate_changes_and_scores})
return relevance_data
def dcg(candidate_changes_and_scores):
# discounted cumulative gain
cum_score = 0.
score_map = {1: 2, 2: 1, 3: 0}
for i in range(len(candidate_changes_and_scores)):
cand_id, cand_score = candidate_changes_and_scores[i]
rank = i + 1
rel_score = score_map[cand_score]
cur_score = (np.exp(rel_score) - 1) / float(np.log2(rank + 1))
cum_score += cur_score
return cum_score
def ndcg(candidate_changes_and_scores):
# normalized discounted cumulative gain
ranked_candidate_changes_and_scores = sorted(candidate_changes_and_scores, key=lambda x: x[1])
idcg_score = dcg(ranked_candidate_changes_and_scores)
dcg_score = dcg(candidate_changes_and_scores)
ndcg = dcg_score / idcg_score
return ndcg
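# Worked example (illustrative): with candidate_changes_and_scores =
# [('a', 1), ('b', 3), ('c', 2)] and score_map = {1: 2, 2: 1, 3: 0}, the gains at
# ranks 1, 2, 3 are e^2 - 1, e^0 - 1 and e^1 - 1, so
#   dcg = (e^2 - 1) / log2(2) + (e^0 - 1) / log2(3) + (e^1 - 1) / log2(4)
# The ideal ordering sorts by the raw annotation score ascending (most relevant
# first), i.e. [('a', 1), ('c', 2), ('b', 3)], and ndcg = dcg / idcg.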
def get_nn(dataset, feature_vecs, seed_query_id=None, K=30, dist_func=distance.cosine, return_self=False, query_vec=None):
"""get the top-K nearest neighbors given a seed query"""
if seed_query_id:
seed_query_idx = dataset.example_id_to_index[seed_query_id]
query_vec = feature_vecs[seed_query_idx]
example_distances = []
for idx in range(len(dataset.examples)):
if seed_query_id and return_self is False and idx == seed_query_idx:
continue
feat = feature_vecs[idx]
dist = dist_func(feat, query_vec)
example_distances.append((idx, dist))
example_distances.sort(key=lambda x: x[1])
results = []
for idx, dist in example_distances[:K]:
change_entry = dataset.examples[idx]
results.append((change_entry, dist))
return results
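# Example usage (illustrative; assumes `dataset` and `feature_vecs` are already
# built and aligned by index, and that 'some|change|id' is a hypothetical example id):
#
#   neighbors = get_nn(dataset, feature_vecs, seed_query_id='some|change|id', K=10)
#   for change_entry, dist in neighbors:
#       print(change_entry.id, dist)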
def generate_top_k_query_results(dataset: DataSet, feature_vecs: List, seed_query_ids: List[str], model_name=None, eval_folder=None, K=30, dist_func=distance.cosine):
if eval_folder is None:
assert model_name
model_name = model_name.replace('|', '_').replace('/', '_')
eval_folder = f"evaluation/{model_name}/{datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d-%H-%M-%S')}"
if not os.path.exists(eval_folder):
os.makedirs(eval_folder)
print(f'output query results to {eval_folder}', file=sys.stderr)
for seed_query_id in seed_query_ids:
print(f'processing {seed_query_id}', file=sys.stderr)
seed_query_example = dataset.get_example_by_id(seed_query_id)
neighbors = get_nn(dataset, feature_vecs, seed_query_id=seed_query_id, K=30)
f_name = seed_query_id.replace('|', '_').replace('/', '_')
f_name = os.path.join(eval_folder, f_name)
with open(f_name, 'w') as f:
f.write(f'***Seed Query***\n{get_entry_str(seed_query_example)}\n\n\n')
f.write(f'{len(neighbors)} neighbors\n\n')
for example, dist in neighbors:
f.write(get_entry_str(example, dist=dist) + '\n')
def dump_aggregated_query_results_from_query_results_for_annotation(annotation_folders: List[str], output_folder: str, relevance_db: Dict, dataset: DataSet):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
aggregated_query_results = dict()
for annotation_folder in annotation_folders:
query_results = gather_all_query_results_from_annotations(annotation_folder, with_score=False)
for seed_query_id, candidate_changes_and_scores in query_results.items():
aggregated_query_results.setdefault(seed_query_id, dict()).update(candidate_changes_and_scores)
    # filter out entries that have already been annotated
for seed_query_id in aggregated_query_results:
candidate_ids = list(aggregated_query_results[seed_query_id].keys())
if seed_query_id in relevance_db:
not_annotated_candidate_ids = [id for id in candidate_ids if id not in relevance_db[seed_query_id]]
candidate_ids = not_annotated_candidate_ids
f_name = seed_query_id.replace('|', '_').replace('/', '_')
f_name = os.path.join(output_folder, f_name)
seed_query_example = dataset.get_example_by_id(seed_query_id)
with open(f_name, 'w') as f:
f.write(f'***Seed Query***\n{get_entry_str(seed_query_example)}\n\n\n')
f.write(f'{len(candidate_ids)} neighbors\n\n')
np.random.shuffle(candidate_ids)
for cand_id in candidate_ids:
example = dataset.get_example_by_id(cand_id)
f.write(get_entry_str(example, dist=0.0) + '\n')
def generate_reranked_list(model, relevance_db, dataset, output_folder):
if not os.path.exists(output_folder):
os.makedirs(output_folder)
ranked_lists = dict()
for seed_change_id, annotated_candidates in relevance_db.items():
print(f'processing {seed_change_id}')
seed_change_idx = [i for i, e in enumerate(dataset.examples) if e.id == seed_change_id][0]
seed_change = dataset.examples[seed_change_idx]
seed_change_feat_vec = model.code_change_encoder.encode_code_changes([seed_change], code_encoder=model.sequential_code_encoder, batch_size=1)[0]
candidate_distances = []
f_name = seed_change_id.replace('|', '_').replace('/', '_')
f_name = os.path.join(output_folder, f_name)
with open(f_name, 'w') as f:
f.write(f'***Seed Query***\n{get_entry_str(seed_change)}\n\n\n')
candidate_ids = list(annotated_candidates)
candidate_examples = [dataset.get_example_by_id(x) for x in candidate_ids]
cand_feature_vecs = model.code_change_encoder.encode_code_changes(candidate_examples, code_encoder=model.sequential_code_encoder, batch_size=256)
for candidate_id, candidate_example, candidate_feat_vec in zip(candidate_ids, candidate_examples, cand_feature_vecs):
# print(f'\tevaluate {candidate_id}')
dist = distance.cosine(seed_change_feat_vec, candidate_feat_vec)
# dist = get_distance(seed_change, candidate_example, seed_change_feat_vec, candidate_feat_vec)
assert not np.isnan(dist)
candidate_distances.append((candidate_id, dist))
ranked_candidates = sorted(candidate_distances, key=lambda x: x[1])
for candidate_id, dist in ranked_candidates:
candidate_score = relevance_db[seed_change_id][candidate_id]
candidate = dataset.get_example_by_id(candidate_id)
f.write(get_entry_str(candidate, dist=dist, score=candidate_score) + '\n')
ranked_lists[seed_change_id] = [candidate_id for candidate_id, dist in ranked_candidates]
    save_to = os.path.join(output_folder, 'ranked_lists.bin')
    with open(save_to, 'wb') as f_out:
        pickle.dump(ranked_lists, f_out)
    print(f'save results to {save_to}')
if __name__ == '__main__':
eval_folder = sys.argv[1]
print(f'evaluating folder {eval_folder}', file=sys.stderr)
files = filter(lambda x: x, os.listdir(eval_folder))
eval_files_scores = OrderedDict()
for eval_file in files:
print(f'evaluating {eval_file}', file=sys.stderr)
full_file_path = os.path.join(eval_folder, eval_file)
f = open(full_file_path)
line = f.readline()
assert line.startswith('***Seed Query***')
query_id = f.readline().strip()
query_id = query_id[len('Id:'):].strip()
print(f'\tseed query {query_id}', file=sys.stderr)
        while not re.match(r'^\d+ neighbors', line):
line = f.readline().strip()
f.readline()
candidate_scores = []
while True:
line = f.readline()
if not line:
break
e_id = line[len('Id:'):].strip()
while not line.startswith('Score:'):
line = f.readline()
score = int(line[len('Score:'):].strip())
line = f.readline()
assert line.startswith('*****')
candidate_scores.append((e_id, score))
eval_files_scores[query_id] = candidate_scores
f.close()
print('', file=sys.stderr)
rank_scores = []
for query_id, candidate_scores in eval_files_scores.items():
rank_score = get_rank_score(candidate_scores)
print(f'{query_id}\t{rank_score}', file=sys.stderr)
rank_scores.append(rank_score)
print(f'\nAverage rank score: {np.average(rank_scores)}', file=sys.stderr)
|
'''
Created on Jul 16, 2018
@author: yiyedang
'''
import turtle
from Stack import Stack
class Disk:
def __init__(self, color, shape, width, x, y, speed):
self.x = x
self.y = y
self.shape = shape
self.color = color
self.t = turtle.Turtle()
self.height = 20
self.width = width
self.t.speed(speed)
self.moveto(x, y)
def moveto(self, x, y):
self.t.up()
self.t.goto(x, y)
self.t.down()
self.t.shape(self.shape)
self.t.color(self.color)
def getX(self):
return self.t.xcor()
def getY(self):
return self.t.ycor()
class Pole:
def __init__(self, x):
self.x = x
self.stack = Stack()
def draw_rectangle(t, color, width, height):
t.color(color, color)
t.begin_fill()
for i in range(2):
t.forward(width)
t.left(90)
t.forward(height)
t.left(90)
t.end_fill()
def draw_bk(t, l):
t.pensize(1)
t.speed(0)
t.up()
t.goto(-300, -100)
t.down()
draw_rectangle(t, '#804000', l, 30)
t.up()
t.left(90)
t.forward(l // 20)
t.right(90)
t.forward(80)
t.down()
for i in range(3):
draw_rectangle(t, 'brown', 5, 200)
t.up()
t.forward((l-160 - 5.0) // 2.0)
t.down()
def move_disk(source, target, disks):
tower1 = source.stack
tower2 = target.stack
if tower1.size() > 0:
tower2.push(tower1.pop())
for i in range(len(disks)):
disk = disks[i]
if disk.getX() == source.x:
size = tower2.size()
disk.moveto(target.x, -60 + (size - 1)* 20)
break
def hanoi(n, source, helper, target, disks):
if n > 0:
hanoi(n - 1, source, target, helper, disks)
move_disk(source, target, disks)
print(source.stack, helper.stack, target.stack)
hanoi(n - 1, helper, source, target, disks)
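# The recursion above first moves n - 1 disks onto the helper pole, then moves the
# largest disk to the target, and finally moves the n - 1 smaller disks on top of
# it, so a full run performs 2**n - 1 disk moves (e.g. 7 moves for n = 3).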
bk_t = turtle.Turtle()
wn = turtle.Screen()
draw_bk(bk_t, 600)
colormap = ['#FE007F','#96D7A0','#F88379','#ABE3E5','#C5A3FF',
'#6FC0AB','#FFB114', '#F3F298', '#F8C5D0']
disks = []
n = int(input("How many disks would you like to have? (0 < n < 10) "))
s = int(input("How fast would you like to move the disks? (0 < speed < 10, 0 is the fastest)"))
for i in range(n, 0, -1):
width = 10 * (i + 1)
wn.register_shape('rectangle' + str(i), ((-10,-width),(-10,width),(10,width),(10,-width)))
t = Disk(colormap[i - 1], 'rectangle' + str(i), 20 *(i + 1), -220, -60 + 20*(n-i), s)
disks.insert(0, t)
source = Pole(-220)
helper = Pole(0)
target = Pole(220)
for i in range(n, 0, -1):
source.stack.push(i)
hanoi(n,source,helper,target, disks)
#print(target.x)
wn.exitonclick()
|
from pyprint.ConsolePrinter import ConsolePrinter
from coala_utils.string_processing.StringConverter import StringConverter
def ask_question(question,
default=None,
printer=ConsolePrinter(),
typecast=str,
**kwargs):
"""
Asks the user a question and returns the answer.
:param question:
String to be used as question.
:param default:
The default answer to be returned if the user gives a void answer
to the question.
:param printer:
The printer object used for console interactions. If this is not
given, it defaults to a ``ConsolePrinter``.
:param typecast:
Type to cast the input to. Defaults to a ``str``.
:param kwargs:
The additional keyword arguments are held for backwards compatibility
and for future use with the ``prompt_toolkit``.
:return:
The response from the user.
"""
while True:
printer.print(question, color="yellow", end=" ")
if default:
printer.print("[" + default + "]", end=" ")
printer.print("")
answer = input()
if default and len(answer) == 0:
answer = default
try:
answer = typecast(StringConverter(answer))
except BaseException as e:
printer.print(
str(e) + "\nPlease enter a valid answer.",
color="blue")
else:
return answer
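# Example usage (illustrative sketch; the question text and default are arbitrary):
#
#   name = ask_question("Project name?", default="my-project")
#
# An empty answer returns the default; otherwise the answer is passed through
# StringConverter and cast with `typecast` before being returned.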
|
from django.urls import path
from . import views
app_name = 'ask'
urlpatterns = [
path(r'continent/', views.continent, name='continent'),
path(r'continent_no_user/', views.continent_no_user, name='continent_no_user'),
path(r'region/', views.region, name='region'),
path(r'country/', views.country, name='country'),
path(r'river/', views.river, name='river'),
path(r'mountain/', views.mountain, name='mountain'),
path(r'forest/', views.forest, name='forest'),
path(r'disaster/', views.disaster, name='disaster'),
]
|
from django.contrib import admin
from apps.applications.models import Application
@admin.register(Application)
class ApplicationAdmin(admin.ModelAdmin):
"""
Defines the admin model for the Application Model
"""
list_display = ("__str__", "id", "application_owner", "updated_at")
|
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import os
import re
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.mac.common as common
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
class mac_moddump(common.AbstractMacCommand):
""" Writes the specified kernel extension to disk """
def __init__(self, config, *args, **kwargs):
common.AbstractMacCommand.__init__(self, config, *args, **kwargs)
self._config.add_option('BASE', short_option = 'b', default = None, help = 'Dump driver with BASE address (in hex)', action = 'store', type = 'int')
self._config.add_option('REGEX', short_option = 'r', help = 'Dump modules matching REGEX', action = 'store', type = 'string')
self._config.add_option('IGNORE-CASE', short_option = 'i', help = 'Ignore case in pattern match', action = 'store_true', default = False)
self._config.add_option('DUMP-DIR', short_option = 'D', default = None, help = 'Output directory', action = 'store', type = 'str')
def calculate(self):
common.set_plugin_members(self)
if self._config.REGEX:
try:
if self._config.IGNORE_CASE:
mod_re = re.compile(self._config.REGEX, re.I)
else:
mod_re = re.compile(self._config.REGEX)
except re.error, e:
debug.error('Error parsing regular expression: {0}'.format(e))
if self._config.BASE:
module_address = int(self._config.BASE)
yield obj.Object("kmod_info", offset = module_address, vm = self.addr_space)
else:
modules_addr = self.addr_space.profile.get_symbol("_kmod")
modules_ptr = obj.Object("Pointer", vm = self.addr_space, offset = modules_addr)
mod = modules_ptr.dereference_as("kmod_info")
while mod.is_valid():
if self._config.REGEX and not mod_re.search(str(mod.name)):
mod = mod.next
continue
yield mod
mod = mod.next
def unified_output(self, data):
if (not self._config.DUMP_DIR or not os.path.isdir(self._config.DUMP_DIR)):
debug.error("Please specify an existing output dir (--dump-dir)")
return TreeGrid([("Address", Address),
("Size", int),
("Output Path", str),
], self.generator(data))
def generator(self, data):
for kmod in data:
start = kmod.address
size = kmod.m("size")
file_name = "{0}.{1:#x}.kext".format(kmod.name, kmod.obj_offset)
mod_file = open(os.path.join(self._config.DUMP_DIR, file_name), 'wb')
mod_data = self.addr_space.zread(kmod.address, size)
mod_file.write(mod_data)
mod_file.close()
yield(0, [
Address(start),
int(size),
str(file_name),
])
def render_text(self, outfd, data):
if (not self._config.DUMP_DIR or not os.path.isdir(self._config.DUMP_DIR)):
debug.error("Please specify an existing output dir (--dump-dir)")
self.table_header(outfd, [("Address", "[addrpad]"),
("Size", "8"),
("Output Path", "")])
for kmod in data:
start = kmod.address
size = kmod.m("size")
file_name = "{0}.{1:#x}.kext".format(kmod.name, kmod.obj_offset)
mod_file = open(os.path.join(self._config.DUMP_DIR, file_name), 'wb')
mod_data = self.addr_space.zread(kmod.address, size)
mod_file.write(mod_data)
mod_file.close()
self.table_row(outfd, start, size, file_name)
|
from os.path import expanduser
import toml
class Config:
def __init__(self):
self._path = '{}/.config/dtt/config.toml'.format(expanduser("~"))
self._toml_string = ""
try:
with open(self._path) as f:
self._toml_string = f.read()
except FileNotFoundError:
self._toml_string = ""
def __getitem__(self, item):
return self.to_dic[item]
@property
def to_dic(self):
return toml.loads(self._toml_string)
@property
def to_s(self):
return self._toml_string
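# Example usage (illustrative; assumes ~/.config/dtt/config.toml exists and
# contains a hypothetical top-level `api_key` entry):
#
#   config = Config()
#   api_key = config['api_key']   # same as config.to_dic['api_key']
#   raw = config.to_s             # raw TOML text, '' if the file is missing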
|
# Genetic algorithm
from trabalho2.modules import calc_individuos, calc_populacao, calc_fitness, calc_fitness_filhos, calc_roletaSimplesProporcional, calc_pontoCorte, calc_mutacoes
from random import seed
from matplotlib import pyplot as plt
import numpy as np
len_pop = 100
num_individuos = 3000000
print('Population size: ', len_pop)
list_individuos = calc_individuos(num_individuos)
populacao = calc_populacao(list_individuos, len_pop)
fitness = []
roleta = []
pop_pais = []
pop_filhos = []
num_selecionados = 0
tc = 25
tm = 1
fitness_melhores = []
fitness_media = []
print('Crossover rate: ', tc, " %")
print('Mutation rate: ', tm, " %")
calc_fitness(populacao)
for geracao in range(150):
seed()
print("------------------")
print("Geracao: ", geracao)
# ordenando a populacao pela fitness
populacao = sorted(populacao, reverse=True)
    # set the number of individuals in the parent population
num_selecionados = int((tc/100)*len_pop)
if num_selecionados < 2:
num_selecionados = 2
if not num_selecionados % 2 == 0:
num_selecionados +=1
    # select the parent population
pop_pais = calc_roletaSimplesProporcional(populacao,num_selecionados)
    # crossover
pop_filhos = calc_pontoCorte(pop_pais,num_selecionados)
calc_fitness_filhos(pop_filhos)
for filho in pop_filhos:
populacao.append(filho)
    # elitism
populacao = sorted(populacao, reverse=True)
melhor_individuo = populacao[0]
n_mutacoes = int((tm / 100) * len(populacao))
if n_mutacoes < 1:
n_mutacoes = 1
print('n_mutacoes: ', n_mutacoes)
calc_mutacoes(populacao, n_mutacoes)
calc_fitness(populacao)
populacao = sorted(populacao, reverse=True)
    # elitism
populacao.insert(0, melhor_individuo)
populacao = populacao[:len_pop]
print("POPULACAO PARCIAL:")
print(populacao)
fitness_melhores.append(populacao[0][0])
fitness_media_temp = 0
for elem in populacao:
fitness_media_temp += elem[0]
fitness_media.append(fitness_media_temp/len_pop)
print("----------------------------------")
print("populacao final:" + '\n')
print(populacao)
# plot: best individual's fitness across generations
x = np.arange(0, len(fitness_melhores), 1)
y = fitness_melhores
fig, ax = plt.subplots()
ax.plot(x, y)
fig.suptitle('Best individual fitness per generation') # Add a title so we know which it is
plt.show()
# plot: average population fitness
x = np.arange(0, len(fitness_media), 1)
y = fitness_media
fig, ax = plt.subplots()
ax.plot(x, y)
fig.suptitle('Average population fitness per generation') # Add a title so we know which it is
plt.show()
|
import pathlib
from typing import Sequence
import pandas as pd
from visions.relations import IdentityRelation, TypeRelation
from visions.types.type import VisionsBaseType
from visions.utils.series_utils import nullable_series_contains
def _get_relations(cls) -> Sequence[TypeRelation]:
from visions.types import Path
relations = [IdentityRelation(cls, Path)]
return relations
class File(VisionsBaseType):
"""**File** implementation of :class:`visions.types.type.VisionsBaseType`.
(i.e. existing path)
Examples:
>>> x = pd.Series([pathlib.Path('/home/user/file.txt'), pathlib.Path('/home/user/test2.txt')])
>>> x in visions.File
True
"""
@classmethod
def get_relations(cls) -> Sequence[TypeRelation]:
return _get_relations(cls)
@classmethod
@nullable_series_contains
def contains_op(cls, series: pd.Series) -> bool:
return all(isinstance(p, pathlib.Path) and p.exists() for p in series)
|
from etl.workflow.readers.ontolia_reader import read_ontolia_file
from tests.etl.workflow.readers.ontolia.expected_outputs import expected_raw_ontolia_output
from tests.util import convert_to_dataframe, assert_df_are_equal_ignore_id
def test_read_ontolia_file(spark_session):
ontolia_data_df = read_ontolia_file(spark_session, "tests/etl/workflow/readers/ontolia/test_ontolia_output.txt")
expected_df = convert_to_dataframe(spark_session, expected_raw_ontolia_output)
assert_df_are_equal_ignore_id(ontolia_data_df, expected_df)
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""PyYAML event listener
Contains class which interprets YAML events and forwards them to
a handler object.
"""
from google.appengine.api import yaml_errors
import yaml
_EVENT_METHOD_MAP = {
yaml.events.StreamStartEvent: 'StreamStart',
yaml.events.StreamEndEvent: 'StreamEnd',
yaml.events.DocumentStartEvent: 'DocumentStart',
yaml.events.DocumentEndEvent: 'DocumentEnd',
yaml.events.AliasEvent: 'Alias',
yaml.events.ScalarEvent: 'Scalar',
yaml.events.SequenceStartEvent: 'SequenceStart',
yaml.events.SequenceEndEvent: 'SequenceEnd',
yaml.events.MappingStartEvent: 'MappingStart',
yaml.events.MappingEndEvent: 'MappingEnd',
}
class EventHandler(object):
"""Handler interface for parsing YAML files.
Implement this interface to define specific YAML event handling class.
Implementing classes instances are passed to the constructor of
EventListener to act as a receiver of YAML parse events.
"""
def StreamStart(self, event, loader):
"""Handle start of stream event"""
def StreamEnd(self, event, loader):
"""Handle end of stream event"""
def DocumentStart(self, event, loader):
"""Handle start of document event"""
def DocumentEnd(self, event, loader):
"""Handle end of document event"""
def Alias(self, event, loader):
"""Handle alias event"""
def Scalar(self, event, loader):
"""Handle scalar event"""
def SequenceStart(self, event, loader):
"""Handle start of sequence event"""
def SequenceEnd(self, event, loader):
"""Handle end of sequence event"""
def MappingStart(self, event, loader):
"""Handle start of mapping event"""
def MappingEnd(self, event, loader):
"""Handle end of mapping event"""
class EventListener(object):
"""Helper class to re-map PyYAML events to method calls.
By default, PyYAML generates its events via a Python generator. This class
is a helper that iterates over the events from the PyYAML parser and forwards
  them to a handler class in the form of method calls. For simplicity, the
underlying event is forwarded to the handler as a parameter to the call.
This object does not itself produce iterable objects, but is really a mapping
to a given handler instance.
Example use:
class PrintDocumentHandler(object):
def DocumentStart(event):
print "A new document has been started"
EventListener(PrintDocumentHandler()).Parse('''
key1: value1
---
key2: value2
    ''')
>>> A new document has been started
A new document has been started
In the example above, the implemented handler class (PrintDocumentHandler)
has a single method which reports each time a new document is started within
  a YAML file. It is not necessary to subclass EventListener; it merely
  receives a PrintDocumentHandler instance. Every time a new document begins,
PrintDocumentHandler.DocumentStart is called with the PyYAML event passed
  in as its parameter.
"""
def __init__(self, event_handler):
"""Initialize PyYAML event listener.
Constructs internal mapping directly from event type to method on actual
handler. This prevents reflection being used during actual parse time.
Args:
event_handler: Event handler that will receive mapped events. Must
implement at least one appropriate handler method named from
the values of the _EVENT_METHOD_MAP.
Raises:
ListenerConfigurationError if event_handler is not an EventHandler.
"""
if not isinstance(event_handler, EventHandler):
raise yaml_errors.ListenerConfigurationError(
'Must provide event handler of type yaml_listener.EventHandler')
self._event_method_map = {}
for event, method in _EVENT_METHOD_MAP.iteritems():
self._event_method_map[event] = getattr(event_handler, method)
def HandleEvent(self, event, loader=None):
"""Handle individual PyYAML event.
Args:
event: Event to forward to method call in method call.
Raises:
IllegalEvent when receives an unrecognized or unsupported event type.
"""
if event.__class__ not in _EVENT_METHOD_MAP:
raise yaml_errors.IllegalEvent(
"%s is not a valid PyYAML class" % event.__class__.__name__)
if event.__class__ in self._event_method_map:
self._event_method_map[event.__class__](event, loader)
def _HandleEvents(self, events):
"""Iterate over all events and send them to handler.
This method is not meant to be called from the interface.
Only use in tests.
Args:
events: Iterator or generator containing events to process.
raises:
EventListenerParserError when a yaml.parser.ParserError is raised.
EventError when an exception occurs during the handling of an event.
"""
for event in events:
try:
self.HandleEvent(*event)
except Exception, e:
event_object, loader = event
raise yaml_errors.EventError(e, event_object)
def _GenerateEventParameters(self,
stream,
loader_class=yaml.loader.SafeLoader):
"""Creates a generator that yields event, loader parameter pairs.
For use as parameters to HandleEvent method for use by Parse method.
During testing, _GenerateEventParameters is simulated by allowing
the harness to pass in a list of pairs as the parameter.
A list of (event, loader) pairs must be passed to _HandleEvents otherwise
it is not possible to pass the loader instance to the handler.
Also responsible for instantiating the loader from the Loader
parameter.
Args:
stream: String document or open file object to process as per the
yaml.parse method. Any object that implements a 'read()' method which
returns a string document will work.
Loader: Loader class to use as per the yaml.parse method. Used to
instantiate new yaml.loader instance.
Yields:
Tuple(event, loader) where:
event: Event emitted by PyYAML loader.
loader_class: Used for dependency injection.
"""
assert loader_class is not None
try:
loader = loader_class(stream)
while loader.check_event():
yield (loader.get_event(), loader)
except yaml.error.YAMLError, e:
raise yaml_errors.EventListenerYAMLError(e)
def Parse(self, stream, loader_class=yaml.loader.SafeLoader):
"""Call YAML parser to generate and handle all events.
Calls PyYAML parser and sends resulting generator to handle_event method
for processing.
Args:
stream: String document or open file object to process as per the
yaml.parse method. Any object that implements a 'read()' method which
returns a string document will work with the YAML parser.
loader_class: Used for dependency injection.
"""
self._HandleEvents(self._GenerateEventParameters(stream, loader_class))
|
from flask import Flask, Response, request
import os
import json
import logging
import pika
from entity_json import entities_to_json
import xmltodict
app = Flask(__name__)
logger = logging.getLogger('service')
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
config = json.loads(os.environ["CONFIG"])
queue = config["queue"]
decode_json_body = config.get("decode_json_value", False)
decode_xml_body = config.get("decode_xml_body", False)
timeout = config.get("inactivity_timeout_seconds", 1)
username = config.get("username", "guest")
password = config.get("password", "guest")
hostname = config.get("hostname", "localhost")
port = config.get("port", 5672)
virtual_host = config.get("virtual_host", "/")
credentials = pika.PlainCredentials(username, password)
parameters = pika.ConnectionParameters(hostname,
port,
virtual_host,
credentials)
@app.route('/', methods=["GET"])
def get():
limit = request.args.get("limit")
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
def generate():
yield "["
index = 0
for method_frame, properties, body in channel.consume(queue, inactivity_timeout=timeout):
if method_frame is None:
break
if index > 0:
yield ","
body_result = body
if decode_json_body:
body_result = json.loads(body.decode('utf-8'))
elif decode_xml_body:
body_result = xmltodict.parse(body.decode('utf-8'))
result = {
# dummy to prevent full sync deletion tracking
"_updated": 0,
"properties": properties.__dict__,
"body": body_result,
}
yield entities_to_json(result)
# TODO unsafe ack
channel.basic_ack(method_frame.delivery_tag)
index = index + 1
if limit and index >= int(limit):
break
yield "]"
channel.close()
connection.close()
    return Response(generate(), mimetype='application/json')
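# Example (illustrative): with CONFIG set to something like
#   {"queue": "my-queue", "hostname": "rabbitmq", "decode_json_value": true}
# a request such as
#   curl 'http://localhost:5000/?limit=5'
# drains up to 5 messages from the queue (stopping earlier on the inactivity
# timeout) and returns them as a JSON array, each element carrying the pika
# properties and the decoded body.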
if __name__ == '__main__':
app.run(debug=False, host='0.0.0.0')
|
import numpy as np
from pymul.layers.layer import Layer
class NeuronLayer(Layer):
def __init__(self, input_size, output_size):
self.weights = np.random.rand(input_size, output_size) * 2 - 1
self.biases = np.random.rand(1, output_size) * 2 - 1
def forward_propagate(self, inputs):
self.inputs = inputs
self.outputs = np.dot(self.inputs, self.weights) + self.biases
return self.outputs
def backward_propagate(self, errors, learning_rate):
input_errors = np.dot(errors, self.weights.T)
weight_errors = np.dot(self.inputs.T, errors)
self.weights -= weight_errors * learning_rate
        # accumulate the bias gradient over the batch so the (1, output_size) bias broadcasts
        self.biases -= np.sum(errors, axis=0, keepdims=True) * learning_rate
return input_errors
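# Example usage (illustrative sketch): one linear layer trained on a single
# (input, target) pair with plain gradient descent and a squared-error loss.
#
#   layer = NeuronLayer(input_size=3, output_size=2)
#   x = np.array([[0.5, -1.0, 2.0]])      # shape (1, 3)
#   target = np.array([[1.0, 0.0]])       # shape (1, 2)
#   for _ in range(100):
#       out = layer.forward_propagate(x)
#       errors = 2 * (out - target)       # dLoss/dOut for squared error
#       layer.backward_propagate(errors, learning_rate=0.1)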
|
'''
get_transform(args, eval_stage)
get_dataset(args, transform, eval_stage)
In pretraining stage, eval_stage set to 'none'
'''
from metric.stat_metric import StatMetric
from dataloader import get_transform
from dataloader import get_dataset
from ckpt import get_model_ckpt, save_ckpt
from model import get_model
from loss import get_loss
from optimizer import get_optimizer, get_sub_optimizer, get_scheduler
from metric import get_metrics
from utils import prepare_batch
from logger import get_logger, log_results, log_results_cmd
from ignite.engine.engine import Engine, State, Events
from ignite.metrics import Loss
import numpy as np
# from apex import amp
import ignite.distributed as idist
from ignite.contrib.engines import common
def get_trainer(args, model, loss_fn, optimizer, scheduler):
def update_model(trainer, batch):
model.train()
optimizer.zero_grad()
# to gpu
net_inputs, target = prepare_batch(args, batch)
# ** : dictionary input to each argument
# y_pred : dict {z_i, z_j, p_i, p_j}
x_i = net_inputs['x_i']
z_i, p_i = model(x_i)
del x_i
x_j = net_inputs['x_j']
z_j, p_j = model(x_j)
#y_pred = model(**net_inputs)
del net_inputs, x_j
y_pred = {'p_i': p_i, 'p_j': p_j, 'z_i': z_i, 'z_j':z_j}
batch_size = target.shape[0] # N
loss = loss_fn(y_pred)
#loss = loss.mean() # ddp
#with amp.scale_loss(loss, optimizer, loss_id=0) as scaled_loss:
# scaled_loss.backward()
loss.backward()
optimizer.step()
scheduler.step()
        return loss.item(), batch_size, {k: v.detach() for k, v in y_pred.items()}
trainer = Engine(update_model)
metrics = {
'loss': Loss(loss_fn=loss_fn,output_transform=lambda x:(x[0], x[1])),
}
for name, metric in metrics.items():
metric.attach(trainer, name)
return trainer
def pretrain(args):
tf = get_transform(args, 'none')
ds = get_dataset(args, tf, 'none')
args, model, ckpt_available = get_model_ckpt(args)
if ckpt_available:
print("loaded checkpoint {} in pretraining stage".format(args.ckpt_name))
loss_fn = get_loss(args)
sub_optimizer = get_sub_optimizer(args, model)
optimizer = get_optimizer(args, sub_optimizer)
scheduler = get_scheduler(args, optimizer)
# setup nvidia/apex amp
# model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level, num_losses=1)
# model = idist.auto_model(model)
trainer = get_trainer(args, model, loss_fn, optimizer, scheduler)
metrics = get_metrics(args)
logger = get_logger(args)
@trainer.on(Events.STARTED)
def on_training_started(engine):
print("Begin Pretraining")
# batch-wise
@trainer.on(Events.ITERATION_COMPLETED)
def log_iter_results(engine):
log_results(logger, 'pretrain/iter', engine.state, engine.state.iteration)
# epoch-wise (ckpt)
@trainer.on(Events.EPOCH_COMPLETED)
def save_epoch(engine):
log_results(logger, 'pretrain/epoch', engine.state, engine.state.epoch)
log_results_cmd(logger, 'pretrain/epoch', engine.state, engine.state.epoch)
save_ckpt(args, engine.state.epoch, engine.state.metrics['loss'], model)
trainer.run(ds, max_epochs=args.epoch)
|
""" Provides a solution (`solve`) to the EMST problem. """
from .edist import edist
from operator import itemgetter
# Euclidean Minimum Spanning Tree (MST) algorithm
#
# input: a list of n Point objects
#
# output: a list of (p, q) tuples, where p and q are each input Point
# objects, and (p, q) should be connected in a minimum spanning tree
# of the input points
def solve(points):
""" Solves the EMST problem """
# it's not a list
if not isinstance(points, list):
raise TypeError("solve expects a list of n Point objects, received %s" % points)
plen = len(points)
if plen < 2:
return []
# preallocate a simple map to tell us whether a Point is spanned
spanned = [False] * plen
# span the first point
spanned[0] = True
edges = []
result = []
for lkey, left in enumerate(points):
for rkey, right in enumerate(points):
#if left != right:
edges.append((lkey, rkey, edist(left, right)))
edges.sort(key=itemgetter(2))
while len(result) < plen - 1:
for edge in edges:
lkey, rkey, _ = edge
if spanned[lkey] != spanned[rkey]:
result.append((points[lkey], points[rkey]))
spanned[lkey] = spanned[rkey] = True
break
return result
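# The loop above is a simple Prim-style greedy: it repeatedly rescans the sorted
# edge list and adds the cheapest edge that connects a spanned point to an
# unspanned one, until len(points) - 1 edges have been chosen.
#
# Example usage (illustrative; assumes Point objects accepted by edist):
#
#   tree_edges = solve([p1, p2, p3])
#   # -> [(p_a, p_b), ...] with len(tree_edges) == len(points) - 1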
|
import os
import pickle
import time
import numpy as np
import torch
from torch import nn
import torchvision.utils as vutils
from torch import optim
from torch.utils.data import DataLoader
from zo.models import Discriminator, Generator, device
from zo.zo_opt import GradientEstimate_dicrs, GradientEstimate, zoVIA, zoESVIA, zoscESVIA
from zo.log_likelihood import log_likelihood
from zo.plot import *
from zo.utils import *
train_data, valid_data, test_data = get_data()
real_label = 1.
fake_label = 0.
nz = 100 # Size of z latent vector (i.e. size of generator input)
fixed_noise = torch.randn(20, nz, 1, 1, device=device)
print('Device: {}'.format(device))
print('Example of train samples:')
show_images(train_data[:10][0])
def choose_optimizer(discriminator, generator, netD, netG, lr_d=2e-4, lr_g=2e-3):
"""
Set optimizers for discriminator and generator
:param discriminator: str, name
:param generator: str, name
:param netD:
:param netG:
:param lr_d:
:param lr_g:
:return: optimizerD, optimizerG
"""
if discriminator == 'Adam':
optimizerD = optim.Adam(netD.parameters(), lr=lr_d, betas=(0.5, 0.999))
elif discriminator == 'RMSprop':
optimizerD = optim.RMSprop(netD.parameters(), lr=lr_d)
elif discriminator == 'SGD':
optimizerD = optim.SGD(netD.parameters(), lr=lr_d, momentum=0.9)
elif discriminator == 'zoVIA':
optimizerD = zoVIA(netD, lr=lr_d)
elif discriminator == 'zoESVIA':
optimizerD = zoESVIA(netD, lr=lr_d)
elif discriminator == 'zoscESVIA':
optimizerD = zoscESVIA(netD, lr=lr_d)
if generator == 'Adam':
optimizerG = optim.Adam(netG.parameters(), lr=lr_g, betas=(0.5, 0.999))
elif generator == 'RMSprop':
optimizerG = optim.RMSprop(netG.parameters(), lr=lr_g)
elif generator == 'SGD':
optimizerG = optim.SGD(netG.parameters(), lr=lr_g, momentum=0.9)
elif generator == 'zoVIA':
optimizerG = zoVIA(netG, lr=lr_g)
elif generator == 'zoESVIA':
optimizerG = zoESVIA(netG, lr=lr_g)
elif generator == 'zoscESVIA':
optimizerG = zoscESVIA(netG, lr=lr_g)
print('Discriminator optimizer: {}, lr={}'.format(discriminator, lr_d))
print('Generator optimizer: {}, lr={}'.format(generator, lr_g))
return optimizerD, optimizerG
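# Example usage (illustrative): pair a first-order Adam discriminator with a
# zero-order generator optimizer.
#
#   optimizerD, optimizerG = choose_optimizer('Adam', 'zoESVIA', netD, netG,
#                                             lr_d=2e-4, lr_g=2e-3)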
def train_model(valid_data, test_data, dataloader,
netD, netG, optimizerD, optimizerG,
num_epochs=10, discr_zo=False, gener_zo=False,
batch_size=32, tau=0.000001,
change_opt=(-1, -1, 'Adam', 'SGD', 2e-4, 2e-4),
img_every_epoch=False, log_like=True):
"""
Train GAN function
:param valid_data:
:param test_data:
:param dataloader:
:param netD: Discriminator network
:param netG: Generator network
:param optimizerD: Discriminator optimizer
:param optimizerG: Generator optimizer
:param num_epochs:
:param discr_zo: Discriminator optimizer Zero-order, bool
:param gener_zo: Generator optimizer Zero-order, bool
:param batch_size:
:param tau:
:return: gan, img_list
"""
EPOCH_ZO_D, EPOCH_ZO_G, optimD_begin, optimG_begin, lr_d_begin, lr_g_begin = change_opt
#EPOCH_ZO_D = -1
#EPOCH_ZO_G = -1
img_list = []
G_losses, D_losses = [], []
log_likelihoods = []
iters = 0
criterion = nn.BCELoss()
print("Starting Training Loop...")
if log_like:
generated_samples = generate_many_samples(netG, 512, batch_size).detach().cpu()
valid_samples = valid_data[np.random.choice(len(valid_data), 512, False)][0]
# valid_samples = valid_samples.to(next(model.parameters()).device)
test_samples = test_data[np.random.choice(len(test_data), 512, False)][0]
# test_samples = test_samples.to(next(model.parameters()).device)
ll = log_likelihood(generated_samples, valid_samples, test_samples)
else:
ll = 1
log_likelihoods.append(ll)
print('Log-likelihood before training: ', ll, flush=True)
print('\n')
# For each epoch
for epoch in range(num_epochs):
print('EPOCH #{}'.format(epoch+1))
start_epoch = time.time()
# For each batch in the dataloader
for i, data in enumerate(dataloader, 0):
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
## Train with all-real batch
netD.zero_grad()
# Format batch
real_cpu = data[0].to(device)
b_size = real_cpu.size(0)
label = torch.full((b_size,), real_label, device=device)
# Forward pass real batch through D
output = netD(real_cpu).view(-1)
# Calculate loss on all-real batch
errD_real = criterion(output, label)
if discr_zo and epoch > EPOCH_ZO_D:
gradsD_real = GradientEstimate_dicrs(netD, real_cpu, label, criterion, tau)
D_x = output.mean().item()
elif discr_zo and epoch <= EPOCH_ZO_D:
if optimD_begin == 'SGD':
optimizerD_begin = optim.SGD(netD.parameters(), lr=lr_d_begin, momentum=0.9)
elif optimD_begin == 'Adam':
optimizerD_begin = optim.Adam(netD.parameters(), lr=lr_d_begin, betas=(0.5, 0.999))
errD_real.backward()
D_x = output.mean().item()
else:
# Calculate gradients for D in backward pass
errD_real.backward()
D_x = output.mean().item()
## Train with all-fake batch
# Generate batch of latent vectors
noise = torch.randn(b_size, nz, 1, 1, device=device)
# Generate fake image batch with G
fake = netG(noise)
label.fill_(fake_label)
# Classify all fake batch with D
output = netD(fake.detach()).view(-1)
# Calculate D's loss on the all-fake batch
errD_fake = criterion(output, label)
if discr_zo and epoch > EPOCH_ZO_D:
gradsD_fake = GradientEstimate_dicrs(netD, fake.detach(), label, criterion, tau)
# print(grads_g)
D_G_z1 = output.mean().item()
errD = errD_real + errD_fake
gradsD = gradsD_real + gradsD_fake
optimizerD.step_update(netD, gradsD)
elif discr_zo and epoch <= EPOCH_ZO_D:
if optimD_begin == 'SGD':
optimizerD_begin = optim.SGD(netD.parameters(), lr=lr_d_begin, momentum=0.9)
elif optimD_begin == 'Adam':
optimizerD_begin = optim.Adam(netD.parameters(), lr=lr_d_begin, betas=(0.5, 0.999))
errD_fake.backward()
D_G_z1 = output.mean().item()
errD = errD_real + errD_fake
optimizerD_begin.step()
else:
# Calculate the gradients for this batch
errD_fake.backward()
D_G_z1 = output.mean().item()
# Add the gradients from the all-real and all-fake batches
errD = errD_real + errD_fake
# Update D
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
netG.zero_grad()
label.fill_(real_label) # fake labels are real for generator cost
# Since we just updated D, perform another forward pass of all-fake batch through D
output = netD(fake).view(-1)
# Calculate G's loss based on this output
errG = criterion(output, label)
# Calculate gradients for G
if gener_zo and epoch > EPOCH_ZO_G:
# if gener_zo:
# grads_g = GradientEstimate(netD, fake, label, criterion)
grads_g = GradientEstimate(netG, netD, noise, label, criterion, tau)
D_G_z2 = output.mean().item()
optimizerG.step_update(netG, grads_g)
elif gener_zo and epoch <= EPOCH_ZO_G:
if optimG_begin == 'SGD':
optimizerG_begin = optim.SGD(netG.parameters(), lr=lr_g_begin, momentum=0.9)
elif optimG_begin == 'Adam':
optimizerG_begin = optim.Adam(netG.parameters(), lr=lr_g_begin, betas=(0.5, 0.999))
#optimizerG_01ep = optim.Adam(netG.parameters(), lr=2e-4, betas=(0.5, 0.999))
errG.backward()
D_G_z2 = output.mean().item()
optimizerG_begin.step()
else:
errG.backward()
D_G_z2 = output.mean().item()
optimizerG.step()
# Output training stats
if i % 200 == 0:
print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
% (epoch + 1, num_epochs, i, len(dataloader),
errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
# Save Losses for plotting later
G_losses.append(errG.item())
D_losses.append(errD.item())
# Check how the generator is doing by saving G's output on fixed_noise
if (iters % 500 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)):
with torch.no_grad():
fake = netG(fixed_noise).detach().cpu()
img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
iters += 1
if img_every_epoch:
print('after epoch {}'.format(epoch+1))
show_images(netG.to(device).generate_samples(10))
print('EPOCH #{} time:'.format(epoch+1))
timer(start_epoch, time.time())
if log_like:
generated_samples = generate_many_samples(netG, 512, batch_size).detach().cpu()
valid_samples = valid_data[np.random.choice(len(valid_data), 512, False)][0]
# valid_samples = valid_samples.to(next(model.parameters()).device)
test_samples = test_data[np.random.choice(len(test_data), 512, False)][0]
# test_samples = test_samples.to(next(model.parameters()).device)
ll = log_likelihood(generated_samples, valid_samples, test_samples)
else:
ll = 1
log_likelihoods.append(ll)
print('Log-likelihood {} for epoch #{} \n'.format(ll, epoch+1))
return {
'netDiscriminator': netD.cpu(),
'netGenerator': netG.cpu(),
'generator_losses': G_losses,
'discriminator_losses': D_losses,
'log_likelihoods': log_likelihoods
}, img_list
def main(optD, optG, num_epochs=5,
discr_zo=False, gener_zo=False, save=True,
tau=0.000001, lr_d=2e-4, lr_g=2e-3, batch_size=32,
change_opt=(-1, -1, 'Adam', 'SGD', 2e-4, 2e-4),
img_every_epoch=False, log_like=True):
"""
Make main experiment
:param optD: str,
name of discriminator optimizer
:param optG: str,
name of generator optimizer
:param num_epochs: int,
number of epochs, default=5
:param discr_zo: bool,
True if discriminator optimizer is zero-order,
False otherwise, default=False
:param gener_zo: bool,
True if generator optimizer is zero-order,
False otherwise, default=False
:param save: bool,
if True save model and images, default=True
:param tau: float,
parameter for zo optimizer, default=0.000001
:param lr_d: float,
learning rate for discriminator optimizer, default=2e-4
:param lr_g: float,
        learning rate for generator optimizer, default=2e-3
:param batch_size: int,
number of samples in batch, default=32,
:param change_opt: tuple, default=(-1,-1, 'Adam', 'SGD', 2e-4, 2e-4),
tuple with parameters EPOCH_ZO_D, EPOCH_ZO_G, optimD_begin, optimG_begin, lr_d_begin, lr_g_begin
parameters for changing optimizer during training
EPOCH_ZO_D: int, epoch to change begin discriminator optimizer to ZO optimizer
EPOCH_ZO_G: int, epoch to change begin generator optimizer to ZO optimizer
optimD_begin: str, name of discriminator optimizer to start with
optimG_begin: str, name of generator optimizer to start with
lr_d_begin: float, learning rate for discriminator optimizer in the beginning of train
lr_g_begin: float, learning rate for generator optimizer in the beginning of train
:param img_every_epoch: bool,
if True show generator images after every epoch, default=False
    :param log_like: bool,
if True count log-likelihood every epoch, default=True
:return: gan, img_list
"""
if not os.path.exists('./experiments/'):
os.makedirs('./experiments/')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Device: {}'.format(device))
netG = Generator().to(device)
netD = Discriminator().to(device)
# print(netG, netD)
dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
optimizerD, optimizerG = choose_optimizer(optD, optG, netD, netG,
lr_d=lr_d, lr_g=lr_g)
gan, img_list = train_model(valid_data, test_data, dataloader,
netD, netG, optimizerD, optimizerG,
num_epochs=num_epochs, discr_zo=discr_zo,
gener_zo=gener_zo, batch_size=batch_size,
tau=tau, change_opt=change_opt,
img_every_epoch=img_every_epoch, log_like=log_like)
show_images(gan['netGenerator'].to(device).generate_samples(40))
plot_losses(gan['generator_losses'], gan['discriminator_losses'], optD, optG, save=True)
plot_ll(gan, optD, optG, save=True)
if save:
path = optD + '_' + optG + '_' + str(num_epochs) + 'ep.pickle'
with open('./experiments/gan_' + path, 'wb') as handle:
pickle.dump(gan, handle, protocol=pickle.HIGHEST_PROTOCOL)
print('Model saved at {}'.format('./experiments/gan_' + path))
with open('./experiments/imgs_' + path, 'wb') as handle:
pickle.dump(img_list, handle, protocol=pickle.HIGHEST_PROTOCOL)
print('Control images for generator saved at {}'.format('./experiments/imgs_' + path))
return gan, img_list
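# Example call (illustrative): 5 epochs with an Adam discriminator and a
# zero-order (zoESVIA) generator, warming the generator up with Adam for the
# first epochs as controlled by change_opt.
#
#   gan, img_list = main('Adam', 'zoESVIA', num_epochs=5,
#                        discr_zo=False, gener_zo=True,
#                        change_opt=(-1, 1, 'Adam', 'Adam', 2e-4, 2e-4))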
|
import pytest
import numpy as np
import torch
import network
import flow
import variational
import time
import itertools
from torch import nn
torch.manual_seed(2)
use_gpu = True
device = torch.device('cuda:0' if use_gpu else 'cpu')
@pytest.fixture(params=[4])
def L(request):
return request.param
def conv2d_coupling(L):
mask = get_mask(L)
coupling = flow.RealNVPInverseAndLogProb(
hidden_size=16,
kernel_size=3,
mask=mask)
return coupling.to(device)
def get_mask(L):
return torch.from_numpy(network.mask.checkerboard((L, L))).to(device)
def to_numpy(tensor):
return tensor.cpu().numpy().astype(np.float32)
def _get_memory():
torch.cuda.synchronize()
max_memory = torch.cuda.max_memory_allocated()
memory = torch.cuda.memory_allocated()
return memory / 10**9, max_memory / 10**9
def fast_coupling():
coupling = flow.RealNVPFastInverseAndLogProb(hidden_size=16)
return coupling.to(device)
def perm_coupling(L, parity=False, num_blocks=1):
modules = [flow.CheckerSplit((L, L))]
for _ in range(num_blocks):
modules.append(flow.RealNVPPermuteInverseAndLogProb(in_channels=1,
hidden_size=16,
parity=parity))
modules.append(flow.CheckerConcat((L, L)))
net = flow.RealNVPSequential(*modules)
return net.to(device)
def _test_speeds(L, num_blocks=1):
print('\n')
conv2d = flow.FlowSequential(*[conv2d_coupling(L) for _ in range(num_blocks)])
t0 = time.time()
_test_coupling(conv2d, L)
print(f'L: {L}\t slow:\t{time.time() - t0:.3f} s')
fast = flow.FlowSequential(*[fast_coupling() for _ in range(num_blocks)])
t0 = time.time()
_test_coupling(fast, L)
print(f'L: {L}\tfast:\t{time.time() - t0:.3f} s')
    perm = perm_coupling(L, num_blocks=num_blocks)
    t0 = time.time()
    _test_coupling(perm, L)
    print(f'L: {L}\tperm:\t{time.time() - t0:.3f} s')
def _test_memory_conv(L, num_blocks=1):
p = torch.distributions.Normal(0, 1)
x = p.sample((L, L)).to(device)
x = x.unsqueeze(0)
m0, max0 = _get_memory()
net = flow.FlowSequential(*[conv2d_coupling(L) for _ in range(num_blocks)])
m1, max1 = _get_memory()
print('init mem, max:', m1 - m0, max1 - max0)
y, log_x = net(x)
m2, max2 = _get_memory()
print('fwd mem, max:', m2 - m1, max2 - max1)
def _test_memory_fast(L, num_blocks):
p = torch.distributions.Normal(0, 1)
x = p.sample((L, L)).to(device)
x = x.unsqueeze(0)
m0, max0 = _get_memory()
net = flow.FlowSequential(*[fast_coupling() for _ in range(num_blocks)])
m1, max1 = _get_memory()
print('init mem, max:', m1 - m0, max1 - max0)
y, log_x = net(x)
m2, max2 = _get_memory()
print('fwd mem, max:', m2 - m1, max2 - max1)
def _test_memory_perm(L, num_blocks):
p = torch.distributions.Normal(0, 1)
x = p.sample((L, L)).to(device)
x = x.unsqueeze(0)
m0, max0 = _get_memory()
modules = [flow.CheckerSplit((L, L))]
for _ in range(num_blocks):
modules.append(flow.RealNVPPermuteInverseAndLogProb(in_channels=1, hidden_size=16))
modules.append(flow.CheckerConcat((L, L)))
net = flow.RealNVPSequential(*modules).to(device)
m1, max1 = _get_memory()
print('init mem, max:', m1 - m0, max1 - max0)
y, log_x = net(x)
m2, max2 = _get_memory()
print('fwd mem, max:', m2 - m1, max2 - max1)
def _test_coupling(coupling, L):
p = torch.distributions.Normal(0, 1)
x = p.sample((L, L)).to(device)
x = x.unsqueeze(0)
y, log_x = coupling(x)
x_pred, log_x_pred = coupling.inverse(y)
#print(x_pred, '\n', x)
assert torch.allclose(x_pred, x, rtol=0.01)
#print(log_x_pred, log_x)
assert torch.allclose(log_x_pred, log_x, rtol=0.01)
def test_fast_parity(L):
coupling = flow.RealNVPFastInverseAndLogProb(hidden_size=16, parity=True)
coupling.to(device)
_test_coupling(coupling, L)
def test_perm_parity(L):
_test_coupling(perm_coupling(L, parity=True), L)
def test_prior():
q_nu = variational.RealNVPPrior(latent_shape=(4, 4),
flow_depth=6,
hidden_size=16,
flow_std=1.0)
nu_0 = q_nu.sample_base_distribution(1)
log_q_nu_0 = q_nu.q_nu_0.log_prob(nu_0).sum((1, 2))
nu, log_q_nu = q_nu.q_nu(nu_0)
nu_0_pred, log_q_nu_pred = q_nu.q_nu.inverse(nu)
log_q_nu_0_pred = q_nu.q_nu_0.log_prob(nu_0_pred).sum((1, 2))
for pred, actual in [(nu_0_pred, nu_0),
(log_q_nu_pred, log_q_nu),
(log_q_nu_0_pred, log_q_nu_0)]:
print(pred, actual)
assert torch.allclose(pred, actual)
def test_posterior():
r_nu = variational.RealNVPPosterior(latent_shape=(4, 4),
flow_depth=6,
hidden_size=16,
flow_std=1.0)
nu = torch.randn(1, 4, 4)
z = torch.round(torch.rand(1, 4, 4))
nu_0, log_r_nu_0, log_r_nu = r_nu.inverse_and_log_prob(nu, z)
nu_pred, log_r_nu_pred = r_nu.r_nu.inverse(nu_0)
for pred, actual in [(nu_pred, nu),
(log_r_nu_pred, log_r_nu)]:
print(pred, actual)
assert torch.allclose(pred, actual)
def get_jacobian(net, x, noutputs):
"""From https://gist.github.com/sbarratt/37356c46ad1350d4c30aefbd488a4faa"""
x = x.squeeze()
n = x.size()[0]
x = x.repeat(noutputs, 1)
x.requires_grad_(True)
y = net(x)
y.backward(torch.eye(noutputs))
return x.grad.data
def test_rectangle_shapes(L):
net = network.Conv2dRect(in_channels=1, hidden_size=16, inner_activation='leaky_relu', final_activation=None)
x = torch.round(torch.rand(1, L, L // 2)) * 2 - 1
out = net(x)
print(L, out.shape)
assert tuple(out.shape) == (1, L, L // 2)
def get_jacobian(net, x, noutputs):
"""From https://gist.github.com/sbarratt/37356c46ad1350d4c30aefbd488a4faa"""
x = x.squeeze()
n = x.size()[0]
x = x.repeat(noutputs, 1)
x.requires_grad_(True)
y, _ = net(x)
y.backward(torch.eye(noutputs))
return x.grad.data
def _test_jacobian(coupling, L):
mask = get_mask(L)
# invert the mask
np_mask = (~to_numpy(mask).astype(bool)).astype(np.float32)
p = torch.distributions.Normal(0, 1)
x = p.sample((L, L)).to(device)
x = x.unsqueeze(0)
x.requires_grad = True
y, log_x = coupling(x)
J = np.zeros((L ** 2, L ** 2))
y = y.squeeze().flatten()
for i in range(L ** 2):
for j in range(L ** 2):
y[i].backward(retain_graph=True)
J[i, j] = x.grad.flatten()[j].item()
x.grad.zero_()
log_det_J = np.log(np.abs(np.linalg.det(J)))
print(log_det_J, log_x)
#assert np.allclose(log_det_J, log_x.item())
input_vars = np.where(J.sum(1) == 1)[0]
# realnvp only takes half of the input variables
assert len(input_vars) == L ** 2 // 2
# other half of the variables depend on the input
dependent_vars = list(filter(lambda i: i not in input_vars, range(L ** 2)))
assert len(dependent_vars) == L ** 2 // 2
for i in dependent_vars:
row = J[i]
# the variable depends on itself in the realnvp transform
row[i] = 0
arr = row.reshape((L, L))
recovered_mask = (arr != 0).astype(np.float32)
print('reconstructed mask for variable ', i)
print(recovered_mask)
assert np.array_equal(recovered_mask, np_mask) or np.array_equal(recovered_mask, 1 - np_mask)
def test_fast_jacobian(L):
coupling = fast_coupling()
_test_jacobian(coupling, L)
def test_perm_jacobian(L):
coupling = perm_coupling(L)
_test_jacobian(coupling, L)
if __name__ == '__main__':
L = 1024
test_posterior()
test_prior()
#_test_speeds(L, num_blocks=2)
# coupling = perm_coupling(L, parity=True)
# _test_coupling(coupling, L)
# _test_jacobian(coupling, L)
#coupling = fast_coupling()
#_test_jacobian(coupling, 4)
#_test_jacobian(coupling, 4)
#test_fast(4)
# num_blocks = 5
# for L in [512, 1024]:
# print('\n--------------\nL = ', L)
# # print('\nconv:')
# # _test_memory_conv(L, num_blocks)
# print('\nfast, with roll')
# _test_memory_fast(L, num_blocks)
# torch.cuda.reset_max_memory_allocated()
# print('\npermutations')
# _test_memory_perm(L, num_blocks)
# torch.cuda.reset_max_memory_allocated()
#test_conv2d_jacobian()
#test_fast_coupling()
#test_fast_coupling()
#test_fast_jacobian()
|
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
try:
import numpy as np
except:
exit()
import random
rnd = random.Random()
rnd.seed(128)
from surrogate.benchmarks import movingpeaks
sc = movingpeaks.SCENARIO_1
sc["uniform_height"] = 0
sc["uniform_width"] = 0
mp = movingpeaks.MovingPeaks(dim=2, random=rnd, **sc)
fig = plt.figure()
ax = Axes3D(fig)
X = np.arange(0, 100, 1.0)
Y = np.arange(0, 100, 1.0)
X, Y = np.meshgrid(X, Y)
Z = np.fromiter(map(lambda x: mp(x)[0], zip(X.flat, Y.flat)), dtype=float, count=X.shape[0] * X.shape[1]).reshape(
X.shape)
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.jet, linewidth=0.2)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
|
from django.contrib import admin
from ticketing.models import *
class BrownPaperSettingsAdmin(admin.ModelAdmin):
list_display = ('developer_token', 'client_username', 'last_poll_time')
class PayPalSettingsAdmin(admin.ModelAdmin):
list_display = ('business_email', )
class TransactionAdmin(admin.ModelAdmin):
list_display = ('ticket_item',
'purchaser',
'amount',
'order_date',
'import_date')
list_filter = ['ticket_item__ticketing_event__conference',
'order_date',
'import_date',
'ticket_item__ticketing_event__act_submission_event',
'ticket_item__ticketing_event__vendor_submission_event']
search_fields = ['ticket_item__title',
'purchaser__matched_to_user__username',
'purchaser__email']
class PurchaserAdmin(admin.ModelAdmin):
list_display = ('pk',
'matched_to_user',
'first_name',
'last_name',
'email',
'phone')
list_filter = ['state',
'country']
search_fields = ['matched_to_user__username',
'first_name',
'last_name',
'email']
class TicketItemAdmin(admin.ModelAdmin):
list_display = ('title',
'ticketing_event',
'ticket_id',
'active',
'cost',
'datestamp',
'modified_by',
'conference')
list_filter = ['datestamp',
'modified_by',
'ticketing_event',
'live',
'has_coupon']
search_fields = ['title',
'ticketing_event__title',
'ticketing_event__conference__conference_name',
'ticketing_event__conference__conference_slug']
def conference(self, obj):
return obj.ticketing_event.conference
def active(self, obj):
return obj.active
class DetailInline(admin.TabularInline):
model = EventDetail
class TicketingEventsAdmin(admin.ModelAdmin):
filter_horizontal = ("linked_events",)
search_fields = ('title', 'event_id')
list_display = ('title',
'event_id',
'act_submission_event',
'vendor_submission_event',
'include_conference',
'include_most')
list_filter = ['conference',
'source',
'act_submission_event',
'vendor_submission_event',
]
inlines = [
DetailInline,
]
fieldsets = (
("Control Fields", {
'fields': ('event_id', 'conference',),
'description': '''Use the event id from BPT. Conference controls
where events are displayed - only active/upcoming conferences
are synced.''',
}),
('Event Links', {
'fields': ('act_submission_event',
'vendor_submission_event',
'include_conference',
'include_most',
"linked_events",),
            'description': '''Rules for what this ticket grants. Controls when
                it is advertised, and special actions such as act or vendor
                submission.''',
}),
("Registration", {
'fields': ('ticket_style', ),
'description': '''Older rules for registration.''',
'classes': ('collapse',),
}),
("Display Text", {
'fields': ('display_icon', 'title', 'description'),
            'description': '''Controls what is shown on the 'I Want to Buy
                Tickets' page. Description is pulled from the ticket source
                but is not displayed there. Display Icon must come from
                https://simplelineicons.github.io/ or
                https://icons.getbootstrap.com/ -- NOTE: use only the CSS
                classes; the <i> tag is already in the code.''',
}),
)
class TicketingExclusionInline(admin.TabularInline):
model = TicketingExclusion
filter_horizontal = ("tickets",)
class RoleExclusionInline(admin.TabularInline):
model = RoleExclusion
class EligibilityConditionAdmin(admin.ModelAdmin):
list_display = ('checklistitem',
'ticketing_exclusions',
'role_exclusions',
'__str__')
list_filter = ['checklistitem']
inlines = [
TicketingExclusionInline,
RoleExclusionInline
]
def ticketing_exclusions(self, obj):
return obj.ticketing_ticketingexclusion.count()
def role_exclusions(self, obj):
return obj.ticketing_roleexclusion.count()
class TicketEligibilityConditionAdmin(admin.ModelAdmin):
filter_horizontal = ("tickets",)
list_display = ('checklistitem',
'ticketing_exclusions',
'role_exclusions',
'__str__')
list_filter = ['checklistitem']
inlines = [
TicketingExclusionInline,
RoleExclusionInline
]
def ticketing_exclusions(self, obj):
return obj.ticketing_ticketingexclusion.count()
def role_exclusions(self, obj):
return obj.ticketing_roleexclusion.count()
class SyncStatusAdmin(admin.ModelAdmin):
list_display = ('pk',
'is_success',
'import_type',
'import_number',
'error_msg',
'created_at',
'updated_at')
class RoleExcludeAdmin(admin.ModelAdmin):
list_display = ('pk',
'condition',
'role',
'event')
class TicketExcludeAdmin(admin.ModelAdmin):
list_display = ('pk',
'condition',
'__str__')
admin.site.register(BrownPaperSettings, BrownPaperSettingsAdmin)
admin.site.register(EventbriteSettings)
admin.site.register(PayPalSettings, PayPalSettingsAdmin)
admin.site.register(TicketingEvents, TicketingEventsAdmin)
admin.site.register(TicketItem, TicketItemAdmin)
admin.site.register(Purchaser, PurchaserAdmin)
admin.site.register(Transaction, TransactionAdmin)
admin.site.register(TicketingEligibilityCondition,
TicketEligibilityConditionAdmin)
admin.site.register(RoleEligibilityCondition,
EligibilityConditionAdmin)
admin.site.register(CheckListItem)
admin.site.register(SyncStatus, SyncStatusAdmin)
admin.site.register(TicketingExclusion, TicketExcludeAdmin)
admin.site.register(RoleExclusion, RoleExcludeAdmin)
|
"""
Challenge 2: Create a Multi Operation Transaction
"""
from stellar_sdk import Server, Keypair, TransactionBuilder, Network, FeeBumpTransaction
import requests
# 1. Load Keys
server = Server("https://horizon-testnet.stellar.org")
#stellar_quest_keypair = Keypair.from_secret("Shhhhhhh")
stellar_quest_keypair = Keypair.from_secret("SCRZVJ4D2IW5UN4L5LN7DOE3RRQ6XPH2P4RXMSP5SFO2ZGR5BQ3LXGH6")
quest_account_pub_key = stellar_quest_keypair.public_key
quest_account_priv_key = stellar_quest_keypair.secret
# 2. Create Another account
print("Loading Accounts...")
random_keypair = Keypair.random()
random_keypair_pub_key = random_keypair.public_key
random_keypair_priv_key = random_keypair.secret
# 3. Fund the new account using Friendbot
print("Funding Random Account...")
url = 'https://friendbot.stellar.org'
response = requests.get(url, params={'addr': random_keypair.public_key})
print(f"Friendbot responded with {response}")
# 4. Create Inner Transaction
print("Building Inner Transaction...")
base_fee = server.fetch_base_fee()
account = server.load_account(quest_account_pub_key)
other_account = server.load_account(random_keypair_pub_key)
inner_transaction = (
TransactionBuilder(
source_account=account,
network_passphrase=Network.TESTNET_NETWORK_PASSPHRASE,
base_fee=base_fee,
).append_bump_sequence_op(
bump_to=1
)
.build()
)
print('Signing Inner Transaction...')
inner_transaction.sign(quest_account_priv_key)
# 5. Create Fee Bump transaction
# The fee-bump envelope is signed with the random account's key below,
# so that account must be the fee source.
fee_bump_tx = TransactionBuilder.build_fee_bump_transaction(
    fee_source=random_keypair_pub_key,
base_fee=base_fee,
inner_transaction_envelope=inner_transaction,
network_passphrase=Network.TESTNET_NETWORK_PASSPHRASE
)
fee_bump_tx.sign(random_keypair_priv_key)
response = server.submit_transaction(fee_bump_tx)
print(f"This is the final response: {response}")
|
# -*- coding: utf-8 -*-
from .setup import *
from scipy.ndimage import gaussian_filter1d
from scipy.sparse import block_diag, identity, bmat, diags, spdiags
from scipy.sparse.linalg import gmres, spsolve
from lmfit import minimize, Parameters, report_fit # for pp vs b1
#from scikits.umfpack import spsolve
#from pypardiso import spsolve
import time
#from arnoldi import *
from math import ceil
import copy
import numpy as np
import matplotlib.pyplot as plt
from . import gconvl
def cw_spec(bgrid=np.linspace(-60, 60, 128)+3360, params_in=dict(), basis_file='xoxo', prune_on=0):
'''
calculates the derivative spectrum for a given magnetic field grid, basis file input
Inputs
------
bgrid: grid of magnetic field values in Gauss, need not be uniformly spaced
params_in: dictionary of parameters
basis_file: input basis file; very unlikely this will be used for saturation calculations
prune_on: integer; 0 means no prune, 1 means prune matx, use the pruned matx to prune matz and then proceed
Output
------
    tuple of bgrid and the derivative spectrum (computed with np.gradient over bgrid)
    bgrid is returned unchanged (it is already an input), so returning it is redundant; this will change in a future version
'''
simparams_double = np.array(([2.008820, 2.006200, 2.002330, 5.20, 5.80, 34.40, 8.18, 8.18, 9.27, 0, 0, 0,
0, 0, 0.0, 45, 0, 0, 0, 0, 0, 0, 2.0, 0.0, 0, 0, 0, 0.0, 0, 0, np.log10(2*8.8e4), 0, 3360, 0, 0, 0, 0, 0]))
lemx, lomx, kmx, mmx = budil_basis_size(
simparams_double[params_double_def['dx']], simparams_double[params_double_def['b0']]) # [12,9,4,4]
# ([2,0,0,22,13,7,7,2])#([2,0,0,44,33,14,14,2])
simparams_int = np.array([2, 0, 0, lemx, lomx, kmx, mmx, 2])
# simparams_int=np.array([2,0,0,22,19,14,2,2])
# read parameters from the dictionary
for x in params_in:
if x in params_double_def:
simparams_double[params_double_def[x]] = params_in[x]
if x in params_int_def:
simparams_int[params_int_def[x]] = params_in[x]
# off-diagonal space shift shiftx (same as lb!)
shiftx = params_in['shiftx'] if 'shiftx' in params_in else 0.0
# diagonal space shift shiftz
shiftz = params_in['shiftz'] if 'shiftz' in params_in else 0.0
# prune tol
ptol = params_in['ptol'] if 'ptol' in params_in else 0.0 # 001
# gmres tol
gmres_tol = params_in['gmres_tol'] if 'gmres_tol' in params_in else 0.0000001
# overall scaling factor
scale = params_in['scale'] if 'scale' in params_in else 1.0
# overall x axis shift factor
shiftg = params_in['shiftg'] if 'shiftg' in params_in else 0.0
# gib0
gib0 = params_in['gib0'] if 'gib0' in params_in else 0.0
# gib2
gib2 = params_in['gib2'] if 'gib2' in params_in else 0.0
# nort
nort = int(params_in['nort']) if 'nort' in params_in else 10
# print parameters
print(dict(zip(params_double_def.keys(), simparams_double)))
print(dict(zip(params_int_def.keys(), simparams_int)))
# b0, should be there in simparams_double
B0 = simparams_double[params_double_def['b0']]
# b1, should be there in params_in
B1 = params_in['b1']
print('Computing '+str(B1)+' Gauss')
#cfact=1e-06*np.mean(simparams_double[:3])*9.2731e-21 / 1.05443e-27
# omarrG=bgrid*2*np.pi/cfact
omarrG = bgrid+shiftg-B0
basis_file_trunc = 'xoxo'
res = np.zeros_like(omarrG)
#print('Computing '+str(B1)+' Gauss')
# prune the off-diag space matrices; prune=1 means prune matx, use it to prune everything else
# will add prune=2 for the case of pruning post mat_full creation
if prune_on == 1:
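        # Pruning strategy (a summary of the steps below): solve a single-orientation
        # (psi ~ 0) problem on a coarse field grid, keep only the off-diagonal basis
        # elements whose normalized solution amplitude ever exceeds ptol, then carry
        # that selection over to the diagonal space through the pulse propagator pp1.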
ommin, ommax = -25, 25
prune_bgrid = np.linspace(ommin, ommax, 20)
# np.array([2.0084,2.0054,2.0019,5.0,5.0,32.6,5.3622,5.3622,6.6544,0,0,0,0,0,5.646,45,0,0,0,0,0,0,2.2572,-2.1782,0,0,0,6.733,0,0,5.568,0,6167.6,0,0,0,0,0])
simparams_double1 = copy.deepcopy(simparams_double)
# np.array([2,0,0,lemx,lomx,kmx,mmx,2])#([2,0,0,22,13,7,7,2])#([2,0,0,44,33,14,14,2])
simparams_int1 = copy.deepcopy(simparams_int)
simparams_double1[params_double_def['psi']] = 0.00001 # prune for one orientation
matx1, matz1, pp1, stvx1 = generate_from_params(
basis_file_trunc, simparams_double1, simparams_int1)
matx1 += 1.0j*B0*identity(matx1.shape[0])
prune_resv = np.zeros((matx1.shape[0], len(prune_bgrid)))
for i in range(len(prune_bgrid)):
m = matx1+(shiftx-1.0j*prune_bgrid[i]+1.0j*B0)*identity(matx1.shape[0])
InvPrec = spdiags(1/m.diagonal(), [0], m.shape[0], m.shape[1])
invec = spsolve(m, stvx1)
prune_resv[:, i] = np.abs(invec/(stvx1.conjugate().transpose() @ invec))
prune_offdiag = np.max(prune_resv, axis=1) > ptol
prune_diag = (pp1 @ prune_offdiag) != 0
# prune the offdiag matrix
matx1 = (matx1[prune_offdiag, :].tocsc())[:, prune_offdiag].tocsr()
# prune the off-diag space starting vector
stvx1 = stvx1[prune_offdiag]
# prune the diag space matrix
matz1 = (matz1[prune_diag, :].tocsc())[:, prune_diag].tocsr()
# prune the pulse propagator
pp1 = (pp1[prune_diag, :].tocsc())[:, prune_offdiag].tocsr()
if nort > 0: # MOMD
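        # MOMD (microscopic order, macroscopic disorder): average the spectrum over
        # director orientations on a uniform grid in cos(psi) from 0 to 1; the
        # endpoint weights of 0.5/(nort-1) used below implement the trapezoidal rule.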
for iort in range(nort):
            cspsi = iort/(nort-1)  # cos(psi) grid over [0, 1]; psi=0 is nudged by a small epsilon below
gib = gib0 + gib2*(1-cspsi**2)
wline = np.sqrt(gib*gib+shiftx*shiftx)
if cspsi == 1:
cspsi -= 1.0e-6
# np.array([2.0084,2.0054,2.0019,5.0,5.0,32.6,5.3622,5.3622,6.6544,0,0,0,0,0,5.646,45,0,0,0,0,0,0,2.2572,-2.1782,0,0,0,6.733,0,0,5.568,0,6167.6,0,0,0,0,0])
simparams_double1 = copy.deepcopy(simparams_double)
# np.array([2,0,0,lemx,lomx,kmx,mmx,2])#([2,0,0,22,13,7,7,2])#([2,0,0,44,33,14,14,2])
simparams_int1 = copy.deepcopy(simparams_int)
simparams_double1[params_double_def['psi']] = np.arccos(cspsi)*180.0/np.pi
print([simparams_double1])
# print(simparams_int1)
scal_momd = 0.5/(nort-1) if iort == 0 or iort == nort-1 else 1.0/(nort-1)
matx1, matz1, pp1, stvx1 = generate_from_params(
basis_file_trunc, simparams_double1, simparams_int1)
matx1 += 1.0j*B0*identity(matx1.shape[0])
if prune_on == 1: # prune
matx1 = (matx1[prune_offdiag, :].tocsc())[:, prune_offdiag].tocsr()
stvx1 = stvx1[prune_offdiag]
matz1 = (matz1[prune_diag, :].tocsc())[:, prune_diag].tocsr()
pp1 = (pp1[prune_diag, :].tocsc())[:, prune_offdiag].tocsr()
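            # Block structure of the saturation problem: the off-diagonal space
            # (matx1), the diagonal space (matz1) and the conjugate off-diagonal
            # space are coupled through the pulse propagator pp1 with strength B1.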
mat_full = bmat([[matx1, 0.5j*B1*pp1.transpose(), None], [0.5j*B1*pp1, matz1, -
0.5j*B1*pp1], [None, -0.5j*B1*pp1.transpose(), matx1.conjugate().transpose()]])
ndimo = matx1.shape[0]
ndimd = matz1.shape[0]
stvx_full = np.hstack((1.0j*stvx1, np.zeros(ndimd), -1.0j*stvx1))
stvx_full_left = abs(B1)*np.hstack((stvx1, np.zeros(ndimo+ndimd)))
shifts = block_diag((shiftx*identity(ndimo), shiftz *
identity(ndimd), shiftx*identity(ndimo)))
signs = block_diag((identity(ndimo), 0*identity(ndimd), -identity(ndimo)))
'''
mat_full=matx1
stvx_full=stvx1
stvx_full_left=abs(B1)*stvx1
shifts=shiftx*identity(ndimo)
signs=identity(ndimo)
print(ndimo)
'''
tmpres = 0 * res
if mat_full.shape[0] > KRYLOV_THRESH:
for i in range(len(omarrG)):
InvPrec = diags(1/(mat_full+shifts-1.0j*omarrG[i]*signs).diagonal())
sol, info = gmres(
mat_full+shifts-1.0j*omarrG[i]*signs, stvx_full, None, gmres_tol, 200, ceil(mat_full.shape[0]/2000), InvPrec)
#sol,info = gmres(mat_full+shifts-1.0j*omarrG[i]*signs,stvx_full,None,gmres_tol,20,100,InvPrec)
if info > 0:
print("GMRES didn't converge for field offset " +
str(omarrG[i])+", might be ok for other field values")
tmpres[i] = scal_momd*np.imag(stvx_full_left.transpose()@sol)
else:
for i in range(len(omarrG)):
#sol = spsolve(mat_full+(shiftx-1.0j*omarrG[i])*identity(mat_full.shape[0]),stvx_full)
sol = spsolve(mat_full+shifts-1.0j*omarrG[i]*signs, stvx_full)
tmpres[i] = scal_momd*np.imag(stvx_full_left.transpose()@sol)
if wline > 0:
dummy_omarrG = np.linspace(min(omarrG), max(omarrG), 1000)
#dummy_spec = np.sqrt(2*np.pi)*0.5*gaussian_filter1d(np.interp(dummy_omarrG, omarrG, tmpres), sigma=int(2*len(dummy_omarrG)*wline/(max(dummy_omarrG)-min(dummy_omarrG))))
dummy_spec = gconvl.gconvl(np.hstack((np.interp(dummy_omarrG, omarrG, tmpres), np.zeros(
MXPT-len(dummy_omarrG)))), wline, np.diff(dummy_omarrG)[0], 1000, 2048)[:len(dummy_omarrG)]
res += np.interp(omarrG, dummy_omarrG, dummy_spec)
else:
res += tmpres
'''
for i in range(len(omarrG)):
X = Q[:,:-1].transpose().conjugate() @ ((mat_full+shifts-1.0j*omarrG[i]*signs) @ Q[:,:-1])
sol = Q[:,:-1] @ np.linalg.solve(X,np.eye(h.shape[1],1))
res[i]+=np.real(1.0j*stvx_full_left.transpose().conjugate()@sol)
'''
else: # no MOMD
print('nort was set to 0, will zero out psi and potential terms as well, no gib2 either')
wline = np.sqrt(gib0*gib0+shiftx*shiftx)
# np.array([2.0084,2.0054,2.0019,5.0,5.0,32.6,5.3622,5.3622,6.6544,0,0,0,0,0,5.646,45,0,0,0,0,0,0,2.2572,-2.1782,0,0,0,6.733,0,0,5.568,0,6167.6,0,0,0,0,0])
simparams_double1 = copy.deepcopy(simparams_double)
# np.array([2,0,0,lemx,lomx,kmx,mmx,2])#([2,0,0,22,13,7,7,2])#([2,0,0,44,33,14,14,2])
simparams_int1 = copy.deepcopy(simparams_int)
for x in ['c20', 'c22', 'psi']:
simparams_double1[params_double_def[x]] = 0.0
print([simparams_double1])
matx1, matz1, pp1, stvx1 = generate_from_params(
basis_file_trunc, simparams_double1, simparams_int1)
matx1 += 1.0j*B0*identity(matx1.shape[0])
if prune_on == 1: # prune
matx1 = (matx1[prune_offdiag, :].tocsc())[:, prune_offdiag].tocsr()
stvx1 = stvx1[prune_offdiag]
matz1 = (matz1[prune_diag, :].tocsc())[:, prune_diag].tocsr()
pp1 = (pp1[prune_diag, :].tocsc())[:, prune_offdiag].tocsr()
mat_full = bmat([[matx1, 0.5j*B1*pp1.transpose(), None], [0.5j*B1*pp1, matz1, -
0.5j*B1*pp1], [None, -0.5j*B1*pp1.transpose(), matx1.conjugate().transpose()]])
ndimo = matx1.shape[0]
ndimd = matz1.shape[0]
stvx_full = np.hstack((1.0j*stvx1, np.zeros(ndimd), -1.0j*stvx1))
stvx_full_left = abs(B1)*np.hstack((stvx1, np.zeros(ndimo+ndimd)))
shifts = block_diag((shiftx*identity(ndimo), shiftz *
identity(ndimd), shiftx*identity(ndimo)))
signs = block_diag((identity(ndimo), 0*identity(ndimd), -identity(ndimo)))
tmpres = np.zeros_like(omarrG)
for i in range(len(omarrG)):
sol = spsolve(mat_full+shifts-1.0j*omarrG[i]*signs, stvx_full)
tmpres[i] = np.imag(stvx_full_left.transpose()@sol)
# add wline
if wline > 0:
dummy_omarrG = np.linspace(min(omarrG), max(omarrG), 1000)
dummy_spec = gconvl.gconvl(np.hstack((np.interp(dummy_omarrG, omarrG, tmpres), np.zeros(
MXPT-len(dummy_omarrG)))), wline, np.diff(dummy_omarrG)[0], 1000, 2048)[:len(dummy_omarrG)]
#dummy_spec = np.sqrt(2*np.pi)*0.5*gaussian_filter1d(np.interp(dummy_omarrG, omarrG, tmpres), sigma=int(2*len(dummy_omarrG)*wline/(max(dummy_omarrG)-min(dummy_omarrG))))
res = np.interp(omarrG, dummy_omarrG, dummy_spec)
else:
res = tmpres
# return the derivative spectrum
return bgrid, scale*np.gradient(res, omarrG) # np.hstack((0,np.diff(res)/np.diff(omarrG)))
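# Example call (illustrative values only; 'b1' must be supplied in params_in,
# the remaining keys fall back to the defaults set inside cw_spec):
#   bg, deriv = cw_spec(bgrid=np.linspace(-60, 60, 128) + 3360,
#                       params_in={'b1': 0.1, 'nort': 10}, prune_on=0)
#   plt.plot(bg, deriv)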
# fit Boris 5PC data
stt = time.time()
def sat_residual(params_fit=dict(), params_nonfit=dict(), bgrid=np.reshape(np.linspace(-60, 60, 256)+3360, (1, -1)), spec_expt=np.zeros((1, 128)), b1_list=[0.1], weights=[1]):
# only double prec parameters are fit parameters, spec_expt should be len(bgrid)
xx = []
for i in range(len(b1_list)):
b1 = b1_list[i]
params_in = {**params_nonfit, **{x: params_fit[x].value for x in params_fit}}
params_in['b1'] = b1
        xx.append(weights[i]*(cw_spec(bgrid=bgrid[i], params_in=params_in,
                                      basis_file='xoxo', prune_on=0)[1]-spec_expt[i]))
return np.hstack(xx) # eps_data
"""
# INITIAL IMPLEMENTATION
params = Parameters()
scale_init = 1888 # 1494 #366.5 * 0.25 * 10 * 1.05/0.7
shiftg_init = -3.9
t1edi_init = 5.046 # 5.077 #4.8 + 0.7 -0.3 - 0.3 + 0.15 - 0.07 + 0.1 + 0.03
gib0_init = 1.94 # 1.5
gib2_init = 0.01
#shiftx_init = 0
#params.add('b1', value=b1_init, min=0.0005, max=1)
#params.add('shiftg', value=shiftg_init, min=-15, max=15)
params.add('scale', value=scale_init, min=0.001, max=10000)
params.add('t1edi', value=t1edi_init, min=3, max=8)
#params.add('shiftx', value=shiftx_init, min=0, max=10)
params.add('gib0', value=gib0_init, min=0.01, max=3)
params.add('gib2', value=gib2_init, min=0, max=3)
B1max = 0.9
dB_list = ['30', '13', '6', '2', '0'] # ['0','2','4','10','20','40']
other_list = ['028', '201', '451', '715', '9']
num_spec = len(dB_list)
bgrid = []
spec_expt = []
b1_list = []
weights = []
for i in range(len(dB_list)):
f = 'PC5_T19_dB'+dB_list[i]+'_B10pt'+other_list[i]+'.dat'
aa = np.loadtxt(f, delimiter=',')
aa = aa[0:-1:8, :]
bgrid.append(aa[:, 0])
spec_expt.append(aa[:, 1])
b1_list.append(B1max*10**(-0.05*int(dB_list[i])))
weights.append(1/(max(aa[:, 1])-min(aa[:, 1])))
out = minimize(sat_residual, params, args=(
{'shiftg': shiftg_init, 'nort': 20}, bgrid, spec_expt, b1_list, weights))
report_fit(out)
print('Time taken: ', time.time()-stt)
"""
'''
FIT AGAIN ON APR 27, TOOK ~11k seconds, 10 orientations
[[Fit Statistics]]
# fitting method = leastsq
# function evals = 21
# data points = 640
# variables = 3
chi-square = 0.90174186
reduced chi-square = 0.00141561
Akaike info crit = -4195.53290
Bayesian info crit = -4182.14850
[[Variables]]
scale: 1887.95325 +/- 48.6151116 (2.58%) (init = 1494)
t1edi: 5.04603352 +/- 0.01036494 (0.21%) (init = 5.077)
gib0: 1.94223282 +/- 0.08735447 (4.50%) (init = 1.5)
[[Correlations]] (unreported correlations are < 0.100)
C(scale, t1edi) = -0.842
C(scale, gib0) = 0.440
'''
'''
FIT AGAIN ON APR 25, TOOK 13332 seconds, 8 orientations
[[Fit Statistics]]
# fitting method = leastsq
# function evals = 25
# data points = 768
# variables = 3
chi-square = 107.610454
reduced chi-square = 0.14066726
Akaike info crit = -1503.32884
Bayesian info crit = -1489.39747
[[Variables]]
scale: 1790.24558 +/- 36.4245140 (2.03%) (init = 1494)
t1edi: 5.08824387 +/- 0.01085657 (0.21%) (init = 5.077)
gib0: 1.53034846 +/- 0.06546246 (4.28%) (init = 0.5)
[[Correlations]] (unreported correlations are < 0.100)
C(scale, t1edi) = -0.802
C(scale, gib0) = 0.404
Time taken: 13332.69223189354
'''
'''
FIT AGAIN ON APR 28, TOOK ~55k seconds (nort=20 instead of 10), 4 parameters (gib2 added)
[[Fit Statistics]]
# fitting method = leastsq
# function evals = 53
# data points = 640
# variables = 4
chi-square = 0.88989694
reduced chi-square = 0.00139921
Akaike info crit = -4201.99539
Bayesian info crit = -4184.14952
[[Variables]]
scale: 1865.94534 +/- 47.9072161 (2.57%) (init = 1888)
t1edi: 5.04481806 +/- 0.01023307 (0.20%) (init = 5.046)
gib0: 1.29573435 +/- 0.29837452 (23.03%) (init = 1.94)
gib2: 0.65236874 +/- 0.29276701 (44.88%) (init = 0.01)
[[Correlations]] (unreported correlations are < 0.100)
C(gib0, gib2) = -0.956
C(scale, t1edi) = -0.829
C(scale, gib0) = 0.281
C(scale, gib2) = -0.163
Time taken: 55056.00435447693
'''
|
#!/usr/bin/python
import lutin.debug as debug
import lutin.tools as tools
def get_type():
return "BINARY"
def get_sub_type():
return "TEST"
def get_desc():
return "test chunkware"
def get_licence():
return "MPL-2"
def get_compagny_type():
return "com"
def get_compagny_name():
return "atria-soft"
def get_maintainer():
return "authors.txt"
def configure(target, my_module):
my_module.add_src_file([
'test/main.cpp'
])
my_module.add_depend(['audio-algo-chunkware', 'test-debug'])
return True
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isValidBST(self, root: Optional[TreeNode]) -> bool:
ary = []
self.traverse(root, ary)
return self.is_valid(ary)
# inorder
def traverse(self, root, ary):
if root.left is not None:
self.traverse(root.left, ary)
ary.append(root.val)
if root.right is not None:
self.traverse(root.right, ary)
def is_valid(self, ary):
for i in range(1, len(ary)):
if ary[i-1] >= ary[i]:
return False
return True
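# The check relies on the fact that an inorder traversal of a valid BST is
# strictly increasing. Illustrative calls (TreeNode as defined above):
#   Solution().isValidBST(TreeNode(2, TreeNode(1), TreeNode(3)))  # -> True
#   Solution().isValidBST(TreeNode(2, TreeNode(2), TreeNode(3)))  # -> False (duplicate value)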
|
from requests.auth import AuthBase
class TokenAuthenticator(AuthBase):
"""Token bases authenticator
This authenticator will add the token in the Authorization header of the
request
"""
def __init__(self, token, authentication_type=None):
"""Create a new TokenAuthenticator object
        :param str token: the token
        :param str authentication_type: optional authentication scheme (for
            example ``Bearer``) prepended to the token in the header
        """
self.token = token
self.authentication_type = authentication_type
def _create_authorization_value(self):
if self.authentication_type is not None:
return "{} {}".format(self.authentication_type, self.token)
else:
return self.token
def __call__(self, request):
request.headers["Authorization"] = self._create_authorization_value()
return request
class RequestParameterAuthenticator(AuthBase):
"""Request parameter authentication
This authenticator will put the api key in a url parameter of a request
"""
def __init__(self, api_key, parameter_name):
"""Create a new RequestParameterAuthenticator object
:param str api_key: the api key
:param str parameter_name: the name of the parameter to put the key
"""
self._api_key = api_key
self._parameter_name = parameter_name
def __call__(self, r):
r.prepare_url(r.url, {self._parameter_name: self._api_key})
return r
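# Example usage (illustrative endpoint and credentials only):
#   import requests
#   requests.get("https://api.example.com/items",
#                auth=TokenAuthenticator("my-token", authentication_type="Bearer"))
#   requests.get("https://api.example.com/items",
#                auth=RequestParameterAuthenticator("my-key", "api_key"))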
|