# Generated by Django 3.0.1 on 2020-01-08 09:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('WebAXEL', '0047_auto_20200107_1358'),
]
operations = [
migrations.CreateModel(
name='RobotCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('categorie', models.CharField(max_length=255, verbose_name='Catégorie du robot')),
],
),
migrations.AlterField(
model_name='dataset',
name='categories_dataset',
field=models.ManyToManyField(blank=True, default=None, to='WebAXEL.DataSetCategory', verbose_name='Catégories du jeu de données'),
),
migrations.AlterField(
model_name='dataset',
name='dataset',
field=models.FileField(null=True, upload_to='static/datasets/', verbose_name='Fichier jeu de données'),
),
migrations.AlterField(
model_name='dataset',
name='date_ajout',
field=models.DateTimeField(auto_now_add=True, verbose_name="Date d'ajout du jeu de données"),
),
migrations.AlterField(
model_name='dataset',
name='description',
field=models.TextField(blank=True, null=True, verbose_name='Description du jeu de données'),
),
migrations.AlterField(
model_name='datasetcategory',
name='categorie',
field=models.CharField(max_length=255, verbose_name='Catégorie du jeu de données'),
),
migrations.AlterField(
model_name='document',
name='categories_document',
field=models.ManyToManyField(blank=True, default=None, to='WebAXEL.DocumentCategory', verbose_name='Catégories du Document'),
),
migrations.CreateModel(
name='Robot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nom', models.CharField(max_length=255, verbose_name='Nom')),
('model', models.CharField(max_length=255, verbose_name='Modèle')),
('utilisation', models.CharField(max_length=255, verbose_name='Utilisation')),
('date_ajout', models.DateTimeField(auto_now_add=True, verbose_name="Date d'ajout du robot")),
('description', models.TextField(blank=True, null=True, verbose_name='Description du robot')),
('categories_robot', models.ManyToManyField(blank=True, default=None, to='WebAXEL.RobotCategory', verbose_name='Catégories du robot')),
],
),
]
|
from __future__ import absolute_import
import functools
import black_magic.decorator
from test.benchmark import _common
class StandardPartial(_common.Base):
def __init__(self):
self.func = functools.partial(_common.func, 0)
class BlackMagicPartial(_common.Base):
def __init__(self):
self.func = black_magic.decorator.partial(_common.func, 0)
if __name__ == '__main__':
_common.main(StandardPartial, BlackMagicPartial)
|
class ERROR_CODE(object):
NOT_FOUND_ERROR = 404
|
# -*- coding: utf-8 -*-
"""
link
~~~~~~~~~~~~
The link module helps you connect to all of the data sources you need through a
simple configuration.
Sample Config to connect to mysql::
{
"dbs":{
"my_db": {
"wrapper": "MysqlDB",
"host": "mysql-master.123fakestreet.net",
"password": "<password>",
"user": "<user>",
"database": "<database_name>"
}
}
}
Sample Code::
In [3]: from link import lnk
# uses the keys from your configuration to look up and create the
# appropriate objects
In [35]: my_db = lnk.dbs.my_db
In [36]: data = my_db.select('select id from my_table')
:copyright: (c) 2013 by David Himrod
:license: Apache2, see LICENSE for more details.
"""
# Package version and authorship information
__version__ = '0.2.10'
__author__ = 'David Himrod'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 David Himrod'
__title__ = 'link'
from .link import Link, Wrapper, lnk
from .common import *
|
#!/usr/bin/env python3
"""
9.xxx (KNX DPT 9)
2-octet floating point number: 1 sign bit, 4-bit exponent, 11-bit mantissa;
value = 0.01 * mantissa * 2**exponent
"""
import struct
def encode(value):
s = 0
e = 0
if value < 0:
s = 0x8000
m = int(value * 100)
while (m > 2047) or (m < -2048):
e = e + 1
m = m >> 1
num = s | (e << 11) | (int(m) & 0x07ff)
ret = bytearray([0])
ret.extend(struct.pack('>H', int(num)))
return ret
def decode(data):
if len(data) != 2:
return None
i1 = data[0]
i2 = data[1]
s = (i1 & 0x80) >> 7
e = (i1 & 0x78) >> 3
m = (i1 & 0x07) << 8 | i2
if s == 1:
s = -1 << 11
f = (m | s) * 0.01 * pow(2, e)
return round(f, 2)
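# A minimal round-trip sketch (assuming the DPT 9.xxx layout described above);
# encoded[1:] skips the extra leading 0x00 byte that encode() prepends.
if __name__ == '__main__':
    encoded = encode(20.48)          # bytearray(b'\x00\x0c\x00')
    assert decode(encoded[1:]) == 20.48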
|
import os
import sys
import argparse
def prompt_sudo():
try:
os.mkdir('/test')
os.rmdir('/test')
except PermissionError:
print('You need root permission to generate the service!')
sys.exit(1)
prompt_sudo()
parser = argparse.ArgumentParser()
parser.add_argument("--name", help="Name of the service")
parser.add_argument("--type", help="Type of the service")
parser.add_argument("--desc", help="Description of the service")
parser.add_argument("--start", help="Start cmd of the service")
parser.add_argument("--stop", help="Stop cmd of the service")
args = parser.parse_args(sys.argv[1:])
name = args.name or input('Enter name for service: ')
type = args.type or input('Enter type for service (Press Enter for default): ')
description = args.desc or input('Enter description for service: ')
startOperation = args.start or input('Enter service start command: ')
stopOperation = args.stop or input('Enter service stop command (Press Enter for none): ')
if not type:
    type = 'simple'
if stopOperation:
stopOperation = 'ExecStop=%s' % stopOperation
with open(f'/etc/systemd/system/{name}.service', 'w') as f:
try:
f.write("""
[Unit]
Description=%s
After=multi-user.target
[Service]
Type=%s
ExecStart=%s
%s
[Install]
WantedBy=multi-user.target
""" % (description, type, startOperation, stopOperation))
except IOError as err:
print(f'An Input/Output error happened!\n{err}')
cmd = f'sudo systemctl enable {name}'
print(f'To enable the service, type: {cmd}')
|
import os
from itertools import combinations
file_path = os.path.join(os.path.dirname(__file__), "input.txt")
with open(file_path, 'r') as input:
all_lines = input.readlines()
for i in range(len(all_lines)):
all_lines[i] = all_lines[i].strip('\n')
all_lines[i] = int(all_lines[i])
preamble = []
for i in range(25):
preamble.append(all_lines.pop(0))
valid = False
for num in all_lines:
for pair in combinations(preamble, r=2):
if num == sum(pair):
valid = True
if valid:
preamble.pop(0)
preamble.append(num)
valid = False
else:
print(num)
break
|
# -*- coding: utf-8 -*-
"""
@author: Yi Zhang
@contact: zhangyi_aero@hotmail.com
@time: 2022/05/07 3:46 PM
"""
import sys
if './' not in sys.path: sys.path.append('./')
from screws.freeze.base import FrozenOnly
class FrameSegments(FrozenOnly):
""""""
def __init__(self, cell, edge_name, segments):
"""
Parameters
----------
cell
edge_name : str
{'U', 'D', 'L', 'R'}
"""
self._cell_ = cell
self._edge_ = edge_name
self._segments_ = segments
self._freeze_self_()
@property
def edge(self):
return self._edge_
def __iter__(self):
"""Go through all segments on this edge of a root-cell."""
for _r in self._segments_:
yield self._segments_[_r]
if __name__ == "__main__":
# mpiexec -n 4 python objects/nCSCG/rf2/_2d/mesh/cell/frame/segments/main.py
pass
|
import random
#to print out the board
def drawBoard (board):
print (' | |')
print (' ' + board[7] + ' | ' + board[8] + ' | ' + board[9])
print (' | |')
print ('------------')
print (' | |')
print (' ' + board[4] + ' | ' + board[5] + ' | ' + board[6])
print (' | |')
print ('------------')
print (' | |')
print (' ' + board[1] + ' | ' + board[2] + ' | ' + board[3])
print (' | |')
def inputPlayerLetter():
letter = ''
while not (letter == 'X' or letter == 'O'):
print('What do you want: X or O?')
letter = input().upper()
if letter == 'X':
return ['X','O']
else:
return ['O','X']
def firstTurn():
if random.randint(0,1) == 0:
return 'computer'
else:
return 'player'
def playAgain():
print ('Do you want to play again?')
return input().lower().startswith('y')
def makeMove(board , letter , move):
board[move] = letter
def winner(bo,le):
return ((bo[7]==le and bo[8]==le and bo[9]==le) or
(bo[4]==le and bo[5]==le and bo[6]==le) or
(bo[1]==le and bo[2]==le and bo[3]==le) or
(bo[7]==le and bo[4]==le and bo[1]==le) or
(bo[8]==le and bo[5]==le and bo[2]==le) or
(bo[9]==le and bo[6]==le and bo[3]==le) or
(bo[9]==le and bo[5]==le and bo[1]==le) or
(bo[7]==le and bo[5]==le and bo[3]==le))
def getBoardCopy(board):
dupeBoard = []
for i in board:
dupeBoard.append(i)
return dupeBoard
def isSpaceFree(board, move):
return board[move] == ' '
def getPlayerMove(board):
move = ' '
while move not in '1 2 3 4 5 6 7 8 9'.split() or not isSpaceFree(board,int(move)):
print ('What is your next move? (1-9)')
move = input()
return int (move)
def chooseRandomMoveFromList (board,movesList):
possibleMoves = []
for i in movesList:
if isSpaceFree(board,i):
possibleMoves.append(i)
if len(possibleMoves) != 0:
return random.choice(possibleMoves)
else:
return None
def getComputerMove(board,computerLetter):
if computerLetter == 'X':
    playerLetter = 'O'
else:
    playerLetter = 'X'
#to check if we can win in the next move
for i in range(1,10):
copy = getBoardCopy(board)
if isSpaceFree(copy,i):
makeMove(copy, computerLetter , i)
if winner(copy, computerLetter):
return i
#to check if player can win in their next move and block them
for i in range(1,10):
copy = getBoardCopy(board)
if isSpaceFree(copy,i):
makeMove(copy, playerLetter , i)
if winner(copy, playerLetter):
return i
#try to take one of the corners if they are free
move = chooseRandomMoveFromList (board, [1, 3, 7, 9])
if move != None:
return move
#try to take the centre, if it is free
if isSpaceFree(board, 5):
return 5
#move on one of the sides
return chooseRandomMoveFromList (board, [2, 4, 6, 8])
def isBoardFull (board):
#return true if every space is full, otherwise false
for i in range(1, 10):
if isSpaceFree(board, i):
return False
return True
print ('Welcome to Tic Tac Toe')
while True:
#reset the board
theBoard = [' '] * 10
playerLetter, computerLetter = inputPlayerLetter()
turn = firstTurn()
print ('The ' + turn + ' will go first. ')
gameIsPlaying = True
while gameIsPlaying:
if turn == 'player':
#Player's turn
drawBoard(theBoard)
move = getPlayerMove(theBoard)
makeMove (theBoard , playerLetter , move)
if winner (theBoard, playerLetter):
drawBoard(theBoard)
print('You have won the game!!')
gameIsPlaying = False
else:
if isBoardFull(theBoard):
drawBoard(theBoard)
print('The game is a Tie!!')
break
else:
turn = 'computer'
else:
#Computer's turn
move = getComputerMove(theBoard, computerLetter)
makeMove(theBoard, computerLetter, move)
if winner(theBoard, computerLetter):
drawBoard(theBoard)
print('The computer has won!! You lose!')
gameIsPlaying = False
else:
if isBoardFull(theBoard):
drawBoard(theBoard)
print('The game is a Tie!!')
break
else:
turn = 'player'
if not playAgain():
break
|
# Keeps the rates.csv file updated
from pandas import concat
from pyvalet import ValetInterpreter
# Fetch the data using Valet API
vi = ValetInterpreter()
# Step 1: Get all foreign exchange rates
# CAD to INR
_, cad_inr = vi.get_series_observations('FXCADINR')
dates = cad_inr.loc[3:]['id']
cad_inr = cad_inr.loc[3:]['label'].astype(float)
# USD to CAD
_, usd_cad = vi.get_series_observations('FXUSDCAD')
usd_cad = usd_cad.loc[3:]['label'].astype(float)
# EUR to CAD
_, eur_cad = vi.get_series_observations('FXEURCAD')
eur_cad = eur_cad.loc[3:]['label'].astype(float)
# GBP to CAD
_, gbp_cad = vi.get_series_observations('FXGBPCAD')
gbp_cad = gbp_cad.loc[3:]['label'].astype(float)
# Step 2: Convert and round all currencies to INR
# USD to INR
usd_inr = round(usd_cad * cad_inr, 2)
# EUR to INR
eur_inr = round(eur_cad * cad_inr, 2)
# GBP to INR
gbp_inr = round(gbp_cad * cad_inr, 2)
# Round CAD to INR
cad_inr = round(cad_inr, 2)
# Step 3: Merge the data
merged = concat([dates, cad_inr, usd_inr, eur_inr, gbp_inr], axis=1)
merged.columns = ['date', 'CAD', 'USD', 'EUR', 'GBP']
# Step 4: Write to file
merged.to_csv('rates.csv', index=False)
|
import sys
import csv
import StringIO
import json
from threading import Thread
from SimpleWebSocketServer import WebSocket, SimpleWebSocketServer
clients = []
class SockClient(WebSocket):
def handleMessage(self):
if self.data is None:
self.data = ''
try:
self.sendMessage(str(self.data))
except:
print "Could not send message to client."
def handleConnected(self):
print self.address, 'connected'
clients.append(self)
def handleClose(self):
clients.remove(self)
print self.address, 'closed'
def sendEvent(self, evt):
print "sending message to ",self.address
self.sendMessage(evt)
def triggerEvent(evt):
for client in clients[:]:
client.sendEvent(evt)
def startServer():
server = SimpleWebSocketServer('', 8080, SockClient)
server.serveforever()
serverThread = Thread(target = startServer)
serverThread.start()
try:
while True:
line = sys.stdin.readline()
data = csv.reader(StringIO.StringIO(line))
lst = []
for r in data:
lst.append(r)
triggerEvent(json.dumps(lst))
except KeyboardInterrupt:
sys.exit()
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for frequency analysis."""
import numpy as np
import tensorflow.compat.v1 as tf
from frequency_analysis import freq_heatmap
class FreqHeatmapTest(tf.test.TestCase):
def test_heatmap(self):
x = tf.placeholder(dtype=tf.float32, shape=(None, 3, 3))
y = tf.placeholder(dtype=tf.int64, shape=None)
flat_x = tf.reshape(x, [-1, 9])
np.random.seed(8629)
row_1 = 0.3 + 0.5 * np.random.randn(9)
init_val = np.transpose(np.array([row_1, (-1.0) * row_1]))
init_w = tf.constant_initializer(init_val)
w = tf.get_variable('weights', shape=(9, 2), initializer=init_w)
logits = tf.matmul(flat_x, w)
predictions = tf.argmax(logits, 1)
correct_prediction = tf.equal(predictions, y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
init = tf.global_variables_initializer()
with self.test_session() as sess:
x_np = np.array([0.2 * np.ones([3, 3]), (-0.2) * np.ones([3, 3])])
y_np = np.array([0, 1])
data_dict = {x: x_np, y: y_np}
sess.run(init, feed_dict=data_dict)
# Compute Fourier heatmaps and test error using generate_freq_heatmap.
neural_network = freq_heatmap.TensorFlowNeuralNetwork(
sess, x, y, [logits], accuracy)
heatmaps, test_acc, clean_test_acc = freq_heatmap.generate_freq_heatmap(
neural_network, x_np, y_np)
# Compute the Fourier heatmaps without using generate_freq_heatmap.
heatmap_check = np.zeros([3, 3])
test_acc_check = np.zeros([3, 3])
for pos in [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1)]:
fft_basis = np.zeros([3, 3], dtype=np.complex64)
if pos == (1, 1):
fft_basis[1, 1] = 1.0
else:
fft_basis[pos[0], pos[1]] = 0.5 + 0.5j
fft_basis[2 - pos[0], 2 - pos[1]] = 0.5 - 0.5j
basis = 3 * np.real(np.fft.ifft2(np.fft.ifftshift(fft_basis)))
basis_flat = np.reshape(basis, [1, 9])
logit_change = np.matmul(basis_flat, init_val)
change_norm = np.linalg.norm(logit_change)
heatmap_check[pos[0], pos[1]] = change_norm
heatmap_check[2 - pos[0], 2 - pos[1]] = change_norm
data_dict_basis = {x: x_np + basis, y: y_np}
test_acc_basis = sess.run(accuracy, feed_dict=data_dict_basis)
test_acc_check[pos[0], pos[1]] = test_acc_basis
test_acc_check[2 - pos[0], 2 - pos[1]] = test_acc_basis
heatmap_check /= np.amax(heatmap_check)
clean_test_acc_check = sess.run(accuracy, feed_dict=data_dict)
self.assertAllClose([heatmap_check], heatmaps)
self.assertAllClose(test_acc_check, test_acc)
self.assertEqual(clean_test_acc_check, clean_test_acc)
if __name__ == '__main__':
tf.test.main()
|
import boto3
import fire
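# Exposes the entire boto3 module as a command-line tool via Python Fire.
# Hypothetical invocation (assuming AWS credentials are configured in the environment):
#   python this_script.py client s3 list-buckets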
if __name__ == '__main__':
fire.Fire(boto3)
|
import re
from string import punctuation
from string import digits
import pymorphy2
import codecs
def prepr(input: str):
input = input.lower()
input = re.sub(':', '', input)
input = re.sub("#", '', input)
delreg = r"(\S+(\.|\/)+\S+)+"
input = re.sub(delreg, '', input)
stop = punctuation + digits + '»«'
ru = 'йцукенгшщзхъфывапролджэячсмитьбюЙЦУКЕНГШЩЗХЪФЫВАПРОЛДЖЭЯЧСМИТЬБЮ '
input = ''.join([s for s in input if s not in stop and s in ru])
stopwords = []
morph = pymorphy2.MorphAnalyzer()
input = [morph.parse(word)[0].normal_form for word in input.split()]
with codecs.open("stopwords.txt", 'r', 'utf-8') as file:
for line in file:
stopwords.append(line.strip())
input = [word for word in input if word not in stopwords]
return input
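# Rough usage sketch (assumes the pymorphy2 dictionaries are installed and that a
# stopwords.txt file exists which does not list these words):
#   prepr('Привет, мир!')  ->  ['привет', 'мир']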
|
import os
import sys
'''
Since tweetme.py is now present in the parent directory, we need to add the appropriate
path in order to access it as a module.
The path is added with respect to the location the user is running the tests from.
'''
parent_directory_path = os.getcwd().split('/')
if 'tests' in parent_directory_path:
parent_directory_path = parent_directory_path[:len(os.getcwd().split('/'))-1]
parent_directory_path = '/'.join(parent_directory_path)
if parent_directory_path not in sys.path:
sys.path.append(parent_directory_path)
|
from typing import List
from odm2_postgres_api.schemas.schemas import (
SamplingFeaturesCreate,
VariablesCreate,
EquipmentModelCreate,
LoggersTimeSeriesCreate,
DataQualityCreate,
ControlledVocabularyCreate,
)
def loggers_controlled_vocabularies() -> List[ControlledVocabularyCreate]:
loggers_cv = [
{
"name": "Discarded",
"term": "discarded",
"definition": "Measurement point categorised as bad measurement by a domain expert.",
"controlled_vocabulary_table_name": "cv_censorcode",
},
{
"name": "Approved",
"term": "approved",
"definition": "Measurement point categorised as good measurement by a domain expert.",
"controlled_vocabulary_table_name": "cv_censorcode",
},
]
return [ControlledVocabularyCreate(**cv) for cv in loggers_cv]
def loggers_sampling_features() -> List[SamplingFeaturesCreate]:
# NOTE: how do we store coordinates of Site? sampling_feature_annotation?
# Langtjernet: 60.21482433130298, 11.240493855592176
sampling_features = [
{
"samplingfeatureuuid": "904fca98-3c36-4902-992f-a18f9da72b1e",
"samplingfeaturetypecv": "Site",
"samplingfeaturecode": "Langtjern_boye",
},
{
"samplingfeatureuuid": "1bd73c59-fcaa-459f-92a2-b999773c60bb",
"samplingfeaturetypecv": "Site",
"samplingfeaturecode": "Langtjern_inlet",
},
{
"samplingfeatureuuid": "62d5af54-bee3-4631-a798-9c4ba6f6abf5",
"samplingfeaturetypecv": "Site",
"samplingfeaturecode": "Langtjern_outlet",
},
{
"samplingfeatureuuid": "62f3a1f4-aa32-4b8e-ae9c-b5e3fc947cd3",
"samplingfeaturetypecv": "Site",
"samplingfeaturecode": "Langtjern_weather",
},
{
"samplingfeatureuuid": "1a30038f-85b0-4045-a66e-af8504205a4a",
"samplingfeaturetypecv": "Site",
"samplingfeaturecode": "Iskoras_outlet",
},
{
"samplingfeatureuuid": "2d6dceb2-9769-437a-a20d-b36f41d4ae29",
"samplingfeaturetypecv": "Site",
"samplingfeaturecode": "Maalselva",
},
{
"samplingfeatureuuid": "ce6cb9fe-0023-445c-ae95-57ad615afc8f",
"samplingfeaturetypecv": "Site",
"samplingfeaturecode": "Adventelva",
},
{
"samplingfeatureuuid": "33130cf7-78b3-4680-aa0b-539f31f4e0ca",
"samplingfeaturetypecv": "Site",
"samplingfeaturecode": "Lundevann",
},
{
"samplingfeatureuuid": "da7b2aa0-4f74-4a25-aa8b-7dfa63795339",
"samplingfeaturetypecv": "Site",
"samplingfeaturecode": "Svanfoss",
},
{
"samplingfeatureuuid": "c8b2270e-64e8-4c20-b98b-7b494f0c1315",
"samplingfeaturetypecv": "Site",
"samplingfeaturecode": "Rosten",
},
{
"samplingfeatureuuid": "017a9014-3476-4fd1-8dd3-80c4f2a7c644",
"samplingfeaturetypecv": "Site",
"samplingfeaturecode": "Kviteberg",
},
{
"samplingfeatureuuid": "9e366f0f-56dd-45aa-b2eb-9048c0013cf5",
"samplingfeaturetypecv": "Site",
"samplingfeaturecode": "Sjoa",
},
{
"samplingfeatureuuid": "528af2ce-394c-446d-9caa-4ac04773091d",
"samplingfeaturetypecv": "Site",
"samplingfeaturecode": "Kraakfoss",
},
]
return [SamplingFeaturesCreate(**sf) for sf in sampling_features]
def loggers_qc_tests() -> List[DataQualityCreate]:
qc_tests = [
{
"dataqualitytypecv": "Accuracy",
"dataqualitycode": "frozen_test",
"dataqualitydescription": "Five (or more) identical consecutive measurements indicates "
"that sensor is mal-functioning (frozen).",
},
{
"dataqualitytypecv": "Accuracy",
"dataqualitycode": "snowdepth_variability_test",
"dataqualitydescription": "Measurements of snow depth which show high variability of depth gradient"
"over time are inaccurate.",
},
{
"dataqualitytypecv": "Accuracy",
"dataqualitycode": "range_test",
"dataqualitydescription": "Values outside configurable range are labelled as bad.",
},
{
"dataqualitytypecv": "Accuracy",
"dataqualitycode": "spike_test",
"dataqualitydescription": "Temporary extreme increase in value is labelled as bad.",
},
]
return [DataQualityCreate(**qc) for qc in qc_tests]
def loggers_variables() -> List[VariablesCreate]:
loggers_variables_list = [
{
"variabletypecv": "Chemistry",
"variablenamecv": "Oxygen, dissolved percent of saturation",
"variabledefinition": "Oxygen, dissolved percent of saturation at 1m. Last 10 samples.",
"variablecode": "OxygenSat_1m",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Oxygen, dissolved percent of saturation",
"variabledefinition": "Oxygen, dissolved percent of saturation at 6m. Last 10 samples.",
"variablecode": "OxygenSat_6m",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature, sensor, at depth 0.5m",
"variablecode": "Temp_0.5m",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature, sensor, at depth 1m",
"variablecode": "Temp_1m",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature, sensor, at depth 1.5m",
"variablecode": "Temp_1.5m",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature, sensor, at depth 2m",
"variablecode": "Temp_2m",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature, sensor, at depth 3m",
"variablecode": "Temp_3m",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature, sensor, at depth 4m",
"variablecode": "Temp_4m",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature, sensor, at depth 6m",
"variablecode": "Temp_6m",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature, sensor, at depth 8m",
"variablecode": "Temp_8m",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Fluorescence, dissolved organic matter (DOM)",
"variabledefinition": "Fluorescence of dissolved organic matter (DOM), CDOM sensor",
"variablecode": "CO2Value_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Carbon dioxide",
"variabledefinition": "Carbon dioxide",
"variablecode": "CDOMdigitalFinal_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Electrical conductivity",
"variabledefinition": "Electrical conductivity",
"variablecode": "CondValue_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Climate",
"variablenamecv": "Water level",
"variabledefinition": "Water level in a lake - vannstand",
"variablecode": "LevelValue_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "pH",
"variabledefinition": "pH is the measure of the acidity or alkalinity "
"of a solution. pH is formally a measure of the activity "
"of dissolved hydrogen ions (H+). Solutions in which the "
"concentration of H+ exceeds that of OH- have a pH value "
"lower than 7.0 and are known as acids.",
"variablecode": "PhValue_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature, sensor, ground",
"variablecode": "Temp_ground_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature, sensor, water",
"variablecode": "Temp_water_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature, sensor, air",
"variablecode": "Temp_air_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature, sensor, ground at 15 cm",
"variablecode": "Temp_ground_15cm_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature, sensor, ground at 20 cm",
"variablecode": "Temp_ground_20cm_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Climate",
"variablenamecv": "Water level",
"variabledefinition": "Water level in a bucket",
"variablecode": "BV_mm",
"nodatavalue": -9999,
},
{
"variabletypecv": "Climate",
"variablenamecv": "Global Radiation",
"variabledefinition": "Solar radiation, direct and diffuse, "
"received from a solid angle of 2p steradians "
"on a horizontal surface. Source: World Meteorological Organization, Meteoterm",
"variablecode": "GS_Wpm2_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Climate",
"variablenamecv": "Relative humidity",
"variabledefinition": "Relative humidity",
"variablecode": "LF_psnt_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Climate",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature, sensor, air",
"variablecode": "LT_gr_C_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Climate",
"variablenamecv": "Precipitation",
"variabledefinition": "Precipitation growth last hour",
"variablecode": "NB_mm",
"nodatavalue": -9999,
},
{
"variabletypecv": "Climate",
"variablenamecv": "Wind speed",
"variabledefinition": "Wind speed max",
"variablecode": "VH_3_s_Max",
"nodatavalue": -9999,
},
{
"variabletypecv": "Climate",
"variablenamecv": "Wind speed",
"variabledefinition": "Wind speed",
"variablecode": "VH_mps_WVc(1)",
"nodatavalue": -9999,
},
{
"variabletypecv": "Climate",
"variablenamecv": "Wind direction",
"variabledefinition": "Wind direction",
"variablecode": "VH_mps_WVc(2)",
"nodatavalue": -9999,
},
{
"variabletypecv": "Climate",
"variablenamecv": "Water level",
"variabledefinition": "Water level",
"variablecode": "waterLevel_mm_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Climate",
"variablenamecv": "Snow depth",
"variabledefinition": "Snow depth",
"variablecode": "snowValue_mm_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Instrumentation",
"variablenamecv": "Voltage",
"variabledefinition": "Battery voltage",
"variablecode": "Batt_Volt_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Instrumentation",
"variablenamecv": "Voltage",
"variabledefinition": "Battery voltage",
"variablecode": "Batt_V_Min",
"nodatavalue": -9999,
},
# Iskoras
{
"variabletypecv": "Instrumentation",
"variablenamecv": "Counter",
"variabledefinition": "Signal counter used to check whether all signals which are used "
"as in input to aggregation (average) of conductivity values are present",
"variablecode": "CondTeller",
"nodatavalue": -9999,
},
{
"variabletypecv": "Instrumentation",
"variablenamecv": "Counter",
"variabledefinition": "Signal counter used to check whether all signals which are used "
"as in input to aggregation (average) of pH values are present",
"variablecode": "PHTeller",
"nodatavalue": -9999,
},
{
"variabletypecv": "Instrumentation",
"variablenamecv": "Counter",
"variabledefinition": "Signal counter used to check whether all signals which are used "
"as in input to aggregation (average) of level values are present",
"variablecode": "LevelTeller",
"nodatavalue": -9999,
},
{
"variabletypecv": "Instrumentation",
"variablenamecv": "Counter",
"variabledefinition": "Signal counter used to check weather all signals which are used "
"as in input to aggregation (average) of temperature values are present",
"variablecode": "TempLevelTeller",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature measured by conductivity sensor setup",
"variablecode": "TempCond_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature measured by Ph sensor",
"variablecode": "TempPh_Avg",
"nodatavalue": -9999,
},
# Maalselva variables are not averages
{
"variabletypecv": "Chemistry",
"variablenamecv": "Electrical conductivity",
"variabledefinition": "Electrical conductivity",
"variablecode": "CondValue",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "pH",
"variabledefinition": "pH is the measure of the acidity or alkalinity "
"of a solution. pH is formally a measure of the activity "
"of dissolved hydrogen ions (H+). Solutions in which the "
"concentration of H+ exceeds that of OH- have a pH value "
"lower than 7.0 and are known as acids.",
"variablecode": "PhValue",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Fluorescence, dissolved organic matter (DOM)",
"variabledefinition": "Fluorescence of dissolved organic matter (DOM), CDOM sensor",
"variablecode": "CDOMdigitalFinal",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Fluorescence, dissolved organic matter (DOM)",
"variabledefinition": "Fluorescence of dissolved organic matter (DOM), CDOM sensor",
"variablecode": "CDOManalogFinal",
"nodatavalue": -9999,
},
{
"variabletypecv": "Instrumentation",
"variablenamecv": "Voltage",
"variabledefinition": "Battery voltage",
"variablecode": "Batt_Volt",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Temperature, sensor",
"variabledefinition": "Temperature, sensor",
"variablecode": "Temp",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Turbidity",
"variabledefinition": "Turbidity",
"variablecode": "Turbidity",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Turbidity",
"variabledefinition": "Turbidity",
"variablecode": "Turbidity_Avg",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Salinity",
"variabledefinition": "Salinity",
"variablecode": "Salinity",
"nodatavalue": -9999,
},
{
"variabletypecv": "Chemistry",
"variablenamecv": "Salinity",
"variabledefinition": "Salinity",
"variablecode": "Salinity_Avg",
"nodatavalue": -9999,
},
]
return [VariablesCreate(**v) for v in loggers_variables_list]
def loggers_equipment_models(man_id: int) -> List[EquipmentModelCreate]:
equipment_models = [
{
"modelmanufacturerid": man_id,
"modelpartnumber": "CS511-L",
"modelname": "Dissolved Oxygen Sensor",
"isinstrument": True,
},
]
return [EquipmentModelCreate(**em) for em in equipment_models]
def loggers_timeseries_result() -> List[LoggersTimeSeriesCreate]:
timeseries = [
# Langtjern_boye
{
"samplingfeaturecode": "Langtjern_boye",
"variablecode": "OxygenSat_1m",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "%",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Langtjern_boye",
"variablecode": "OxygenSat_6m",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "%",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Langtjern_boye",
"variablecode": "Temp_0.5m",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Langtjern_boye",
"variablecode": "Temp_1m",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Langtjern_boye",
"variablecode": "Temp_1.5m",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Langtjern_boye",
"variablecode": "Temp_2m",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Langtjern_boye",
"variablecode": "Temp_3m",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Langtjern_boye",
"variablecode": "Temp_4m",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Langtjern_boye",
"variablecode": "Temp_6m",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Langtjern_boye",
"variablecode": "Temp_8m",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Langtjern_boye",
"variablecode": "Batt_Volt_Avg",
"unitstypecv": "Electromotive force",
"unitsabbreviation": "Volts",
"aggregationstatisticcv": "Average",
},
# Langtjern_outlet
{
"samplingfeaturecode": "Langtjern_outlet",
"variablecode": "Batt_Volt_Avg",
"unitstypecv": "Electromotive force",
"unitsabbreviation": "Volts",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_outlet",
"variablecode": "CDOMdigitalFinal_Avg",
"unitstypecv": "Fluorescence",
"unitsabbreviation": "µg/L",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_outlet",
"variablecode": "CO2Value_Avg",
"unitstypecv": "Concentration count per volume",
"unitsabbreviation": "ppmv",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_outlet",
"variablecode": "CondValue_Avg",
"unitstypecv": "Electrical conductivity",
"unitsabbreviation": "µS/cm",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_outlet",
"variablecode": "LevelValue_Avg",
"unitstypecv": "Length",
"unitsabbreviation": "mm",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_outlet",
"variablecode": "PhValue_Avg",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_outlet",
"variablecode": "Temp_air_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_outlet",
"variablecode": "Temp_water_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_outlet",
"variablecode": "Temp_ground_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
# Langtjern_inlet
{
"samplingfeaturecode": "Langtjern_inlet",
"variablecode": "Batt_Volt_Avg",
"unitstypecv": "Electromotive force",
"unitsabbreviation": "Volts",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_inlet",
"variablecode": "CDOMdigitalFinal_Avg",
"unitstypecv": "Fluorescence",
"unitsabbreviation": "µg/L",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_inlet",
"variablecode": "LevelValue_Avg",
"unitstypecv": "Length",
"unitsabbreviation": "mm",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_inlet",
"variablecode": "Temp_water_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_inlet",
"variablecode": "Temp_ground_20cm_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_inlet",
"variablecode": "Temp_ground_15cm_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
# Langtjern_weather
{
"samplingfeaturecode": "Langtjern_weather",
"variablecode": "Batt_V_Min",
"unitstypecv": "Electromotive force",
"unitsabbreviation": "Volts",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Langtjern_weather",
"variablecode": "LevelValue_Avg",
"unitstypecv": "Length",
"unitsabbreviation": "mm",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_weather",
"variablecode": "BV_mm",
"unitstypecv": "Length",
"unitsabbreviation": "mm",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Langtjern_weather",
"variablecode": "GS_Wpm2_Avg",
"unitstypecv": "Energy per area",
"unitsabbreviation": "W/m2",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_weather",
"variablecode": "LF_psnt_Avg",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "%",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_weather",
"variablecode": "LT_gr_C_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_weather",
"variablecode": "NB_mm",
"unitstypecv": "Length",
"unitsabbreviation": "mm",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Langtjern_weather",
"variablecode": "VH_3_s_Max",
"unitstypecv": "Linear velocity",
"unitsabbreviation": "m/s",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Langtjern_weather",
"variablecode": "VH_mps_WVc(1)",
"unitstypecv": "Linear velocity",
"unitsabbreviation": "m/s",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Langtjern_weather",
"variablecode": "VH_mps_WVc(2)",
"unitstypecv": "Angle",
"unitsabbreviation": "deg",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Langtjern_weather",
"variablecode": "waterLevel_mm_Avg",
"unitstypecv": "Length",
"unitsabbreviation": "mm",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Langtjern_weather",
"variablecode": "snowValue_mm_Avg",
"unitstypecv": "Length",
"unitsabbreviation": "mm",
"aggregationstatisticcv": "Average",
"dataqualitycodes": ["frozen_test", "snowdepth_variability_test"],
},
# Iskoras
{
"samplingfeaturecode": "Iskoras_outlet",
"variablecode": "Batt_Volt_Avg",
"unitstypecv": "Electromotive force",
"unitsabbreviation": "Volts",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Iskoras_outlet",
"variablecode": "Temp_air_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Iskoras_outlet",
"variablecode": "CondTeller",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Iskoras_outlet",
"variablecode": "PHTeller",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Iskoras_outlet",
"variablecode": "LevelTeller",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Iskoras_outlet",
"variablecode": "TempLevelTeller",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Iskoras_outlet",
"variablecode": "CondValue_Avg",
"unitstypecv": "Electrical conductivity",
"unitsabbreviation": "µS/cm",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Iskoras_outlet",
"variablecode": "TempCond_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Iskoras_outlet",
"variablecode": "TempPh_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Iskoras_outlet",
"variablecode": "PhValue_Avg",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Iskoras_outlet",
"variablecode": "LevelValue_Avg",
"unitstypecv": "Length",
"unitsabbreviation": "mm",
"aggregationstatisticcv": "Average",
},
# Maalselva
{
"samplingfeaturecode": "Maalselva",
"variablecode": "PhValue",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Maalselva",
"variablecode": "Temp",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Maalselva",
"variablecode": "CondValue",
"unitstypecv": "Electrical conductivity",
"unitsabbreviation": "µS/cm",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Maalselva",
"variablecode": "CDOMdigitalFinal",
"unitstypecv": "Fluorescence",
"unitsabbreviation": "µg/L",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Maalselva",
"variablecode": "CDOManalogFinal",
"unitstypecv": "Fluorescence",
"unitsabbreviation": "µg/L",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Maalselva",
"variablecode": "Salinity",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Maalselva",
"variablecode": "Batt_Volt",
"unitstypecv": "Electromotive force",
"unitsabbreviation": "Volts",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Maalselva",
"variablecode": "Turbidity",
"unitstypecv": "Turbidity",
"unitsabbreviation": "NTU",
"aggregationstatisticcv": "Unknown",
},
# Lundevann
{
"samplingfeaturecode": "Lundevann",
"variablecode": "PhValue",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Lundevann",
"variablecode": "Temp",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Lundevann",
"variablecode": "CondValue",
"unitstypecv": "Electrical conductivity",
"unitsabbreviation": "µS/cm",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Lundevann",
"variablecode": "Turbidity",
"unitstypecv": "Turbidity",
"unitsabbreviation": "NTU",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Lundevann",
"variablecode": "CDOMdigitalFinal",
"unitstypecv": "Fluorescence",
"unitsabbreviation": "µg/L",
"aggregationstatisticcv": "Unknown",
},
# Adventelva
{
"samplingfeaturecode": "Adventelva",
"variablecode": "PhValue_Avg",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Adventelva",
"variablecode": "CondValue_Avg",
"unitstypecv": "Electrical conductivity",
"unitsabbreviation": "mS/m",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Adventelva",
"variablecode": "LevelValue_Avg",
"unitstypecv": "Length",
"unitsabbreviation": "mm",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Adventelva",
"variablecode": "Turbidity_Avg",
"unitstypecv": "Turbidity",
"unitsabbreviation": "NTU",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Adventelva",
"variablecode": "Temp_water_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Adventelva",
"variablecode": "Batt_Volt_Avg",
"unitstypecv": "Electromotive force",
"unitsabbreviation": "Volts",
"aggregationstatisticcv": "Average",
},
# Svanfoss
{
"samplingfeaturecode": "Svanfoss",
"variablecode": "Batt_Volt_Avg",
"unitstypecv": "Electromotive force",
"unitsabbreviation": "Volts",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Svanfoss",
"variablecode": "Temp_air_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Svanfoss",
"variablecode": "CDOMdigitalFinal",
"unitstypecv": "Fluorescence",
"unitsabbreviation": "µg/L",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Svanfoss",
"variablecode": "PhValue_Avg",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Svanfoss",
"variablecode": "TempPh_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Svanfoss",
"variablecode": "CondValue_Avg",
"unitstypecv": "Electrical conductivity",
"unitsabbreviation": "mS/m",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Svanfoss",
"variablecode": "Salinity_Avg",
"unitstypecv": "Salinity",
"unitsabbreviation": "ppt",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Svanfoss",
"variablecode": "Turbidity_Avg",
"unitstypecv": "Turbidity",
"unitsabbreviation": "NTU",
"aggregationstatisticcv": "Average",
},
# Rosten
{
"samplingfeaturecode": "Rosten",
"variablecode": "Batt_Volt_Avg",
"unitstypecv": "Electromotive force",
"unitsabbreviation": "Volts",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Rosten",
"variablecode": "Temp_air_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Rosten",
"variablecode": "CDOMdigitalFinal",
"unitstypecv": "Fluorescence",
"unitsabbreviation": "µg/L",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Rosten",
"variablecode": "PhValue_Avg",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Rosten",
"variablecode": "TempPh_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Rosten",
"variablecode": "CondValue_Avg",
"unitstypecv": "Electrical conductivity",
"unitsabbreviation": "mS/m",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Rosten",
"variablecode": "Salinity_Avg",
"unitstypecv": "Salinity",
"unitsabbreviation": "ppt",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Rosten",
"variablecode": "Turbidity_Avg",
"unitstypecv": "Turbidity",
"unitsabbreviation": "NTU",
"aggregationstatisticcv": "Average",
},
# Kviteberg
{
"samplingfeaturecode": "Kviteberg",
"variablecode": "Batt_V_Min",
"unitstypecv": "Electromotive force",
"unitsabbreviation": "Volts",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Kviteberg",
"variablecode": "GS_Wpm2_Avg",
"unitstypecv": "Energy per area",
"unitsabbreviation": "W/m2",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Kviteberg",
"variablecode": "LF_psnt_Avg",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "%",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Kviteberg",
"variablecode": "LT_gr_C_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Kviteberg",
"variablecode": "VH_3_s_Max",
"unitstypecv": "Linear velocity",
"unitsabbreviation": "m/s",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Kviteberg",
"variablecode": "VH_mps_WVc(1)",
"unitstypecv": "Linear velocity",
"unitsabbreviation": "m/s",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Kviteberg",
"variablecode": "VH_mps_WVc(2)",
"unitstypecv": "Angle",
"unitsabbreviation": "deg",
"aggregationstatisticcv": "Unknown",
},
# Sjoa
{
"samplingfeaturecode": "Sjoa",
"variablecode": "Batt_Volt_Avg",
"unitstypecv": "Electromotive force",
"unitsabbreviation": "Volts",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Sjoa",
"variablecode": "Temp_air_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Sjoa",
"variablecode": "CDOMdigitalFinal",
"unitstypecv": "Fluorescence",
"unitsabbreviation": "µg/L",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Sjoa",
"variablecode": "PhValue_Avg",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Sjoa",
"variablecode": "TempPh_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Sjoa",
"variablecode": "CondValue_Avg",
"unitstypecv": "Electrical conductivity",
"unitsabbreviation": "mS/m",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Sjoa",
"variablecode": "Salinity_Avg",
"unitstypecv": "Salinity",
"unitsabbreviation": "ppt",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Sjoa",
"variablecode": "Turbidity_Avg",
"unitstypecv": "Turbidity",
"unitsabbreviation": "NTU",
"aggregationstatisticcv": "Average",
},
# Kraakfoss
{
"samplingfeaturecode": "Kraakfoss",
"variablecode": "Batt_Volt_Avg",
"unitstypecv": "Electromotive force",
"unitsabbreviation": "Volts",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Kraakfoss",
"variablecode": "Temp_air_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Kraakfoss",
"variablecode": "CDOMdigitalFinal",
"unitstypecv": "Fluorescence",
"unitsabbreviation": "µg/L",
"aggregationstatisticcv": "Unknown",
},
{
"samplingfeaturecode": "Kraakfoss",
"variablecode": "PhValue_Avg",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Kraakfoss",
"variablecode": "TempPh_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Kraakfoss",
"variablecode": "CondValue_Avg",
"unitstypecv": "Electrical conductivity",
"unitsabbreviation": "mS/m",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Kraakfoss",
"variablecode": "Salinity_Avg",
"unitstypecv": "Salinity",
"unitsabbreviation": "ppt",
"aggregationstatisticcv": "Average",
},
{
"samplingfeaturecode": "Kraakfoss",
"variablecode": "Turbidity_Avg",
"unitstypecv": "Turbidity",
"unitsabbreviation": "NTU",
"aggregationstatisticcv": "Average",
},
]
return [LoggersTimeSeriesCreate.parse_obj(ts) for ts in timeseries]
def loggers_update_qc() -> List[LoggersTimeSeriesCreate]:
timeseries_qc = [
{
"samplingfeaturecode": "Langtjern_weather",
"variablecode": "snowValue_mm_Avg",
"unitstypecv": "Length",
"unitsabbreviation": "mm",
"aggregationstatisticcv": "Average",
"dataqualitycodes": ["frozen_test", "snowdepth_variability_test"],
},
{
"samplingfeaturecode": "Maalselva",
"variablecode": "PhValue",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Unknown",
"dataqualitycodes": ["frozen_test", "range_test", "spike_test"],
},
{
"samplingfeaturecode": "Maalselva",
"variablecode": "Temp",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Unknown",
"dataqualitycodes": ["frozen_test", "range_test", "spike_test"],
},
{
"samplingfeaturecode": "Maalselva",
"variablecode": "CondValue",
"unitstypecv": "Electrical conductivity",
"unitsabbreviation": "µS/cm",
"aggregationstatisticcv": "Unknown",
"dataqualitycodes": ["frozen_test", "range_test", "spike_test"],
},
{
"samplingfeaturecode": "Maalselva",
"variablecode": "CDOMdigitalFinal",
"unitstypecv": "Fluorescence",
"unitsabbreviation": "µg/L",
"aggregationstatisticcv": "Unknown",
"dataqualitycodes": ["frozen_test", "range_test", "spike_test"],
},
{
"samplingfeaturecode": "Maalselva",
"variablecode": "CDOManalogFinal",
"unitstypecv": "Fluorescence",
"unitsabbreviation": "µg/L",
"aggregationstatisticcv": "Unknown",
"dataqualitycodes": ["frozen_test", "range_test", "spike_test"],
},
{
"samplingfeaturecode": "Maalselva",
"variablecode": "Turbidity",
"unitstypecv": "Turbidity",
"unitsabbreviation": "NTU",
"aggregationstatisticcv": "Unknown",
"dataqualitycodes": ["frozen_test", "range_test", "spike_test"],
},
{
"samplingfeaturecode": "Adventelva",
"variablecode": "PhValue_Avg",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Average",
"dataqualitycodes": ["frozen_test", "range_test", "spike_test"],
},
{
"samplingfeaturecode": "Adventelva",
"variablecode": "CondValue_Avg",
"unitstypecv": "Electrical conductivity",
"unitsabbreviation": "mS/m",
"aggregationstatisticcv": "Average",
"dataqualitycodes": ["frozen_test", "range_test", "spike_test"],
},
{
"samplingfeaturecode": "Adventelva",
"variablecode": "Turbidity_Avg",
"unitstypecv": "Turbidity",
"unitsabbreviation": "NTU",
"aggregationstatisticcv": "Average",
"dataqualitycodes": ["frozen_test", "range_test", "spike_test"],
},
{
"samplingfeaturecode": "Adventelva",
"variablecode": "Temp_water_Avg",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Average",
"dataqualitycodes": ["frozen_test", "range_test", "spike_test"],
},
{
"samplingfeaturecode": "Lundevann",
"variablecode": "PhValue",
"unitstypecv": "Dimensionless",
"unitsabbreviation": "-",
"aggregationstatisticcv": "Unknown",
"dataqualitycodes": ["frozen_test", "range_test", "spike_test"],
},
{
"samplingfeaturecode": "Lundevann",
"variablecode": "Temp",
"unitstypecv": "Temperature",
"unitsabbreviation": "degC",
"aggregationstatisticcv": "Unknown",
"dataqualitycodes": ["frozen_test", "range_test", "spike_test"],
},
{
"samplingfeaturecode": "Lundevann",
"variablecode": "CondValue",
"unitstypecv": "Electrical conductivity",
"unitsabbreviation": "µS/cm",
"aggregationstatisticcv": "Unknown",
"dataqualitycodes": ["frozen_test", "range_test", "spike_test"],
},
{
"samplingfeaturecode": "Lundevann",
"variablecode": "Turbidity",
"unitstypecv": "Turbidity",
"unitsabbreviation": "NTU",
"aggregationstatisticcv": "Unknown",
"dataqualitycodes": ["frozen_test", "range_test", "spike_test"],
},
{
"samplingfeaturecode": "Lundevann",
"variablecode": "CDOMdigitalFinal",
"unitstypecv": "Fluorescence",
"unitsabbreviation": "µg/L",
"aggregationstatisticcv": "Unknown",
"dataqualitycodes": ["frozen_test", "range_test", "spike_test"],
},
]
return [LoggersTimeSeriesCreate.parse_obj(ts) for ts in timeseries_qc]
|
'''
Module for reading Cosmo Skymed HDF5 imagery. This is more or less
a line-for-line port of the reader from NGA's MATLAB SAR Toolbox.
'''
# SarPy imports
from .sicd import MetaNode
from . import Reader as ReaderSuper # Reader superclass
from . import sicd
from ...geometry import geocoords as gc
from ...geometry import point_projection as point
from .utils import chipper
# Python standard library imports
import copy
import datetime
# External dependencies
import numpy as np
import h5py
# We prefer numpy.polynomial.polynomial over numpy.polyval/polyfit since its coefficient
# ordering is consistent with SICD, and because it supports 2D polynomials.
from numpy.polynomial import polynomial as poly
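# e.g. poly.polyval(2.0, [1.0, 3.0]) == 7.0 -- coefficients are ordered from low to high degree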
from scipy.constants import speed_of_light
# try to import comb from scipy.special.
# If an old version of scipy is being used then import from scipy.misc
from scipy import __version__ as scipy_version
major_version = int(scipy_version.split('.')[0])
if major_version >= 1:
from scipy.special import comb
else:
from scipy.misc import comb
__classification__ = "UNCLASSIFIED"
__author__ = ["Jarred Barber", "Wade Schwartzkopf"]
__email__ = "jpb5082@gmail.com"
def datenum_w_frac(datestring, as_datetime=False):
'''
Python's datetime type won't parse or store times with finer than microsecond
precision, but CSK times are often represented down to the nanosecond. In order
to handle this precision, we handle the fractional seconds separately so we can
process them with the precision we need.
`as_datetime` returns a Python datetime object.
'''
epoch = datetime.datetime.strptime('2000-01-01 00:00:00',
'%Y-%m-%d %H:%M:%S')
if '.' in datestring:
date, frac = datestring.split('.')
else:
date = datestring
frac = '0'
date = datetime.datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
datenum_s = (date - epoch).total_seconds()
datenum_frac = float('0.' + frac)
if np.isnan(datenum_frac):
datenum_frac = 0
if as_datetime:
return date + datetime.timedelta(seconds=datenum_frac)
else:
return datenum_s, datenum_frac
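# Usage sketch: datenum_w_frac('2000-01-01 00:00:01.5') returns (1.0, 0.5), i.e.
# whole seconds since the 2000-01-01 epoch plus the fractional part; with
# as_datetime=True the same call returns a datetime 1.5 s after that epoch.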
def isa(filename):
"""Test to see if file is a product.xml file."""
try:
with h5py.File(filename, 'r') as h5:
if 'CSK' in h5.attrs['Satellite ID'].decode('ascii'):
return Reader
except Exception:
pass
class CSMChipper(chipper.Base):
def __init__(self, filename, band, meta):
self.filename = filename
def complextype(data):
return data[..., 0] + data[..., 1] * 1j
self.band = band
self.complextype = complextype
self.symmetry = [False, False, True]
with h5py.File(filename, 'r') as h5:
lineorder = h5.attrs['Lines Order'].decode('ascii')
columnorder = h5.attrs['Columns Order'].decode('ascii')
key = 'S%02d' % (self.band+1)
self.datasize = np.array(h5[key]['SBI'].shape[:2])
self.symmetry[1] = (columnorder != 'NEAR-FAR')
self.symmetry[0] = (lineorder == 'EARLY-LATE') != (
meta.SCPCOA.SideOfTrack == 'R')
def read_raw_fun(self, dim1rg, dim2rg):
if len(dim1rg) == 2:
dim1rg = list(dim1rg) + [1]
if len(dim2rg) == 2:
dim2rg = list(dim2rg) + [1]
with h5py.File(self.filename, 'r') as h5:
s1, e1, k1 = dim1rg
s2, e2, k2 = dim2rg
key = 'S%02d' % (self.band+1)
return h5[key]['SBI'][s1:e1:k1, s2:e2:k2, :]
class Reader(ReaderSuper):
def __init__(self, product_filename):
self.sicdmeta = meta2sicd(product_filename)
self.read_chip = [
CSMChipper(product_filename, band, self.sicdmeta[band])
for band in range(len(self.sicdmeta))
]
def meta2sicd(filename):
'''
Extract attributes from CSM HDF5 file and format as SICD
'''
return _convert_meta(*_extract_meta_from_HDF(filename))
def _populate_meta(root, recurse=True):
'''
DFS to merge all attrs into a single dict
'''
def f(v):
if isinstance(v, bytes):
return v.decode('ascii')
return v
meta = {k: f(v) for k, v in root.attrs.items()}
if recurse:
try:
for v in root.values():
try:
meta.update(_populate_meta(v))
except Exception: # Doesn't have attrs
pass
except AttributeError: # Doesn't have values()
pass
return meta
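# Illustrative result of _populate_meta (hypothetical attribute values): for a file whose root
# carries {'Satellite ID': b'CSKS1'} and whose group '/S01' carries {'Polarisation': b'HH'},
# _populate_meta(h5) returns {'Satellite ID': 'CSKS1', 'Polarisation': 'HH'} -- byte strings are
# decoded to ASCII and attributes from nested groups/datasets are merged into one flat dict.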
def _extract_meta_from_HDF(filename):
'''
Extract the attribute metadata from the HDF5 files
'''
band_meta = []
band_shapes = []
with h5py.File(filename, 'r') as h5:
h5meta = _populate_meta(h5, recurse=False)
# per-band data
numbands = len(h5.keys())
for i in range(numbands): # "pingpong" mode has multiple polarizations
groupname = '/S%02d' % (i+1)
band_meta.append(_populate_meta(h5[groupname]))
band_shapes.append(h5[groupname]['SBI'].shape[:2])
return h5meta, band_meta, band_shapes
def _convert_meta(h5meta, band_meta, band_shapes):
'''
Extract the CSM metadata into SICD format
Inputs:
h5meta: The attributes from the HDF5 root
band_meta: A list of dicts, with dict i containing the attributes from /S0{i+1}
band_shapes: The dataset shapes of each band dataset.
'''
def _polyshift(a, shift):
b = np.zeros(a.size)
for j in range(1, len(a) + 1):
for k in range(j, len(a) + 1):
b[j - 1] = b[j - 1] + (
a[k - 1] * comb(k - 1, j - 1) * np.power(shift, (k - j)))
return b
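# _polyshift re-references a 1D polynomial: given coefficients a (lowest order first) of p(t),
# it returns coefficients b such that b(t) == p(t + shift). It is used below to move the CSK
# Doppler polynomials from their native reference times to the SCP times.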
numbands = len(band_meta)
# CollectionInfo
output_meta = MetaNode()
output_meta.CollectionInfo = MetaNode()
output_meta.CollectionInfo.CollectorName = h5meta['Satellite ID']
output_meta.CollectionInfo.CoreName = str(h5meta['Programmed Image ID'])
output_meta.CollectionInfo.CollectType = 'MONOSTATIC'
output_meta.CollectionInfo.RadarMode = MetaNode()
if h5meta['Acquisition Mode'] in [
'HIMAGE', 'PINGPONG', 'WIDEREGION', 'HUGEREGION'
]:
output_meta.CollectionInfo.RadarMode.ModeType = 'STRIPMAP'
else:
# case {'ENHANCED SPOTLIGHT','SMART'} # "Spotlight"
output_meta.CollectionInfo.RadarMode.ModeType = 'DYNAMIC STRIPMAP'
output_meta.CollectionInfo.RadarMode.ModeID = h5meta['Multi-Beam ID']
output_meta.CollectionInfo.Classification = 'UNCLASSIFIED'
# ImageCreation
output_meta.ImageCreation = MetaNode()
img_create_time = datenum_w_frac(h5meta['Product Generation UTC'], True)
output_meta.ImageCreation.DateTime = img_create_time
output_meta.ImageCreation.Profile = 'Prototype'
# ImageData
output_meta.ImageData = MetaNode() # Just a placeholder
# Most subfields added below in "per band" section
# Used for computing SCP later
# GeoData
output_meta.GeoData = MetaNode()
if h5meta['Ellipsoid Designator'] == 'WGS84':
output_meta.GeoData.EarthModel = 'WGS_84'
# Most subfields added below in "per band" section
# Grid
output_meta.Grid = MetaNode()
if h5meta['Projection ID'] == 'SLANT RANGE/AZIMUTH':
output_meta.Grid.ImagePlane = 'SLANT'
output_meta.Grid.Type = 'RGZERO'
else:
output_meta.Grid.ImagePlane = 'GROUND'
output_meta.Grid.Row = MetaNode()
output_meta.Grid.Col = MetaNode()
output_meta.Grid.Row.Sgn = -1 # Always true for CSM
output_meta.Grid.Col.Sgn = -1 # Always true for CSM
fc = h5meta['Radar Frequency'] # Center frequency
output_meta.Grid.Row.KCtr = 2 * fc / speed_of_light
output_meta.Grid.Col.KCtr = 0
output_meta.Grid.Row.DeltaKCOAPoly = np.atleast_2d(0)
output_meta.Grid.Row.WgtType = MetaNode()
output_meta.Grid.Col.WgtType = MetaNode()
output_meta.Grid.Row.WgtType.WindowName = h5meta[
'Range Focusing Weighting Function'].rstrip().upper()
if output_meta.Grid.Row.WgtType.WindowName == 'HAMMING':  # The usual CSM weighting
output_meta.Grid.Row.WgtType.Parameter = MetaNode()
output_meta.Grid.Row.WgtType.Parameter.name = 'COEFFICIENT'
output_meta.Grid.Row.WgtType.Parameter.value = \
str(h5meta['Range Focusing Weighting Coefficient'])
output_meta.Grid.Col.WgtType.WindowName = h5meta[
'Azimuth Focusing Weighting Function'].rstrip().upper()
if output_meta.Grid.Col.WgtType.WindowName == 'HAMMING':  # The usual CSM weighting
output_meta.Grid.Col.WgtType.Parameter = MetaNode()
output_meta.Grid.Col.WgtType.Parameter.name = 'COEFFICIENT'
output_meta.Grid.Col.WgtType.Parameter.value = \
str(h5meta['Azimuth Focusing Weighting Coefficient'])
# WgtFunct will be populated in sicd.derived_fields
# More subfields added below in "per band" section
# Timeline
[collectStart,
collectStartFrac] = datenum_w_frac(h5meta['Scene Sensing Start UTC'])
[collectEnd,
collectEndFrac] = datenum_w_frac(h5meta['Scene Sensing Stop UTC'])
# We lose a bit of precision when assigning the SICD CollectStart
# field, since a Python datetime type just doesn't have enough
# bits to handle the full precision given in the CSK metadata. However, all
# relative times within the SICD metadata structure will be computed at
# full precision.
output_meta.Timeline = MetaNode()
output_meta.Timeline.IPP = MetaNode()
output_meta.Timeline.IPP.Set = MetaNode()
output_meta.Timeline.CollectStart = datenum_w_frac(
h5meta['Scene Sensing Start UTC'], True)
output_meta.Timeline.CollectDuration = datenum_w_frac(
h5meta['Scene Sensing Stop UTC'], True)
output_meta.Timeline.CollectDuration = (
output_meta.Timeline.CollectDuration -
output_meta.Timeline.CollectStart).total_seconds()
output_meta.Timeline.IPP.Set.TStart = 0
output_meta.Timeline.IPP.Set.TEnd = 0 # Apply real value later. Just a placeholder.
output_meta.Timeline.IPP.Set.IPPStart = 0
# More subfields added below in "per band" section
# Position
# Compute polynomial from state vectors
[ref_time, ref_time_frac] = datenum_w_frac(h5meta['Reference UTC'])
# Times in SICD are with respect to time from start of collect, but
# time in CSM are generally with respect to reference time.
ref_time_offset = np.round(ref_time - collectStart)
ref_time_offset += (
ref_time_frac - collectStartFrac) # Handle fractional seconds
state_vector_T = h5meta['State Vectors Times'] # In seconds
state_vector_T = state_vector_T + ref_time_offset # Make with respect to Timeline.CollectStart
state_vector_pos = h5meta['ECEF Satellite Position']
# sv2poly.m in MATLAB SAR Toolbox shows ways to determine best polynomial order,
# but 5th is almost always best
polyorder = np.minimum(5, len(state_vector_T) - 1)
P_x = poly.polyfit(state_vector_T, state_vector_pos[:, 0], polyorder)
P_y = poly.polyfit(state_vector_T, state_vector_pos[:, 1], polyorder)
P_z = poly.polyfit(state_vector_T, state_vector_pos[:, 2], polyorder)
# We don't use these since they are derivable from the position polynomial
# state_vector_vel = h5meta['ECEF Satellite Velocity']
# state_vector_acc = h5meta['ECEF Satellite Acceleration']
# P_vx = polyfit(state_vector_T, state_vector_vel(1,:), polyorder)
# P_vy = polyfit(state_vector_T, state_vector_vel(2,:), polyorder)
# P_vz = polyfit(state_vector_T, state_vector_vel(3,:), polyorder)
# P_ax = polyfit(state_vector_T, state_vector_acc(1,:), polyorder)
# P_ay = polyfit(state_vector_T, state_vector_acc(2,:), polyorder)
# P_az = polyfit(state_vector_T, state_vector_acc(3,:), polyorder)
# Store position polynomial
output_meta.Position = MetaNode()
output_meta.Position.ARPPoly = MetaNode()
output_meta.Position.ARPPoly.X = P_x
output_meta.Position.ARPPoly.Y = P_y
output_meta.Position.ARPPoly.Z = P_z
# RadarCollection
output_meta.RadarCollection = MetaNode()
output_meta.RadarCollection.RcvChannels = MetaNode()
output_meta.RadarCollection.RcvChannels.ChanParameters = []
tx_pol = []
for i in range(numbands):
pol = band_meta[i]['Polarisation']
output_meta.RadarCollection.RcvChannels.ChanParameters.append(MetaNode())
output_meta.RadarCollection.RcvChannels.ChanParameters[i].TxRcvPolarization = \
pol[0] + ':' + pol[1]
if pol[0] not in tx_pol:
tx_pol.append(pol[0])
if len(tx_pol) == 1:
output_meta.RadarCollection.TxPolarization = tx_pol[0]
else:
output_meta.RadarCollection.TxPolarization = 'SEQUENCE'
output_meta.RadarCollection.TxSequence = []
for i in range(len(tx_pol)):
output_meta.RadarCollection.TxSequence.append(MetaNode())
output_meta.RadarCollection.TxSequence[i].TxStep = i + 1
output_meta.RadarCollection.TxSequence[i].TxPolarization = tx_pol[i]
# Most subfields added below in "per band" section
# ImageFormation
output_meta.ImageFormation = MetaNode()
output_meta.ImageFormation.RcvChanProc = MetaNode()
output_meta.ImageFormation.RcvChanProc.NumChanProc = 1
output_meta.ImageFormation.RcvChanProc.PRFScaleFactor = 1
output_meta.ImageFormation.ImageFormAlgo = 'RMA'
output_meta.ImageFormation.TStartProc = 0
output_meta.ImageFormation.TEndProc = output_meta.Timeline.CollectDuration
output_meta.ImageFormation.STBeamComp = 'SV'
output_meta.ImageFormation.ImageBeamComp = 'NO'
output_meta.ImageFormation.AzAutofocus = 'NO'
output_meta.ImageFormation.RgAutofocus = 'NO'
# More subfields added below in "per band" section
output_meta.RMA = MetaNode()
output_meta.RMA.RMAlgoType = 'OMEGA_K'
output_meta.RMA.ImageType = 'INCA'
output_meta.RMA.INCA = MetaNode()
output_meta.RMA.INCA.FreqZero = fc
# These polynomials are used later to determine RMA.INCA.DopCentroidPoly
t_az_ref = h5meta['Azimuth Polynomial Reference Time']
t_rg_ref = h5meta['Range Polynomial Reference Time']
# Strip off trailing zero coefficients from the polynomials. Not required, but it keeps things cleaner.
dop_poly_az = h5meta['Centroid vs Azimuth Time Polynomial']
dop_poly_az = dop_poly_az[:(np.argwhere(dop_poly_az != 0.0)[-1, 0] + 1)]
dop_poly_rg = h5meta['Centroid vs Range Time Polynomial']
dop_poly_rg = dop_poly_rg[:(np.argwhere(dop_poly_rg != 0.0)[-1, 0] + 1)]
# dop_rate_poly_az = h5meta['Doppler Rate vs Azimuth Time Polynomial']
dop_rate_poly_rg = h5meta['Doppler Rate vs Range Time Polynomial']
dop_rate_poly_rg = dop_rate_poly_rg[:(np.argwhere(dop_rate_poly_rg != 0.0)[-1, 0] + 1)]
# SCPCOA
output_meta.SCPCOA = MetaNode()
output_meta.SCPCOA.SideOfTrack = h5meta['Look Side'][0:1].upper()
# Most subfields added below in "per band" section, but we grab this field
# now so we know to flip from CSM's EARLY-LATE column order to SICD's
# view-from-above column order.
# Process fields specific to each polarimetric band
band_independent_meta = copy.deepcopy(
output_meta) # Values that are consistent across all bands
grouped_meta = []
for i in range(numbands):
output_meta = copy.deepcopy(band_independent_meta)
# ImageData
datasize = band_shapes[i] # All polarizations should be same size
output_meta.ImageData = MetaNode()
output_meta.ImageData.NumCols = datasize[0]
output_meta.ImageData.NumRows = datasize[1]
output_meta.ImageData.FullImage = copy.deepcopy(output_meta.ImageData)
output_meta.ImageData.FirstRow = 0
output_meta.ImageData.FirstCol = 0
output_meta.ImageData.PixelType = 'RE16I_IM16I'
# There are many different options for picking the SCP point. We chose
# the point that is closest to the reference zero-doppler and range
# times in the CSM metadata.
t_az_first = band_meta[i]['Zero Doppler Azimuth First Time']
# Zero doppler time of first column
ss_az_s = band_meta[i]['Line Time Interval']
# Image column spacing in zero doppler time (seconds)
output_meta.ImageData.SCPPixel = MetaNode()
output_meta.ImageData.SCPPixel.Col = int(
np.round((t_az_ref - t_az_first) / ss_az_s) + 1)
if output_meta.SCPCOA.SideOfTrack == 'L':
# Order of columns in SICD goes in reverse time for left-looking
ss_az_s = -ss_az_s
output_meta.ImageData.SCPPixel.Col = int(
output_meta.ImageData.NumCols -
output_meta.ImageData.SCPPixel.Col - 1)
# First column in SICD is actually last line in CSM terminology
t_az_first = band_meta[i]['Zero Doppler Azimuth Last Time']
t_rg_first = band_meta[i][
'Zero Doppler Range First Time'] # Range time of first row
if 'SCS' in h5meta['Product Type']:
# 'Column Time Interval' does not exist in detected products.
ss_rg_s = band_meta[i][
'Column Time Interval'] # Row spacing in range time (seconds)
output_meta.ImageData.SCPPixel.Row = int(
round((t_rg_ref - t_rg_first) / ss_rg_s) + 1)
else:
raise NotImplementedError('Only complex products supported')
# Override the SCP chosen above with how Lockheed seems to pick it:
output_meta.ImageData.SCPPixel = MetaNode()
output_meta.ImageData.SCPPixel.Col = datasize[0] // 2
output_meta.ImageData.SCPPixel.Row = int(np.ceil(datasize[1] / 2) - 1)
# GeoData
# Initially, we just seed this with a rough value. Later we will put
# in something more precise.
latlon = band_meta[i]['Centre Geodetic Coordinates']
output_meta.GeoData.SCP = MetaNode()
output_meta.GeoData.SCP.LLH = MetaNode()
output_meta.GeoData.SCP.ECF = MetaNode()
output_meta.GeoData.SCP.LLH.Lat = latlon[0]
output_meta.GeoData.SCP.LLH.Lon = latlon[1]
# CSM generally gives HAE as zero. Perhaps we should adjust this to DEM.
output_meta.GeoData.SCP.LLH.HAE = latlon[2]
ecf = gc.geodetic_to_ecf(latlon)[0]
output_meta.GeoData.SCP.ECF.X = ecf[0]
output_meta.GeoData.SCP.ECF.Y = ecf[1]
output_meta.GeoData.SCP.ECF.Z = ecf[2]
# Calling derived_sicd_fields at the end will populate these fields
# with the sensor model, so we don't need to do it here.
# latlon=get_hdf_attribute(dset_id(i),'Top Left Geodetic Coordinates')
# output_meta.GeoData.ImageCorners.ICP.FRFC.Lat=latlon(1)
# output_meta.GeoData.ImageCorners.ICP.FRFC.Lon=latlon(2)
# latlon=get_hdf_attribute(dset_id(i),'Bottom Left Geodetic Coordinates')
# output_meta.GeoData.ImageCorners.ICP.FRLC.Lat=latlon(1)
# output_meta.GeoData.ImageCorners.ICP.FRLC.Lon=latlon(2)
# latlon=get_hdf_attribute(dset_id(i),'Bottom Right Geodetic Coordinates')
# output_meta.GeoData.ImageCorners.ICP.LRLC.Lat=latlon(1)
# output_meta.GeoData.ImageCorners.ICP.LRLC.Lon=latlon(2)
# latlon=get_hdf_attribute(dset_id(i),'Top Right Geodetic Coordinates')
# output_meta.GeoData.ImageCorners.ICP.LRFC.Lat=latlon(1)
# output_meta.GeoData.ImageCorners.ICP.LRFC.Lon=latlon(2)
# Grid
output_meta.Grid.Row.SS = band_meta[i]['Column Spacing']
# Exactly equivalent to above:
# Grid.Row.SS=get_hdf_attribute(dset_id(i),'Column Time Interval')*speed_of_light/2
# Col.SS is derived after DRateSFPoly below, rather than used from this
# given field, so that SICD metadata can be internally consistent:
# output_meta.Grid.Col.SS = band_meta[i]['Line Spacing']
output_meta.Grid.Row.ImpRespBW = 2 * band_meta[i][
'Range Focusing Bandwidth'] / speed_of_light
output_meta.Grid.Row.DeltaK1 = -output_meta.Grid.Row.ImpRespBW / 2
output_meta.Grid.Row.DeltaK2 = -output_meta.Grid.Row.DeltaK1
# output_meta.Grid.Col.DeltaK1/2 will be populated by sicd.derived_fields
# ImpRespWid will be populated by sicd.derived_fields
# Timeline
prf = band_meta[i]['PRF']
output_meta.Timeline.IPP.Set.IPPEnd = int(
np.floor(prf * output_meta.Timeline.CollectDuration))
output_meta.Timeline.IPP.Set.IPPPoly = np.array([0, prf])
output_meta.Timeline.IPP.Set.TEnd = output_meta.Timeline.CollectDuration
# RadarCollection
# Absence of RefFreqIndex means all frequencies are true values
# output_meta.RadarCollection.RefFreqIndex=uint32(0)
chirp_length = band_meta[i]['Range Chirp Length']
chirp_rate = abs(band_meta[i]['Range Chirp Rate'])
bw = chirp_length * chirp_rate
output_meta.RadarCollection.TxFrequency = MetaNode()
output_meta.RadarCollection.TxFrequency.Min = fc - (bw / 2)
output_meta.RadarCollection.TxFrequency.Max = fc + (bw / 2)
output_meta.RadarCollection.Waveform = MetaNode()
output_meta.RadarCollection.Waveform.WFParameters = MetaNode()
output_meta.RadarCollection.Waveform.WFParameters.TxPulseLength = chirp_length
output_meta.RadarCollection.Waveform.WFParameters.TxRFBandwidth = bw
output_meta.RadarCollection.Waveform.WFParameters.TxFreqStart = \
output_meta.RadarCollection.TxFrequency.Min
output_meta.RadarCollection.Waveform.WFParameters.TxFMRate = chirp_rate
sample_rate = band_meta[i]['Sampling Rate']
if np.isnan(band_meta[i]['Reference Dechirping Time']):
output_meta.RadarCollection.Waveform.WFParameters.RcvDemodType = 'CHIRP'
output_meta.RadarCollection.Waveform.WFParameters.RcvFMRate = 0
else:
output_meta.RadarCollection.Waveform.WFParameters.RcvDemodType = 'STRETCH'
output_meta.RadarCollection.Waveform.WFParameters.RcvWindowLength = (
band_meta[i]['Echo Sampling Window Length']) / sample_rate
output_meta.RadarCollection.Waveform.WFParameters.ADCSampleRate = sample_rate
# ImageFormation
output_meta.ImageFormation.RcvChanProc.ChanIndex = i + 1
output_meta.ImageFormation.TxFrequencyProc = MetaNode()
output_meta.ImageFormation.TxFrequencyProc.MinProc = \
output_meta.RadarCollection.TxFrequency.Min
output_meta.ImageFormation.TxFrequencyProc.MaxProc = \
output_meta.RadarCollection.TxFrequency.Max
output_meta.ImageFormation.TxRcvPolarizationProc = \
output_meta.RadarCollection.RcvChannels.ChanParameters[i].TxRcvPolarization
# RMA
# Range time to SCP
t_rg_scp = t_rg_first + (ss_rg_s * float(output_meta.ImageData.SCPPixel.Row))
output_meta.RMA.INCA.R_CA_SCP = t_rg_scp * speed_of_light / 2
# Zero doppler time of SCP
t_az_scp = t_az_first + (ss_az_s * float(output_meta.ImageData.SCPPixel.Col))
# Compute DRateSFPoly
# We do this first since some other things are dependent on it.
# For the purposes of the DRateSFPoly computation, we ignore any
# changes in velocity or doppler rate over the azimuth dimension.
# Velocity is the derivative of position.
scp_ca_time = t_az_scp + ref_time_offset # With respect to start of collect
vel_x = poly.polyval(scp_ca_time, poly.polyder(P_x))
vel_y = poly.polyval(scp_ca_time, poly.polyder(P_y))
vel_z = poly.polyval(scp_ca_time, poly.polyder(P_z))
vm_ca_sq = vel_x**2 + vel_y**2 + vel_z**2 # Magnitude of the velocity squared
# Polynomial representing range as a function of range distance from SCP
r_ca = np.array([output_meta.RMA.INCA.R_CA_SCP, 1.])
dop_rate_poly_rg_shifted = _polyshift(dop_rate_poly_rg, t_rg_scp - t_rg_ref)
dop_rate_poly_rg_scaled = dop_rate_poly_rg_shifted * np.power(
ss_rg_s / output_meta.Grid.Row.SS,
np.arange(0, len(dop_rate_poly_rg)))
output_meta.RMA.INCA.DRateSFPoly = -poly.polymul(
dop_rate_poly_rg_scaled, r_ca) * (
speed_of_light / (2.0 * fc * vm_ca_sq)) # Assumes a SGN of -1
output_meta.RMA.INCA.DRateSFPoly = np.array(
[output_meta.RMA.INCA.DRateSFPoly]) # .transpose()
# Fields dependent on Doppler rate
# This computation of SS is actually better than the claimed SS
# (Line Spacing) in many ways, because this makes all of the metadata
# internally consistent. This must be the sample spacing exactly at SCP
# (which is the definition for SS in SICD), if the other metadata from
# which it is computed is correct and consistent. Since column SS can vary
# slightly over an RGZERO image, we don't know whether the claimed sample spacing
# in the native metadata is at our chosen SCP, at some other point, an average
# across the image, or something else.
output_meta.Grid.Col.SS = (np.sqrt(vm_ca_sq) * ss_az_s *
output_meta.RMA.INCA.DRateSFPoly[0, 0])
# Convert to azimuth spatial bandwidth (cycles per meter)
output_meta.Grid.Col.ImpRespBW = min(
band_meta[i]['Azimuth Focusing Bandwidth'] * abs(ss_az_s),
1) / output_meta.Grid.Col.SS # Can't have more bandwidth in data than sample spacing
output_meta.RMA.INCA.TimeCAPoly = np.array([scp_ca_time,
ss_az_s / output_meta.Grid.Col.SS])
# Compute DopCentroidPoly/DeltaKCOAPoly
output_meta.RMA.INCA.DopCentroidPoly = np.zeros((len(dop_poly_rg),
len(dop_poly_az)))
# Compute doppler centroid value at SCP
output_meta.RMA.INCA.DopCentroidPoly[0] = (
poly.polyval(t_rg_scp - t_rg_ref, dop_poly_rg) + poly.polyval(
t_az_scp - t_az_ref, dop_poly_az) - 0.5 *
(dop_poly_az[0] + dop_poly_rg[0])) # These should be identical
# Shift 1D polynomials to account for SCP
dop_poly_az_shifted = _polyshift(dop_poly_az, t_az_scp - t_az_ref)
dop_poly_rg_shifted = _polyshift(dop_poly_rg, t_rg_scp - t_rg_ref)
# Scale 1D polynomials from Hz/s^n to Hz/m^n
dop_poly_az_scaled = dop_poly_az_shifted * np.power(
ss_az_s / output_meta.Grid.Col.SS, np.arange(0, len(dop_poly_az)))
dop_poly_rg_scaled = dop_poly_rg_shifted * np.power(
ss_rg_s / output_meta.Grid.Row.SS, np.arange(0, len(dop_poly_rg)))
output_meta.RMA.INCA.DopCentroidPoly[1:, 0] = dop_poly_rg_scaled[1:]
output_meta.RMA.INCA.DopCentroidPoly[0, 1:] = dop_poly_az_scaled[1:]
output_meta.RMA.INCA.DopCentroidCOA = True
output_meta.Grid.Col.DeltaKCOAPoly = (output_meta.RMA.INCA.DopCentroidPoly * ss_az_s /
output_meta.Grid.Col.SS)
# TimeCOAPoly
# TimeCOAPoly=TimeCA+(DopCentroid/dop_rate)
# Since we can't evaluate this equation analytically, we will evaluate
# samples of it across our image and fit a 2D polynomial to it.
# From radarsat.py
POLY_ORDER = 2 # Order of polynomial which we want to compute in each dimension
grid_samples = POLY_ORDER + 1
coords_az_m = np.linspace(
-output_meta.ImageData.SCPPixel.Col,
(output_meta.ImageData.NumCols - output_meta.ImageData.SCPPixel.Col - 1),
grid_samples) * output_meta.Grid.Col.SS
coords_rg_m = np.linspace(
-output_meta.ImageData.SCPPixel.Row,
(output_meta.ImageData.NumRows - output_meta.ImageData.SCPPixel.Row - 1),
grid_samples) * output_meta.Grid.Row.SS
[coords_az_m_2d, coords_rg_m_2d] = np.meshgrid(coords_az_m,
coords_rg_m)
timeca_sampled = poly.polyval2d(
coords_rg_m_2d, coords_az_m_2d,
np.atleast_2d(output_meta.RMA.INCA.TimeCAPoly))
dopcentroid_sampled = poly.polyval2d(
coords_rg_m_2d, coords_az_m_2d,
output_meta.RMA.INCA.DopCentroidPoly)
doprate_sampled = poly.polyval2d(
coords_rg_m_2d, coords_az_m_2d,
np.atleast_2d(dop_rate_poly_rg_scaled))
timecoa_sampled = timeca_sampled + (
dopcentroid_sampled / doprate_sampled)
# Least squares fit for 2D polynomial
a = np.zeros(((POLY_ORDER + 1)**2, (POLY_ORDER + 1)**2))
for k in range(POLY_ORDER + 1):
for j in range(POLY_ORDER + 1):
a[:, k * (POLY_ORDER + 1) + j] = np.multiply(
np.power(coords_az_m_2d.flatten(), j),
np.power(coords_rg_m_2d.flatten(), k))
A = np.zeros(((POLY_ORDER + 1)**2, (POLY_ORDER + 1)**2))
for k in range((POLY_ORDER + 1)**2):
for j in range((POLY_ORDER + 1)**2):
A[k, j] = np.multiply(a[:, k], a[:, j]).sum()
b_coa = [
np.multiply(timecoa_sampled.flatten(), a[:, k]).sum()
for k in range((POLY_ORDER + 1)**2)
]
x_coa = np.linalg.solve(A, b_coa)
output_meta.Grid.TimeCOAPoly = np.reshape(
x_coa, (POLY_ORDER + 1, POLY_ORDER + 1))
# Radiometric
output_meta.Radiometric = MetaNode()
if h5meta['Range Spreading Loss Compensation Geometry'] != 'NONE':
fact = h5meta['Reference Slant Range']**(
2 * h5meta['Reference Slant Range Exponent'])
if h5meta['Calibration Constant Compensation Flag'] == 0:
fact = fact * (1 / (h5meta['Rescaling Factor']**2))
fact = fact / band_meta[i]['Calibration Constant']
output_meta.Radiometric.BetaZeroSFPoly = np.array([[fact]])
# GeoData
# Now that sensor model fields have been populated, we can populate
# GeoData.SCP more precisely.
ecf = point.image_to_ground([
output_meta.ImageData.SCPPixel.Row,
output_meta.ImageData.SCPPixel.Col
], output_meta)[0]
output_meta.GeoData.SCP.ECF.X = ecf[0]
output_meta.GeoData.SCP.ECF.Y = ecf[1]
output_meta.GeoData.SCP.ECF.Z = ecf[2]
llh = gc.ecf_to_geodetic(ecf)[0]
output_meta.GeoData.SCP.LLH.Lat = llh[0]
output_meta.GeoData.SCP.LLH.Lon = llh[1]
output_meta.GeoData.SCP.LLH.HAE = llh[2]
# SCPCOA
sicd.derived_fields(output_meta)
grouped_meta.append(output_meta)
return grouped_meta
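# Illustrative use of this module (the filename is hypothetical):
#   reader = Reader('/data/CSKS1_SCS_B_HI_09_HH_RA_SF_20200101000000.h5')
#   per_band_sicd = reader.sicdmeta      # list with one SICD MetaNode per polarimetric band
#   chip = reader.read_chip[0]           # CSMChipper for the first band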
|
import re
from typing import Dict, List, Tuple, Type
from graia.amnesia.message import Element, MessageChain
from avilla.commander.utilles import gen_subclass
from avilla.core.elements import Text
ELEMENT_MAPPING: Dict[str, Type[Element]] = {i.__name__: i for i in gen_subclass(Element)}
def chain_from_mapping_string(string: str, mapping: Dict[str, Element]) -> "MessageChain":
"""从映射字符串与映射字典的元组还原消息链.
Args:
string (str): 映射字符串
mapping (Dict[int, Element]): 映射字典.
Returns:
MessageChain: 构建的消息链
"""
elements: List[Element] = []
for x in re.split("(\x02\\d+_\\w+\x03)", string):
if x:
if x[0] == "\x02" and x[-1] == "\x03":
index, class_name = x[1:-1].split("_")
if not isinstance(mapping[index], ELEMENT_MAPPING[class_name]):
raise ValueError("Validation failed: not matching element type!")
elements.append(mapping[index])
else:
elements.append(Text(x))
chain = MessageChain(elements)
return chain
def chain_to_mapping_str(
message_chain: MessageChain,
) -> Tuple[str, Dict[str, Element]]:
"""转换消息链为映射字符串与映射字典的元组.
Returns:
Tuple[str, Dict[str, Element]]: 生成的映射字符串与映射字典的元组
"""
elem_mapping: Dict[str, Element] = {}
elem_str_list: List[str] = []
for i, elem in enumerate(message_chain.content):
if not isinstance(elem, Text):
elem_mapping[str(i)] = elem
elem_str_list.append(f"\x02{i}_{elem.__class__.__name__}\x03")
else:
elem_str_list.append(elem.text)
return "".join(elem_str_list), elem_mapping
|
#!/usr/bin/env python3
"""
Copyright (C) 2018 Christian Thomas Jacobs
Pynea facilitates the reproducibility of LaTeX documents by embedding the scripts and data files required to regenerate the figures within the document itself.
Pynea is released under the MIT license. See the file LICENSE.md for more details.
"""
from subprocess import call
import fitz
import os.path
import git
import re
from pynea.resource import Resource
class Figure(Resource):
""" A figure to be included in the LaTeX document. """
def __init__(self, path, script, command, data):
""" Save the path to the figure file and resources used to generate it. """
# The path to the figure file itself.
self.path = path
# The script that generated the figure.
self.script = script
# The command used to execute the script.
self.command = command
# Any data files that the script depends on.
self.data = data
return
@property
def is_modified(self):
""" Return True if the figure, script, data and/or command has changed. """
# Has the figure itself changed?
figure_modified = super(Figure, self).is_modified
if(not figure_modified):
# Has the script which generated the figure changed?
script_modified = self.script.is_modified
if(not script_modified):
# Have the script's data files changed?
data_modified = any([d.is_modified for d in self.data])
if(not data_modified):
# Has the command used to run the script changed?
# Open figure and obtain the previously-used command from the metadata.
pdf = fitz.open(self.path)
keywords = pdf.metadata["keywords"]
# NOTE: If the previously-used command is not found then it is assumed that the figure has been modified.
if(keywords):
m = re.match(r"script_command:(.+),", keywords)
if(m):
previous_command = m.group(1)
command_modified = (self.command != previous_command)
if(not command_modified):
return False
return True
def generate(self):
""" Generate the figure by running the script which generates it. """
# Record the current working directory.
cwd = os.getcwd()
# Change to the script's working directory.
os.chdir(os.path.dirname(self.script.path))
# Re-run the command.
return_code = call(self.command, shell=True)
# Switch back to the current working directory.
os.chdir(cwd)
return return_code
def embed(self):
""" Embed the script and data files in the figure. """
# Open the PDF file that will host the script file and data files.
pdf = fitz.open(self.path)
# Embed script.
b = open(self.script.path, "rb").read()
try:
pdf.embeddedFileUpd(self.script.path, b)
except:
pdf.embeddedFileAdd(b, self.script.path)
pdf.save(pdf.name, incremental=True)
# Embed data files.
for data in self.data:
b = open(data.path, "rb").read()
try:
pdf.embeddedFileUpd(data.path, b)
except:
pdf.embeddedFileAdd(b, data.path)
pdf.save(pdf.name, incremental=True)
# Embed metadata (including command used to run the script).
metadata = pdf.metadata
metadata["keywords"] = "script_command:%s, script_git_revision:%s" % (self.command, self.script.revision)
pdf.setMetadata(metadata)
pdf.save(pdf.name, incremental=True)
# Close the figure.
pdf.close()
return
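# Illustrative usage (paths, command and the surrounding Resource objects are hypothetical):
#   fig = Figure("figures/result.pdf", script=script_resource, command="python3 plot.py", data=[csv_resource])
#   if fig.is_modified:
#       if fig.generate() == 0:  # the script exited successfully
#           fig.embed()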
|
# Generated by Django 2.2.3 on 2020-06-02 21:59
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cers', '0003_auto_20200602_2111'),
]
operations = [
migrations.RenameField(
model_name='attendance',
old_name='is_entry',
new_name='is_in',
),
]
|
# -*- coding: utf-8 -*-
from django.contrib.auth.backends import ModelBackend
from .models import EmailAddress
from .utils import get_most_qualified_user_for_email_and_password
class EmailBackend(ModelBackend):
def authenticate(self, username=None, password=None):
"""
Tries verified email addresses, the email field on user objects, and unconfirmed email addresses.
The username itself is not checked here, since the default model backend already does that.
"""
if not username:
    # Nothing to look up; let the other authentication backends handle this request.
    return None
username = username.strip()
return get_most_qualified_user_for_email_and_password(username, password)
class PermissionBackend(object):
def authenticate(self):
return None
def has_perm(self, user_obj, perm, obj=None):
# TODO: cache
if perm == 'aldryn_accounts.has_verified_email':
if not user_obj or user_obj.is_anonymous():
return False
return EmailAddress.objects.has_verified_email(user_obj)
return False
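# Illustrative settings wiring (the dotted module path is an assumption about this package's layout):
#   AUTHENTICATION_BACKENDS = [
#       'aldryn_accounts.auth_backends.EmailBackend',
#       'django.contrib.auth.backends.ModelBackend',
#   ]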
|
import os
import re
from shift_oelint_parser.parser import get_items
from shift_oelint_parser.cls_item import Variable, Item
from shift_oelint_parser.helper_files import expand_term, guess_recipe_name, guess_recipe_version, guess_base_recipe_name
from shift_oelint_parser.constants import CONSTANTS
class Stash(object):
def __init__(self, quiet=False):
"""constructor
"""
self.__list = []
self.__seen_files = set()
self.__map = {}
self.__quiet = quiet
def AddFile(self, _file, lineOffset=0, forcedLink=None):
"""Adds a file to the stash
Arguments:
_file {str} -- Full path to file
Keyword Arguments:
lineOffset {int} -- Line offset from the file that include this file (default: {0})
forcedLink {[type]} -- Force link against a file (default: {None})
Returns:
list -- List of {shift_oelint_parser.cls_item.Item}
"""
_, _ext = os.path.splitext(_file)
if _file in self.__seen_files and _ext not in [".inc"]:
return []
if not self.__quiet:
print("Parsing {}".format(_file))
self.__seen_files.add(_file)
res = get_items(self, _file, lineOffset=lineOffset)
if forcedLink:
for r in res:
r.IncludedFrom = forcedLink
if _file not in self.__map:
self.__map[_file] = []
self.__map[_file].append(forcedLink)
if forcedLink not in self.__map:
self.__map[forcedLink] = []
self.__map[forcedLink].append(_file)
# Match bbappends to bbs
if _file.endswith(".bbappend"):
bn_this = os.path.basename(_file).replace(
".bbappend", "").replace("%", ".*")
for item in self.__list:
if re.match(bn_this, os.path.basename(item.Origin)):
if _file not in self.__map:
self.__map[_file] = []
self.__map[_file].append(item.Origin)
if item.Origin not in self.__map:
self.__map[item.Origin] = []
self.__map[item.Origin].append(_file)
# find maximum line number of the origin
_maxline = max(x.Line for x in self.__list if x.Origin == item.Origin)
for r in res:
# pretend that we are adding the file to the end of the original
r.Line += _maxline
break
self.__list += res
return res
def Remove(self, item):
self.__list.remove(item)
def Finalize(self):
# cross link all the files
for k in self.__map.keys():
for l in self.__map[k]:
self.__map[k] += [x for x in self.__map[l] if x != k]
self.__map[k] = list(set(self.__map[k]))
for k, v in self.__map.items():
for item in [x for x in self.__list if x.Origin == k]:
for link in v:
item.AddLink(link)
def GetRecipes(self):
"""Get bb files in stash
Returns:
list -- List of bb files in stash
"""
return list(set([x.Origin for x in self.__list if x.Origin.endswith(".bb")]))
def GetLoneAppends(self):
"""Get bbappend without a matching bb
Returns:
list -- list of bbappend without a matching bb
"""
__linked_appends = []
__appends = []
for x in self.__list:
if x.Origin.endswith(".bbappend"):
__appends.append(x.Origin)
else:
__linked_appends += x.Links
x = list(set([x for x in __appends if x not in __linked_appends]))
return x
def __is_linked_to(self, item, filename, nolink=False):
return (filename in item.Links and not nolink) or filename == item.Origin
def __get_items_by_file(self, items, filename, nolink=False):
if not filename:
return items
return [x for x in items if self.__is_linked_to(x, filename, nolink=nolink)]
def __get_items_by_classifier(self, items, classifier):
if not classifier:
return items
return [x for x in items if x.CLASSIFIER == classifier]
def __get_items_by_attribute(self, items, attname, attvalue):
if not attname:
return items
# x.GetAttributes() returns a dict of attribute name -> value
res = [x for x in items if attname in x.GetAttributes().keys()]
if attvalue:
res = [x for x in res if (attname in x.GetAttributes(
).keys() and x.GetAttributes()[attname] == attvalue)]
return res
def GetLinksForFile(self, filename):
"""Get file which this file is linked against
Arguments:
filename {str} -- full path to file
Returns:
list -- list of full paths the file is linked against
"""
if not filename:
return []
return [x.Origin for x in self.__get_items_by_file(self.__list, filename) if x.Origin != filename]
def GetItemsFor(self, filename=None, classifier=None, attribute=None, attributeValue=None, nolink=False):
"""Get items for filename
Keyword Arguments:
filename {str} -- Full path to file (default: {None})
classifier {str} -- class specifier (e.g. Variable) (default: {None})
attribute {str} -- class attribute name (default: {None})
attributeValue {str} -- value of the class attribute name (default: {None})
nolink {bool} -- Consider linked files (default: {False})
Returns:
list -- Items matching the given filters, sorted by line number
"""
res = self.__list
res = self.__get_items_by_file(res, filename, nolink=nolink)
res = self.__get_items_by_classifier(res, classifier)
res = self.__get_items_by_attribute(res, attribute, attributeValue)
return sorted(list(set(res)), key=lambda x: x.Line)
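# Illustrative query (the path is hypothetical; "VarName" is assumed to be the attribute key
# exposed by Variable.GetAttributes() for the variable name, as in upstream oelint_parser):
#   stash.GetItemsFor(filename="/layers/meta-foo/recipes-bar/baz/baz_1.0.bb",
#                     classifier=Variable.CLASSIFIER,
#                     attribute="VarName", attributeValue="SRC_URI")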
def ExpandVar(self, filename=None, attribute=None, attributeValue=None, nolink=False):
"""Expand variable to dictionary
Args:
filename {str} -- Full path to file (default: {None})
attribute {str} -- class attribute name (default: {None})
attributeValue {str} -- value of the class attribute name (default: {None})
nolink {bool} -- Consider linked files (default: {False})
Returns:
{dict}: expanded variables from call + base set of variables
"""
_res = self.GetItemsFor(filename=filename,
classifier=Variable.CLASSIFIER,
attribute=attribute,
attributeValue=attributeValue,
nolink=nolink)
_exp = {
"PN": guess_recipe_name(filename),
"PV": guess_recipe_version(filename),
"BPN": guess_base_recipe_name(filename)
}
_exp = dict(list(_exp.items()) + list(CONSTANTS.SetsBase.items()))
for item in sorted(_res, key=lambda x: x.Line):
varop = item.VarOp
name = item.VarNameComplete
if item.Flag:
continue
if name not in _exp.keys():
_exp[name] = None
if varop in [" = ", " := "]:
if not item.IsAppend() and "remove" not in item.SubItems:
_exp[name] = item.VarValueStripped
elif varop == " ?= " and _exp[name] is None:
_exp[name] = item.VarValueStripped
elif varop == " ??= " and _exp[name] is None:
_exp[name] = item.VarValueStripped
elif varop == " += ":
if _exp[name] is None:
_exp[name] = ""
_exp[name] += " " + item.VarValueStripped
elif varop == " =+ ":
if _exp[name] is None:
_exp[name] = ""
_exp[name] = item.VarValueStripped + " " + _exp[name]
elif varop in [" .= "]:
if _exp[name] is None:
_exp[name] = ""
_exp[name] += item.VarValueStripped
elif varop in [" =. "]:
if _exp[name] is None:
_exp[name] = ""
_exp[name] = item.VarValueStripped + _exp[name]
# and now for a second run handling the special append/prepend style assignments
for item in sorted(_res, key=lambda x: x.Line):
varop = item.VarOp
name = item.VarNameComplete
if item.Flag:
continue
if name not in _exp.keys():
_exp[name] = None
if item.IsAppend():
    if _exp[name] is None:
        _exp[name] = ""
    _exp[name] += item.VarValueStripped
elif "append" in item.SubItems:
if _exp[name] is None:
_exp[name] = ""
_exp[name] += item.VarValueStripped
elif "prepend" in item.SubItems:
if _exp[name] is None:
_exp[name] = ""
_exp[name] = item.VarValueStripped + _exp[name]
# and now for the run with remove
for item in sorted(_res, key=lambda x: x.Line):
varop = item.VarOp
name = item.VarNameComplete
if item.Flag:
continue
if name not in _exp.keys():
_exp[name] = None
if "remove" in item.SubItems:
if _exp[name] is None:
_exp[name] = ""
_exp[name] = _exp[name].replace(item.VarValueStripped, "")
# final run and explode the settings
_finalexp = {}
for k,v in _exp.items():
_newkey = expand_term(self, filename, k)
if _newkey not in _finalexp:
_finalexp[_newkey] = []
_finalexp[_newkey] += Item.safe_linesplit(expand_term(self, filename, v or ""))
return _finalexp
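# Illustrative usage of the stash (the recipe path is hypothetical):
#   stash = Stash(quiet=True)
#   stash.AddFile("/layers/meta-foo/recipes-bar/baz/baz_1.0.bb")
#   stash.Finalize()
#   expanded = stash.ExpandVar(filename="/layers/meta-foo/recipes-bar/baz/baz_1.0.bb")
#   # expanded maps expanded variable names to lists of values, e.g. expanded.get("PN") -> ["baz"]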
|
from matplotlib import pyplot as plt
from epyseg.deeplearning.augmentation.generators.data import DataGenerator
from epyseg.deeplearning.augmentation.generators.meta import MetaGenerator
import numpy as np
# logging
from epyseg.tools.logger import TA_logger
logger = TA_logger()
# MINIMAL_AUGMENTATIONS = [{'type': None}, {'type': None},{'type': None}, {'type': None}, {'type': 'zoom'}, {'type': 'blur'}, {'type': 'translate'}, {'type': 'rotate'}]
# added intensity shifts to the minimal augmentation --> should make it more robust for masking
MINIMAL_AUGMENTATIONS = [{'type': None}, {'type': None},{'type': None}, {'type': None}, {'type': 'zoom'}, {'type': 'blur'}, {'type': 'translate'}, {'type': 'rotate'},{'type': 'random_intensity_gamma_contrast'}, {'type': 'intensity'}, {'type': 'random_intensity_gamma_contrast'}, {'type': 'intensity'}]
ALL_AUGMENTATIONS_BUT_INVERT_AND_HIGH_NOISE = [{'type': None}, {'type': None}, {'type': 'zoom'}, {'type': 'blur'},
{'type': 'translate'},
{'type': 'shear'}, {'type': 'flip'}, {'type': 'rotate'},
{'type': 'low noise'}, {'type': 'high noise'}, {'type': 'stretch'}]
ALL_AUGMENTATIONS_BUT_INVERT = [{'type': None}, {'type': None}, {'type': 'zoom'}, {'type': 'blur'},
{'type': 'translate'},
{'type': 'shear'}, {'type': 'flip'}, {'type': 'rotate'}, {'type': 'low noise'},
{'type': 'high noise'}, {'type': 'stretch'}]
ALL_AUGMENTATIONS_BUT_INVERT_AND_NOISE = [{'type': None}, {'type': 'zoom'}, {'type': 'blur'},
{'type': 'translate'}, {'type': 'shear'},
{'type': 'flip'}, {'type': 'rotate'}, {'type': 'stretch'},
{'type': 'rotate (interpolation free)'},
{'type': 'rotate (interpolation free)'},
{'type': 'rotate (interpolation free)'}]
ALL_AUGMENTATIONS = [{'type': None}, {'type': None}, {'type': 'zoom'}, {'type': 'blur'}, {'type': 'translate'},
{'type': 'shear'}, {'type': 'flip'}, {'type': 'rotate'}, {'type': 'invert'}, {'type': 'low noise'},
{'type': 'high noise'}, {'type': 'stretch'}]
ALL_AUGMENTATIONS_BUT_HIGH_NOISE = [{'type': None}, {'type': None}, {'type': 'zoom'}, {'type': 'blur'},
{'type': 'translate'},
{'type': 'shear'}, {'type': 'flip'}, {'type': 'rotate'}, {'type': 'invert'},
{'type': 'low noise'}, {'type': 'stretch'}]
STRETCHED_AUG_EPITHELIA = [{'type': None}, {'type': 'stretch'}, {'type': 'stretch'},
{'type': 'stretch'}, {'type': 'invert'}, {'type': 'flip'}, {'type': 'translate'},
{'type': 'zoom'}, {'type': 'blur'}, {'type': 'shear'}, {'type': 'rotate'}, {'type': 'low noise'}]
STRETCHED_AUG_EPITHELIA_2 = [{'type': None}, {'type': None}, {'type': None}, {'type': None}, {'type': 'stretch'}, {'type': 'stretch'},
{'type': 'stretch'}, {'type': 'invert'},{'type': 'invert'},{'type': 'invert'},{'type': 'invert'}, {'type': 'flip'}, {'type': 'translate'},
{'type': 'zoom'}, {'type': 'blur'}, {'type': 'shear'}, {'type': 'rotate'}, {'type': 'low noise'}]
STRETCHED_AUG_EPITHELIA_3 = [{'type': None}, {'type': None}, {'type': None}, {'type': None}, {'type': 'stretch'}, {'type': 'stretch'},
{'type': 'stretch'}, {'type': 'flip'}, {'type': 'translate'},
{'type': 'zoom'}, {'type': 'blur'}, {'type': 'shear'}, {'type': 'rotate'}, {'type': 'low noise'},{'type': 'rotate (interpolation free)'}, {'type': 'rotate (interpolation free)'}, {'type': 'rotate (interpolation free)'}]
STRETCHED_AUG_EPITHELIA_4 = [{'type': None}, {'type': None}, {'type': 'stretch'}, {'type': 'stretch'},
{'type': 'stretch'}, {'type': 'flip'}, {'type': 'translate'},{'type': 'flip'}, {'type': 'zoom'}, {'type': 'shear'}, {'type': 'rotate'}, {'type': 'rotate'}, {'type': 'rotate'}, {'type': 'rotate'},
{'type': 'zoom'}, {'type': 'blur'}, {'type': 'shear'}, {'type': 'rotate'}, {'type': 'low noise'},{'type': 'rotate (interpolation free)'}, {'type': 'rotate (interpolation free)'}, {'type': 'rotate (interpolation free)'}]
TRAINING_FOR_BEGINNING_LITTLE_INTERPOLATION = [{'type': 'rotate (interpolation free)'}, {'type': 'rotate (interpolation free)'}, {'type': 'rotate (interpolation free)'}, {'type': None}, {'type': 'flip'}, {'type': 'translate'}, {'type': 'blur'}]
NO_AUGMENTATION = [{'type': None}]
TEST_AUGMENTATION = [{'type': 'invert'}]
SAFE_AUGMENTATIONS_FOR_SINGLE_PIXEL_WIDE = [{'type': None}, {'type': 'blur'}, {'type': 'translate'}, {'type': 'flip'}]
SAFE_AUGMENTATIONS_FOR_SINGLE_PIXEL_WIDE_PLUS_INVERT_AND_NOISE = [{'type': None}, {'type': 'blur'},
{'type': 'translate'}, {'type': 'flip'},
{'type': 'invert'}, {'type': 'low noise'}]
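# Illustrative preset selection (the dataset paths are hypothetical; the keyword arguments match
# MetaAugmenter.__init__ below):
#   augmenter = MetaAugmenter(inputs=['/data/train/imgs'], outputs=['/data/train/masks'],
#                             augmentations=MINIMAL_AUGMENTATIONS, batch_size=8)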
class MetaAugmenter:
def __init__(self, inputs=None, outputs=None, output_folder=None, input_shape=(None, None, None, 1),
output_shape=(None, None, None, 1), input_channel_of_interest=None, output_channel_of_interest=None,
input_channel_reduction_rule='copy channel of interest to all channels',
input_channel_augmentation_rule='copy channel of interest to all channels',
output_channel_reduction_rule='copy channel of interest to all channels',
output_channel_augmentation_rule='copy channel of interest to all channels',
augmentations=None, crop_parameters=None, mask_dilations=None, infinite=False,
default_input_tile_width=128, default_input_tile_height=128,
default_output_tile_width=128, default_output_tile_height=128,
keep_original_sizes=False,
input_normalization={'method': 'Rescaling (min-max normalization)', 'range': [0, 1],
'individual_channels': True},
output_normalization={'method': 'Rescaling (min-max normalization)', 'range': [0, 1],
'individual_channels': True},
validation_split=0, test_split=0,
shuffle=True, clip_by_frequency=None, is_predict_generator=False, overlap_x=0, overlap_y=0,
batch_size=None, batch_size_auto_adjust=False, invert_image=False, input_bg_subtraction=None, create_epyseg_style_output=None, remove_n_border_mask_pixels=None,
is_output_1px_wide=False, rebinarize_augmented_output=False,
rotate_n_flip_independently_of_augmentation=False,
mask_lines_and_cols_in_input_and_mask_GT_with_nans=None, # should be 'id' or 'noid' and requires a custom loss and metrics --> can only be applied with some losses
z_frames_to_add=None,
**kwargs):
self.augmenters = []
self.inputs = inputs
self.outputs = outputs
self.output_folder = output_folder
self.input_shape = input_shape
self.output_shape = output_shape
self.input_channel_of_interest = input_channel_of_interest
self.output_channel_of_interest = output_channel_of_interest
self.input_channel_reduction_rule = input_channel_reduction_rule
self.input_channel_augmentation_rule = input_channel_augmentation_rule
self.output_channel_reduction_rule = output_channel_reduction_rule
self.output_channel_augmentation_rule = output_channel_augmentation_rule
self.augmentations = augmentations
self.crop_parameters = crop_parameters
self.batch_size = batch_size
self.batch_size_auto_adjust = batch_size_auto_adjust
self.invert_image = invert_image
self.input_bg_subtraction = input_bg_subtraction
self.create_epyseg_style_output=create_epyseg_style_output
self.remove_n_border_mask_pixels = remove_n_border_mask_pixels
self.is_output_1px_wide = is_output_1px_wide
self.rebinarize_augmented_output = rebinarize_augmented_output
self.rotate_n_flip_independently_of_augmentation = rotate_n_flip_independently_of_augmentation
self.mask_lines_and_cols_in_input_and_mask_GT_with_nans = mask_lines_and_cols_in_input_and_mask_GT_with_nans
self.z_frames_to_add = z_frames_to_add
self.mask_dilations = mask_dilations
self.infinite = infinite
self.default_input_tile_width = default_input_tile_width
self.default_input_tile_height = default_input_tile_height
self.default_output_tile_width = default_output_tile_width
self.default_output_tile_height = default_output_tile_height
self.keep_original_sizes = keep_original_sizes
self.input_normalization = input_normalization
self.output_normalization = output_normalization
self.validation_split = validation_split
self.test_split = test_split
self.shuffle = shuffle
self.clip_by_frequency = clip_by_frequency
self.is_predict_generator = is_predict_generator
self.overlap_x = overlap_x
self.overlap_y = overlap_y
if inputs is not None:
for i, inp in enumerate(inputs):
if outputs is not None:
cur_output = outputs[i]
else:
cur_output = None
self.augmenters.append(
DataGenerator(inputs=inp, outputs=cur_output, output_folder=output_folder, input_shape=input_shape,
output_shape=output_shape, input_channel_of_interest=input_channel_of_interest,
output_channel_of_interest=output_channel_of_interest,
input_channel_reduction_rule=input_channel_reduction_rule,
input_channel_augmentation_rule=input_channel_augmentation_rule,
output_channel_reduction_rule=output_channel_reduction_rule,
output_channel_augmentation_rule=output_channel_augmentation_rule,
augmentations=augmentations, crop_parameters=crop_parameters,
mask_dilations=mask_dilations,
infinite=infinite, default_input_tile_width=default_input_tile_width,
default_input_tile_height=default_input_tile_height,
default_output_tile_width=default_output_tile_width,
default_output_tile_height=default_output_tile_height,
keep_original_sizes=keep_original_sizes,
input_normalization=input_normalization,
output_normalization=output_normalization,
validation_split=validation_split, test_split=test_split,
shuffle=shuffle,
clip_by_frequency=clip_by_frequency,
is_predict_generator=is_predict_generator, overlap_x=overlap_x, overlap_y=overlap_y,
invert_image=invert_image, input_bg_subtraction=input_bg_subtraction, create_epyseg_style_output=create_epyseg_style_output, remove_n_border_mask_pixels=remove_n_border_mask_pixels,
is_output_1px_wide=is_output_1px_wide,
rebinarize_augmented_output=rebinarize_augmented_output,
rotate_n_flip_independently_of_augmentation=rotate_n_flip_independently_of_augmentation,
mask_lines_and_cols_in_input_and_mask_GT_with_nans=mask_lines_and_cols_in_input_and_mask_GT_with_nans,
z_frames_to_add = z_frames_to_add
))
def _get_significant_parameter(self, local_param, global_param):
if local_param is not None:
return local_param
else:
return global_param
def appendDatasets(self, datasets=None, augmentations=None, **kwargs):
logger.debug('datasets ' + str(datasets))
logger.debug('augs ' + str(augmentations))
if datasets is None:
return
# parse and handle inputs
for dataset in datasets:
fused = {**dataset, 'augmentations': augmentations}
# print('fused', fused)
self.append(**fused)
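# Illustrative call (the paths are hypothetical; the dict keys must match append()'s keyword arguments):
#   meta.appendDatasets(datasets=[{'inputs': ['/data/set1/imgs'], 'outputs': ['/data/set1/masks']}],
#                       augmentations=MINIMAL_AUGMENTATIONS)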
def append(self, inputs=None, outputs=None, output_folder=None, input_shape=None, output_shape=None,
input_channel_of_interest=None, output_channel_of_interest=None,
input_channel_reduction_rule=None, input_channel_augmentation_rule=None,
output_channel_reduction_rule=None, output_channel_augmentation_rule=None,
augmentations=None, crop_parameters=None, mask_dilations=None, infinite=None,
default_input_tile_width=None, default_input_tile_height=None, default_output_tile_width=None,
default_output_tile_height=None, keep_original_sizes=None, input_normalization=None,
output_normalization=None, validation_split=None, test_split=None,
shuffle=None, clip_by_frequency=None,
is_predict_generator=None, overlap_x=None, overlap_y=None, invert_image=None, input_bg_subtraction=None,create_epyseg_style_output=None,
remove_n_border_mask_pixels=None, is_output_1px_wide=None, rebinarize_augmented_output=None,
rotate_n_flip_independently_of_augmentation=None,mask_lines_and_cols_in_input_and_mask_GT_with_nans=None,
z_frames_to_add = None,
**kwargs):
# print('debug 123', inputs, outputs, self.inputs, self.outputs)
# inputs and outputs are ok --> why is there a bug then????
self.augmenters.append(
DataGenerator(inputs=self._get_significant_parameter(inputs, self.inputs),
outputs=self._get_significant_parameter(outputs, self.outputs),
output_folder =self._get_significant_parameter(output_folder, self.output_folder),
input_shape=self._get_significant_parameter(input_shape, self.input_shape),
output_shape=self._get_significant_parameter(output_shape, self.output_shape),
input_channel_of_interest=self._get_significant_parameter(input_channel_of_interest,
self.input_channel_of_interest),
output_channel_of_interest=self._get_significant_parameter(output_channel_of_interest,
self.output_channel_of_interest),
input_channel_reduction_rule=self._get_significant_parameter(input_channel_reduction_rule,
self.input_channel_reduction_rule),
input_channel_augmentation_rule=self._get_significant_parameter(
input_channel_augmentation_rule, self.input_channel_augmentation_rule),
output_channel_reduction_rule=self._get_significant_parameter(output_channel_reduction_rule,
self.output_channel_reduction_rule),
output_channel_augmentation_rule=self._get_significant_parameter(
output_channel_augmentation_rule, self.output_channel_augmentation_rule),
augmentations=self._get_significant_parameter(augmentations, self.augmentations),
crop_parameters=self._get_significant_parameter(crop_parameters, self.crop_parameters),
mask_dilations=self._get_significant_parameter(mask_dilations, self.mask_dilations),
infinite=self._get_significant_parameter(infinite, self.infinite),
default_input_tile_width=self._get_significant_parameter(default_input_tile_width,
self.default_input_tile_width),
default_input_tile_height=self._get_significant_parameter(default_input_tile_height,
self.default_input_tile_height),
default_output_tile_width=self._get_significant_parameter(default_output_tile_width,
self.default_output_tile_width),
default_output_tile_height=self._get_significant_parameter(default_output_tile_height,
self.default_output_tile_height),
keep_original_sizes=self._get_significant_parameter(keep_original_sizes,
self.keep_original_sizes),
validation_split=self._get_significant_parameter(validation_split, self.validation_split),
test_split=self._get_significant_parameter(test_split, self.test_split),
shuffle=self._get_significant_parameter(shuffle, self.shuffle),
clip_by_frequency=self._get_significant_parameter(clip_by_frequency, self.clip_by_frequency),
is_predict_generator=self._get_significant_parameter(is_predict_generator,
self.is_predict_generator),
overlap_x=self._get_significant_parameter(overlap_x, self.overlap_x),
overlap_y=self._get_significant_parameter(overlap_y, self.overlap_y),
invert_image=self._get_significant_parameter(invert_image, self.invert_image),
input_bg_subtraction=self._get_significant_parameter(input_bg_subtraction, self.input_bg_subtraction),
create_epyseg_style_output=self._get_significant_parameter(create_epyseg_style_output, self.create_epyseg_style_output),
remove_n_border_mask_pixels=self._get_significant_parameter(remove_n_border_mask_pixels,
self.remove_n_border_mask_pixels),
input_normalization=self._get_significant_parameter(input_normalization,
self.input_normalization),
output_normalization=self._get_significant_parameter(output_normalization,
self.output_normalization),
is_output_1px_wide=self._get_significant_parameter(is_output_1px_wide,
self.is_output_1px_wide),
rebinarize_augmented_output=self._get_significant_parameter(rebinarize_augmented_output,
self.rebinarize_augmented_output),
rotate_n_flip_independently_of_augmentation=self._get_significant_parameter(rotate_n_flip_independently_of_augmentation,
self.rotate_n_flip_independently_of_augmentation),
mask_lines_and_cols_in_input_and_mask_GT_with_nans=self._get_significant_parameter(mask_lines_and_cols_in_input_and_mask_GT_with_nans,
self.mask_lines_and_cols_in_input_and_mask_GT_with_nans),
z_frames_to_add=self._get_significant_parameter(z_frames_to_add, self.z_frames_to_add),
))
def validation_generator(self, infinite=False):
if infinite:
while True:
for orig, label in self._validation_generator(skip_augment=True):
# bug fix for recent tensorflow that really needs true and pred to be unpacked if single input and output
if len(orig) == 1:
orig = orig[0]
if len(label) == 1:
label = label[0]
yield orig, label
else:
for orig, label in self._validation_generator(skip_augment=True):
# bug fix for recent tensorflow that really needs true and pred to be unpacked if single input and output
if len(orig) == 1:
orig = orig[0]
if len(label) == 1:
label = label[0]
yield orig, label
def train_generator(self, infinite=False):
if infinite:
while True:
for orig, label in self._train_generator(skip_augment=False):
# bug fix for recent tensorflow that really needs true and pred to be unpacked if single input and output
if len(orig) == 1:
orig = orig[0]
if len(label) == 1:
label = label[0]
yield orig, label
else:
for orig, label in self._train_generator(skip_augment=False):
# bug fix for recent tensorflow that really needs true and pred to be unpacked if single input and output
if len(orig) == 1:
orig = orig[0]
if len(label) == 1:
label = label[0]
yield orig, label
def test_generator(self, infinite=False):
if infinite:
while True:
for orig, label in self._test_generator(skip_augment=True):
# bug fix for recent tensorflow that really needs true and pred to be unpacked if single input and output
if len(orig) == 1:
orig = orig[0]
if len(label) == 1:
label = label[0]
yield orig, label
else:
for orig, label in self._test_generator(skip_augment=True):
# bug fix for recent tensorflow that really needs true and pred to be unpacked if single input and output
if len(orig) == 1:
orig = orig[0]
if len(label) == 1:
label = label[0]
yield orig, label
def angular_yielder(self, orig, mask, count):
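# Returns one of seven axis-aligned orientations of the (orig, mask) pair: rotations by
# 90/180/270 degrees and horizontal/vertical flips applied over the two spatial axes (-3, -2).
# Together with the untouched original this covers the eight symmetries of a square tile,
# which is why the (commented-out) callers below loop over range(7) for square inputs.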
# mask = self.extra_watershed_mask(mask) # shrink mask to 1 px wide irrespective of transfo
# NB: we could generate the nine stacks here --> TODO --> it would increase the size by 9x but I think it is a good idea
# we could also copy the code of the other routines
if count == 0:
# rot 180
return np.rot90(orig, 2, axes=(-3, -2)), np.rot90(mask, 2, axes=(-3, -2))
if count == 1:
# flip hor
return np.flip(orig, -2), np.flip(mask, -2)
if count == 2:
# flip ver
return np.flip(orig, -3), np.flip(mask, -3)
# make it yield the original and the nine versions of it
# --> TODO
# this works, it generates the 9 versions of the thing in every orientation --> probably what I want --> put everything here
if count == 3:
# yield np.rot90(orig, axes=(-3, -2)), np.rot90(mask, axes=(-3, -2))
# rot 90
return np.rot90(orig, axes=(-3, -2)), np.rot90(mask, axes=(-3, -2))
if count == 4:
# rot 90_flipped_hor or ver
return np.flip(np.rot90(orig, axes=(-3, -2)), -2), np.flip(np.rot90(mask, axes=(-3, -2)), -2)
if count == 5:
# rot 90_flipped_hor or ver
return np.flip(np.rot90(orig, axes=(-3, -2)), -3), np.flip(np.rot90(mask, axes=(-3, -2)), -3)
if count == 6:
# rot 270
return np.rot90(orig, 3, axes=(-3, -2)), np.rot90(mask, 3, axes=(-3, -2))
def _train_generator(self, skip_augment, first_run=False):
train = MetaGenerator(self.augmenters, shuffle=self.shuffle, batch_size=self.batch_size, gen_type='train')
for out in train.generator(skip_augment, first_run):
try:
# # print(len(out))
# # that works check that all are there and all are possible otherwise skip
# # --> need ensure that width = height
# # need set a parameter to be sure to use it or not and need remove rotation and flip from augmentation list (or not in fact)
# orig, mask = out
# augmentations = 7
# if orig[0].shape[-2] != orig[0].shape[-3]:
# augmentations = 3
# for aug in range(augmentations):
# yield self.angular_yielder(orig, mask, aug)
# yield orig, mask
yield out
except:
# failed to generate output --> continue
continue
def _test_generator(self, skip_augment, first_run=False):
test = MetaGenerator(self.augmenters, shuffle=False, batch_size=self.batch_size, gen_type='test')
for out in test.generator(skip_augment, first_run):
# # yield out
# # print(len(out))
# # that works check that all are there and all are possible otherwise skip
# # --> need ensure that width = height
# # need set a parameter to be sure to use it or not and need remove rotation and flip from augmentation list (or not in fact)
# orig, mask = out
# augmentations = 7
# if orig[0].shape[-2] != orig[0].shape[-3]:
# augmentations = 3
# for aug in range(augmentations):
# yield self.angular_yielder(orig, mask, aug)
# yield orig, mask
yield out
def _validation_generator(self, skip_augment, first_run=False):
valid = MetaGenerator(self.augmenters, shuffle=self.shuffle, batch_size=self.batch_size, gen_type='valid')
for out in valid.generator(skip_augment, first_run):
# # yield out
# # print(len(out))
# # that works check that all are there and all are possible otherwise skip
# # --> need ensure that width = height
# # need set a parameter to be sure to use it or not and need remove rotation and flip from augmentation list (or not in fact)
# orig, mask = out
# augmentations = 7
# if orig[0].shape[-2] != orig[0].shape[-3]:
# augmentations = 3
# for aug in range(augmentations):
# yield self.angular_yielder(orig, mask, aug)
# yield orig, mask
yield out
def predict_generator(self): # TODO can use datagen for now
pass
def __len__(self):
# returns the nb of datasets
if not self.augmenters:
return 0
return len(self.augmenters)
# returns the real nb of batches with the current parameters...
def get_train_length(self, first_run=False):
# need run the train algo once with real tiled data to get the counts
train_generator = self._train_generator(skip_augment=True, first_run=first_run)
nb_batches = 0
for _, _ in train_generator:
nb_batches += 1
return nb_batches
def get_test_length(self, first_run=False):
# need run the train algo once with real tiled data to get the counts
test_generator = self._test_generator(skip_augment=True, first_run=first_run)
nb_batches = 0
for _, _ in test_generator:
nb_batches += 1
return nb_batches
def get_validation_length(self, first_run=False):
# need run the train algo once with real tiled data to get the counts
validation_generator = self._validation_generator(skip_augment=True, first_run=first_run)
nb_batches = 0
for _, _ in validation_generator:
nb_batches += 1
return nb_batches
if __name__ == '__main__':
pass
|
import anachronos
from e2e_test.runner import http
from e2e_test.testing_messages import SIMPLE_GET, SIMPLE_POST_DTO, DIFFERENT_POST_DTO, GET_WITH_PARAMETERS, \
GET_WITH_PATH_PARAMETER
class SimpleResourceTest(anachronos.TestCase):
def test_simple_get(self):
http.get("/")
self.assertThat(SIMPLE_GET).is_stored()
def test_post_dto(self):
response = http.post("/", json={'name': 'Paul Atreides', 'age': 17}).json()
self.assertThat(SIMPLE_POST_DTO).is_stored()
self.assertEqual('Paul Atreides', response['name'])
def test_selectRightResourceMethodBasedOnPayloadDtoType(self):
response = http.post("/", json={'name': 'Paul Atreides', 'role': "foobar"}).json()
self.assertThat(DIFFERENT_POST_DTO).is_stored()
self.assertEqual('Paul Atreides', response['name'])
def test_givenMissingOrMalformedPayloadDto_thenReturn400BadRequest(self):
response = http.post("/", json={})
self.assertEqual(400, response.status_code)
def test_getWithQueryParameters(self):
response = http.get("/params?query=foo&age=13")
self.assertEqual(200, response.status_code)
self.assertThat(GET_WITH_PARAMETERS).is_contained()
def test_givenMissingOrMalformedQueryParams_thenReturn400BadRequest(self):
response = http.get("/params?query=foo&age=a")
self.assertEqual(400, response.status_code)
self.assertThat(GET_WITH_PARAMETERS + " foo a").is_never_stored()
def test_withPathParameter(self):
http.get("/path/foobar")
self.assertThat(GET_WITH_PATH_PARAMETER + " foobar").is_stored()
if __name__ == '__main__':
anachronos.run_tests()
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
data_connect = pd.read_csv('C:/Users/sch/PycharmProjects/pythonProject3/csv/connect.csv')
connect = data_connect['connect']
connect_ad = data_connect['connect_ad']
connect_k8s = data_connect['connect_k8s']
connect_hyscal = data_connect['connect_hyscal']
data = np.zeros((5, 4))
# column order matches `names` below; row r holds the mean over the first 20*(r+1) samples
series = [connect, connect_ad, connect_k8s, connect_hyscal]
for col, values in enumerate(series):
    for row in range(5):
        data[row, col] = np.mean(values[0:20 * (row + 1)])
names = ['connect', 'connect_ad', 'connect_k8s', 'connect_hyscal']
data = pd.DataFrame(data=data)
data.to_csv('C:/Users/sch/PycharmProjects/pythonProject3/csv/connect_avg.csv', header=names)
|
"""
Hamiltonian Monte Carlo implementation.
Adapted from https://github.com/franrruiz/vcd_divergence/blob/master/mcmc/hmc_vae.m
"""
import torch
import utils
def hmc_vae(current_q, model, img, epsilon=None, Burn=3, T=10, adapt=0, L=5):
"""
    Hamiltonian Monte Carlo sampler.
:param current_q: initial samples
:param model: vae model for probability computations
:param img: datapoints
:param epsilon: initial step size
:param Burn: number of burn in iterations
:param T: number of MC iterations
:param adapt: 1 if step size should be adapted during burn in
:param L: number of leapfrog steps
:return: final samples, all produced samples, average acceptance rate, adapted step size
"""
if epsilon is None:
epsilon = 0.5 / current_q.size(1)
N = current_q.size(0)
n = current_q.size(1)
acceptHist = torch.zeros((N, Burn + T), device=current_q.device)
logpxzHist = torch.zeros((N, Burn + T), device=current_q.device)
samples = torch.zeros((N, n, T), device=current_q.device)
if (Burn + T) == 0:
z = current_q
delta = -1
accRate = 0
return z, samples, accRate, delta
eta = 0.01
opt = 0.9
cnt = 0
    for i in range(Burn + T):
q = current_q
p = torch.normal(mean=0., std=1., size=(N, n), device=current_q.device)
current_p = p
pred = model.dec_forward(q)
log_pxz = utils.log_pxz(pred, img, q)
gradz = torch.autograd.grad(torch.sum(log_pxz), q)[0]
current_U = - log_pxz
grad_U = - gradz
p = p - epsilon * grad_U / 2
        for j in range(L):
            # full step for the position
            q = q + epsilon * p
            # full step for the momentum, except at the end of the trajectory
            if j != L - 1:
                pred = model.dec_forward(q)
                log_pxz = utils.log_pxz(pred, img, q)
                gradz = torch.autograd.grad(torch.sum(log_pxz), q)[0]
                proposed_U = - log_pxz
                grad_U = - gradz
                p = p - epsilon * grad_U
pred = model.dec_forward(q)
log_pxz = utils.log_pxz(pred, img, q)
gradz = torch.autograd.grad(torch.sum(log_pxz), q)[0]
proposed_U = - log_pxz
grad_U = - gradz
p = p - epsilon * grad_U / 2
p = -p
current_K = torch.sum(current_p ** 2, 1) / 2
proposed_K = torch.sum(p ** 2, 1) / 2
        # Metropolis-Hastings acceptance: compare a Uniform(0, 1) draw with the acceptance ratio
        accept = torch.rand(N, device=current_q.device) < torch.exp(
            current_U - proposed_U + current_K - proposed_K)
acceptHist[:, i] = accept
current_q = torch.where(accept.unsqueeze(1), q, current_q)
current_U = torch.where(accept, proposed_U, current_U)
if (i < Burn) and (adapt == 1):
change = eta * ((torch.mean(accept.to(torch.float32)) - opt) / opt)
epsilon = epsilon + change * epsilon
        elif i >= Burn:
            # store the post-burn-in sample before incrementing the counter (0-based indexing)
            samples[:, :, cnt] = current_q
            cnt = cnt + 1
logpxzHist[:, i] = - current_U
z = current_q
return z, samples, torch.mean(acceptHist.to(torch.float32), 1), epsilon
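# Illustrative sketch (added; not part of the original adaptation): the Metropolis
# acceptance rule used above, shown in isolation on a toy 1-D standard-normal target.
# All names below (toy_U, q_cur, q_prop, ...) are hypothetical and exist only for this example.
if __name__ == '__main__':
    torch.manual_seed(0)
    N = 4
    def toy_U(q):
        # potential energy of a standard-normal target, U(q) = q^2 / 2
        return 0.5 * q ** 2
    q_cur = torch.zeros(N)
    q_prop = q_cur + 0.5 * torch.randn(N)
    p_cur = torch.randn(N)
    p_prop = p_cur  # pretend the leapfrog left the momentum unchanged
    log_ratio = toy_U(q_cur) - toy_U(q_prop) + 0.5 * p_cur ** 2 - 0.5 * p_prop ** 2
    accept = torch.rand(N) < torch.exp(log_ratio)
    print('accepted:', accept.tolist())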
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import time
from tqdm import tqdm
def test_data_iterator(di, args):
current_epoch = -1
logger.info('{}'.format(di.size))
pbar = None
for data in di:
time.sleep(args.wait / 1000.0)
if di.epoch >= args.max_epoch:
break
if current_epoch != di.epoch:
current_epoch = di.epoch
if pbar is not None:
pbar.close()
logger.info('Epoch {}'.format(current_epoch))
pbar = tqdm(total=di.size)
pbar.update(len(data[0]))
if __name__ == '__main__':
from nnabla.logger import logger
from nnabla.config import nnabla_config
parser = argparse.ArgumentParser(description='Data iterator sample.')
parser.add_argument('-m', '--memory_cache', action='store_true',
help='Use memory cache')
parser.add_argument('-f', '--file_cache', action='store_true',
help='Use file cache')
parser.add_argument('-S', '--shuffle', action='store_true',
help='Enable shuffling data')
parser.add_argument('-b', '--batch_size', type=int, default=64,
help='Batch size')
parser.add_argument('-s', '--cache_size', type=int, default=100,
help='Cache size (num of data).')
parser.add_argument('-M', '--memory_size', type=int, default=1048576,
help='Memory buffer size in byte.')
parser.add_argument('-o', '--output', type=str, default=None,
help='If specified, cache data will output to here.')
parser.add_argument('-n', '--normalize', action='store_true',
help='Enable data normalize')
parser.add_argument('-e', '--max_epoch', type=int, default=3,
help='Max epoch to read.')
parser.add_argument('-w', '--wait', type=float, default=0,
help='Wait time for dummy data processing.')
parser.add_argument('uri', help='PATH to CSV_DATASET format file or '
'"MNIST_TRAIN", "MNIST_TEST", "TINY_IMAGENET_TRAIN",'
'"TINY_IMAGENET_VAL"')
args = parser.parse_args()
logger.debug('memory_cache: {}'.format(args.memory_cache))
logger.debug('file_cache: {}'.format(args.file_cache))
logger.debug('shuffle: {}'.format(args.shuffle))
logger.debug('batch_size: {}'.format(args.batch_size))
logger.debug('cache_size: {}'.format(args.cache_size))
logger.debug('memory_size: {}'.format(args.memory_size))
logger.debug('output: {}'.format(args.output))
logger.debug('normalize: {}'.format(args.normalize))
logger.debug('max_epoch: {}'.format(args.max_epoch))
logger.debug('wait: {}'.format(args.wait))
nnabla_config.set('DATA_ITERATOR', 'data_source_file_cache_size',
'{}'.format(args.cache_size))
nnabla_config.set('DATA_ITERATOR', 'data_source_buffer_max_size',
'{}'.format(args.memory_size))
if args.uri == 'MNIST_TRAIN':
sys.path.append(os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..', 'vision', 'mnist'))
from mnist_data import data_iterator_mnist
with data_iterator_mnist(args.batch_size,
True,
None,
args.shuffle,
args.memory_cache,
args.file_cache) as di:
test_data_iterator(di, args)
elif args.uri == 'MNIST_TEST':
sys.path.append(os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..', 'vision', 'mnist'))
from mnist_data import data_iterator_mnist
with data_iterator_mnist(args.batch_size,
False,
None,
args.shuffle,
args.memory_cache,
args.file_cache) as di:
test_data_iterator(di, args)
elif args.uri == 'TINY_IMAGENET_TRAIN':
sys.path.append(os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..', 'vision', 'imagenet'))
from tiny_imagenet_data import data_iterator_tiny_imagenet
with data_iterator_tiny_imagenet(args.batch_size, 'train') as di:
test_data_iterator(di, args)
elif args.uri == 'TINY_IMAGENET_VAL':
sys.path.append(os.path.join(os.path.dirname(
os.path.abspath(__file__)), '..', 'vision', 'imagenet'))
from tiny_imagenet_data import data_iterator_tiny_imagenet
with data_iterator_tiny_imagenet(args.batch_size, 'val') as di:
test_data_iterator(di, args)
else:
if os.path.splitext(args.uri)[1].lower() == '.cache':
from nnabla.utils.data_iterator import data_iterator_cache
with data_iterator_cache(uri=args.uri,
batch_size=args.batch_size,
shuffle=args.shuffle,
with_memory_cache=args.memory_cache,
normalize=args.normalize) as di:
test_data_iterator(di, args)
else:
from nnabla.utils.data_iterator import data_iterator_csv_dataset
with data_iterator_csv_dataset(uri=args.uri,
batch_size=args.batch_size,
shuffle=args.shuffle,
normalize=args.normalize,
with_memory_cache=args.memory_cache,
with_file_cache=args.file_cache,
cache_dir=args.output) as di:
test_data_iterator(di, args)
|
from .user import CustomUserAdmin
from ... import forms
class StudentAdmin(CustomUserAdmin):
form = forms.StudentChangeForm
add_form = forms.StudentCreationForm
list_filter = CustomUserAdmin.list_filter + ('supervisor', )
list_display = CustomUserAdmin.list_display + ('supervisor',)
fieldsets = CustomUserAdmin.fieldsets + (
(None, {'fields' : ('supervisor',)},),
)
add_fieldsets = CustomUserAdmin.add_fieldsets + (
(None, {
'classes' : ('wide',),
'fields' : ('supervisor',)}
),
)
|
import numpy as np
class Node:
def __init__(self, name_initial, id):
self.id = id
self.nextNodes = []
self.name_initial = name_initial
if name_initial == 'transition':
self.listExpectedType = Place
elif name_initial == 'places':
self.listExpectedType = Transition
else:
raise ValueError('Error!', 'No se configuro un tipo de nodo')
def getName(self):
return "{}{}".format(self.name_initial, self.id)
def print(self):
print(self.getName(),'-> ', end='')
def printNextNames(self):
for node in self.nextNodes:
print(node.getName())
def addNext(self, node):
if isinstance(node, self.listExpectedType):
self.nextNodes.append(node)
else:
raise ValueError('Error de tipo de datos!', "No se puede agregar un {} a una lista que espera {}".format( node.__class__.__name__, self.listExpectedType.__name__) )
class Transition(Node):
def __init__(self, id):
Node.__init__(self, 'transition', id)
self.preconditions = []
self.wait_time = 15
self.time_waited = 0
self.action = self.doNothing # default action
def runAction(self):
print("Executing action for:", self.getName())
self.action()
def print(self, end_of_line = ''):
print("{}[{}]".format(self.getName(), self.time_waited),'-> ', end=end_of_line)
def doNothing(self):
pass
class Place(Node):
def __init__(self, id):
Node.__init__(self, 'places', id)
self.marks = 0
self.required_marks = 1
def print(self):
print("{}[{}]".format(self.getName(), self.marks),'-> ', end='')
class Network:
def __init__(self, places_list, transitions_list, initial_state_list = [], max_width = False, name = "", active = False):
self.places = places_list
self.transitions = transitions_list
self.configurePreconditions()
self.setInitialState(initial_state_list)
self.global_time = 0
        self.max_width = max_width  # "width" of the network: the number of unique elements (places and transitions) that can be traversed before repeating
self.name = name
self.active = active
def setInitialState(self, initial_state_list):
if initial_state_list:
if len(initial_state_list) == len(self.places):
for i in range(len(self.places)):
self.places[i].marks = initial_state_list[i]
else:
raise ValueError('Error!', 'Error en el numero de elementos en initial_state_list: se esperaban {} elementos y se recibieron {}.'.format(len(self.places), len(initial_state_list)))
def configurePreconditions(self):
for transition in self.transitions:
for place in self.places:
if transition in place.nextNodes:
transition.preconditions.append(place)
def nextStep(self, actual_time = 0):
if actual_time:
self.global_time = actual_time
self.global_time += 1
        for transition in self.transitions:  #? For each transition ...
            all_conditions_marked = True
            #? ... check that every precondition is satisfied ...
            if transition.time_waited == 0:  #? ... but only if we are not already waiting on a transition whose preconditions were met earlier
                #? Walk through every Place to verify the marks (possible because Python assigns objects by reference)
                for precondition in transition.preconditions:
                    if precondition.marks < precondition.required_marks:
                        all_conditions_marked = False  #! TODO: allow multiple marks to be configured
            if all_conditions_marked:  #? Once every condition for the transition is met ...
                if transition.time_waited == transition.wait_time:  #? ... check that the transition's wait time has elapsed
print("(t={}, w={}) ".format(self.global_time, transition.time_waited), end='')
transition.runAction()
transition.time_waited = 0
                    #? Remove the marks from the preconditions
                    for pre in transition.preconditions:
                        pre.marks = 0
                    #? ... and place them on the following Place() nodes
for pos in transition.nextNodes:
pos.marks += 1
else:
transition.time_waited += 1
def fastForward(self, number_of_steps):
for _ in range(number_of_steps):
self.nextStep()
def print(self, firstElements = True):
pointer = self.places[0]
for _ in range(self.max_width if self.max_width else len(self.places) + len(self.transitions)):
pointer.print()
if firstElements:
pointer = pointer.nextNodes[0]
else:
pointer = pointer.nextNodes[-1]
print()
def getMatrixPre(self, show=False, log=False):
        # Build the PRE matrix (the conditions each transition must satisfy in order to fire)
        # It should end up with transitions on the horizontal axis and places on the vertical axis:
        #     t0  t1  t2
        # p0  1   0   0
        # p1  0   1   0
        # p2  1   0   1
        # For now it is generated with the axes swapped and traversed differently when printed
pre = []
for transition in self.transitions:
pre_col = []
for place in self.places:
if transition in place.nextNodes:
pre_col.append(1)
else:
pre_col.append(0)
pre.append(pre_col)
pre_np = np.asarray(pre).transpose()
if log: np.savetxt("./logs/pre.csv", pre_np, delimiter=',')
if show:
print("PRE MATRIX:")
for i in range(len(pre[0])):
for j in range(len(pre)):
print(pre[j][i], end=' ')
                print()
        return pre_np
def getMatrixPos(self, show=False, log=False):
        # Build the POS matrix (the conditions that become marked after a transition fires)
pos = []
for transition in self.transitions:
pos_col = []
for place in self.places:
if place in transition.nextNodes:
pos_col.append(1)
else:
pos_col.append(0)
pos.append(pos_col)
pos_np = np.asarray(pos).transpose()
        if log: np.savetxt("./logs/pos.csv", pos_np, delimiter=',')
if show:
print("POS MATRIX:")
for i in range(len(pos[0])):
for j in range(len(pos)):
print(pos[j][i], end=' ')
print()
return pos_np
#? Returns a list of Place() objects whose IDs span the range of numbers passed as argument
def generatePlaces(range_of_ids):
_p = []
for i in range_of_ids:
        _p.append(Place(i))  # i becomes the id of the Place()
return _p
#? Returns a list of Transition() objects whose IDs span the range of numbers passed as argument
def generateTransitions(range_of_ids):
_t = []
for i in range_of_ids:
        _t.append(Transition(i))  # i becomes the id of the Transition()
return _t
#? Demo network for testing
def getDemoNetwork():
    # Generate a list of Place() objects in a list called 'places'
    places = generatePlaces(range(6))
    # Repeat the same process to generate the Transition() objects in the list 'transition'
    transition = generateTransitions(range(5))
    # Set up the relations between Places and Transitions
places[0].addNext(transition[0])
transition[0].addNext(places[1])
places[1].addNext(transition[1])
transition[1].addNext(places[2])
places[2].addNext(transition[2])
transition[2].addNext(places[0])
#
places[3].addNext(transition[0])
transition[0].addNext(places[4])
places[4].addNext(transition[3])
transition[3].addNext(places[5])
places[5].addNext(transition[4])
transition[4].addNext(places[3])
#
initial_state = [1,0,0,1,0,0]
return Network(places, transition, initial_state, 6)
if __name__ == "__main__":
Petri = getDemoNetwork()
Petri.getMatrixPre(show=True)
Petri.getMatrixPos(show=True)
    print("Transition actions:")
Petri.fastForward(60)
print()
    print("Final state of the network:")
Petri.print()
print()
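    # Added illustration (sketch): the incidence matrix C = POS - PRE summarises how
    # firing each transition changes the marking of every place. This relies on
    # getMatrixPre()/getMatrixPos() returning their NumPy matrices.
    print("Incidence matrix (POS - PRE):")
    print(Petri.getMatrixPos() - Petri.getMatrixPre())
    print()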
|
class DateException(Exception):
pass
class UnknownModeException(Exception):
pass
|
import json
import pandas as pd;
import matplotlib.pyplot as plt;
import seaborn as sns;
import folium as fol;
from config.settings import DATA_DIRS, STATICFILES_DIRS, TEMPLATES
# population migration data
df = pd.read_excel(DATA_DIRS[0]+'//data1.xlsx',engine='openpyxl'
,header=0);
# electric power generation
df2 = pd.read_excel(DATA_DIRS[0]+'//data2.xlsx',engine='openpyxl');
df3 = pd.read_csv(DATA_DIRS[0]+'//auto-mpg.csv',header=None);
df3.columns = ['mpg','cyl','dis','hor','wei','acc','year','origin','name'];
# university locations
df4 = pd.read_excel(DATA_DIRS[0]+'//data3.xlsx',engine='openpyxl');
# Gyeonggi-do administrative districts
df5 = pd.read_excel(DATA_DIRS[0]+'//data4.xlsx',engine='openpyxl');
# titanic
tt = sns.load_dataset('titanic');
class P109:
def mat01(self):
print(df);
df2 = df.fillna(method='ffill');
print(df2);
mask = (df2['전출지별'] == '서울특별시') & (df['전입지별'] != '서울특별시');
print(mask);
df_seoul = df2[mask];
print(df_seoul);
df_seoul = df_seoul.drop(['전출지별'], axis=1);
print(df_seoul);
df_seoul.rename({'전입지별':'전입지'}, axis=1,inplace=True);
print(df_seoul);
df_seoul.set_index('전입지',inplace=True);
print(df_seoul);
sr_one = df_seoul.loc['경기도'];
print(sr_one);
plt.plot(sr_one);
plt.title('서울 -> 경기');
plt.savefig(STATICFILES_DIRS[0]+'/ss.jpg');
#plt.show();
def mat02(self):
print(df);
df2 = df.fillna(method='ffill');
print(df2);
mask = (df2['전출지별'] == '서울특별시') & (df['전입지별'] != '서울특별시');
print(mask);
df_seoul = df2[mask];
print(df_seoul);
df_seoul = df_seoul.drop(['전출지별'], axis=1);
print(df_seoul);
df_seoul.rename({'전입지별':'전입지'}, axis=1,inplace=True);
print(df_seoul);
df_seoul.set_index('전입지',inplace=True);
print(df_seoul);
df3 = df_seoul.loc[['충청남도','강원도','충청북도','전라남도']];
print(df3);
#df3.loc['sum'] = df3.sum();
df3['sum'] = df3.sum(axis=1);
df3s = df3[['sum']].sort_values(by='sum');
print(df3s);
# plt.style.use('ggplot');
# df3t.index = df3t.index.map(int);
# df3t.plot(kind='barh',stacked=False,alpha=0.2,figsize=(10,5));
# plt.show();
def mat03(self,sy,ey):
#print(df2);
df3 = df2.loc[5:9];
df3.drop('전력량 (억㎾h)', axis=1, inplace=True);
df3.set_index('발전 전력별',inplace=True);
print(df3);
df3t = df3.T;
print(df3t);
df3t.drop('원자력', axis=1, inplace=True);
#print(df3t);
df3t =df3t.rename(columns={'합계':'총발전량'});
#print(df3t);
df3t['1년전'] = df3t['총발전량'].shift(1);
df3t['증감률'] = ((df3t['총발전량']/df3t['1년전'])-1) * 100;
df3t['증감률'].fillna(0,inplace=True);
df3t['year'] = df3t.index;
df3t['new_year'] = pd.to_datetime(df3t.index);
df3t['new_year'] = df3t['new_year'].dt.to_period(freq='A');
df3t.set_index(df3t['new_year'], inplace=True);
df3t = df3t[sy:ey];
print(df3t);
year = df3t['year'].tolist();
w = df3t['수력'].tolist();
f = df3t['화력'].tolist();
df3t['증감률'] = df3t['증감률']+100;
avg = df3t['증감률'].tolist();
result = {};
result['year'] = year;
result['w'] = w;
result['f'] = f;
result['avg'] = avg;
print(result);
return result;
def mat04(self):
        # Count the number of vehicles per country of origin
print(df3);
df3['count'] = 1;
df4 = df3.groupby('origin').sum();
df4.index = ['USA','EU','JPN'];
print(df4);
def mat05(self):
df4 = df3[df3['origin']==1]['mpg']
print(df4);
def mat06(self):
print(tt);
tt2 = tt.pivot_table(index=['sex'],columns=['class'],aggfunc='size');
print(tt2);
def mat07(self):
seoul_map = fol.Map(location=[37.55,126.98],zoom_start=12);
seoul_map.save(TEMPLATES[0]['DIRS'][0]+'\\seoul_map.html');
def mat08(self):
seoul_map = fol.Map(location=[37.55,126.98],zoom_start=12);
print(df4);
df4.columns = ['name','lat','lng'];
df4.set_index('name',inplace=True);
for name,lat,lng in zip(df4.index,df4['lat'],df4['lng']):
#print(name,lat,lng);
fol.Marker([lat,lng],popup=name).add_to(seoul_map);
seoul_map.save(TEMPLATES[0]['DIRS'][0]+'\\seoul_coll.html');
def mat09(self,year):
df5.set_index('구분',inplace=True);
print(df5);
geo_path = DATA_DIRS[0]+'/data4.json';
        geo_data = json.load(open(geo_path, encoding='utf-8'));
print(geo_data);
map = fol.Map(location=[37.5502, 126.982], zoom_start=9);
fol.Choropleth(
geo_data= geo_data,
data = df5[year],
columns=[df5.index,df5[year]],
fill_color='YlOrRd', fill_opacity=0.7,line_opacity=0.3,
threshold_scale = [10000,100000,300000,500000,700000],
key_on='feature.properties.name'
).add_to(map);
map.save(TEMPLATES[0]['DIRS'][0] + '\\chart4result.html');
def mat10(self):
tt = sns.load_dataset('titanic');
sns.set_style('whitegrid');
fig = plt.figure(figsize=(15,5));
ax1 = fig.add_subplot(1, 3, 1);
ax2 = fig.add_subplot(1, 3, 2);
ax3 = fig.add_subplot(1, 3, 3);
sns.barplot(x='sex',y='survived',data=tt,ax=ax1);
sns.barplot(x='sex', y='survived',hue='class', data=tt, ax=ax2);
sns.barplot(x='sex', y='survived',hue='class', dodge=False,
data=tt, ax=ax3);
plt.savefig(STATICFILES_DIRS[0]+'/tt.jpg');
if __name__ == '__main__':
    P109().mat03('2011', '2016');  # mat03 requires a start/end year range; these values are placeholders, adjust to the data
|
"""
Implementations of the subnetworks:
- Encoder
- Generator (Decoder)
- D_real
- D_prior
- D_em
"""
import tensorflow as tf
import numpy as np
from PK_Utils.PK_layers import dense, conv2d, deconv2d, batch_norm
from PK_Utils.PK_config import size_batch, num_z_channels
# --HELPERS ---------------------------------------
# -------------------------------------------------
def lrelu(inp, leak=0.2):
"""
Leaky Rectified Linear Unit (ReLu) activation.
@param inp: input tensor
@return: tensor of same size as input tensor
"""
return tf.maximum(inp, leak*inp)
def concat_label(tensor, label, duplicate=1):
"""
Duplicates label and concatenates it to tensor.
@param tensor: input tensor
(1) of size [batch_size, length]
(2) of size [batch_size, x, x, length]
@param label: input tensor of size [batch_size, label_length]
@return: (1) tensor of size [batch_size, length+duplicate*label_length]
(2) tensor of size [batch_size, x, x, length+duplicate*label_length]
"""
# duplicate the label to enhance its effect
label = tf.tile(label, [1, duplicate])
# get shapes of label and tensor
tensor_shape = tensor.get_shape().as_list()
label_shape = label.get_shape().as_list()
# CASE (1)
if len(tensor_shape) == 2: return tf.concat([tensor, label], 1)
# CASE (2)
if len(tensor_shape) == 4:
# reshape label to [batch_size, 1, 1, duplicate*label_length]
label = tf.reshape(label, [tensor_shape[0], 1, 1, label_shape[-1]])
# scale label to [batch_size, x, x, duplicate*label_length]
label = label*tf.ones([tensor_shape[0], tensor_shape[1], tensor_shape[2], label_shape[-1]])
# concatenate label and tensor
return tf.concat([tensor, label], 3)
# --NETWORKS --------------------------------------
# -------------------------------------------------
def generator(z, valence, arousal, reuse_variables=False):
"""
Creates generator network.
@param z: tensor of size config.num_z_channels
@param valence: tensor of size 1
@param arousal: tensor of size 1
@return: tensor of size 96x96x3
"""
if reuse_variables:
tf.compat.v1.get_variable_scope().reuse_variables()
with tf.compat.v1.variable_scope("G") as scope:
# duplicate valence/arousal label and concatenate to z
z = concat_label(z, valence, duplicate=num_z_channels)
z = concat_label(z, arousal, duplicate=num_z_channels)
# -- fc layer
name = 'G_fc'
current = dense(z, 1024*6*6, reuse=reuse_variables)
# reshape
current = tf.reshape(current, [-1, 6, 6, 1024])
current = tf.nn.relu(current)
# -- transposed convolutional layer 1-4
for index, num_filters in enumerate([512, 256, 128, 64]):
name = 'G_deconv' + str(index+1)
current = deconv2d(current, num_filters, name=name, reuse=reuse_variables)
current = tf.nn.relu(current)
# -- transposed convolutional layer 5+6
current = deconv2d(current, 32, stride=1, name='G_deconv5', reuse=reuse_variables)
current = tf.nn.relu(current)
current = deconv2d(current, 3, stride=1, name='G_deconv6', reuse=reuse_variables)
return tf.nn.tanh(current)
def encoder(current, reuse_variables=False):
"""
Creates encoder network.
@param current: tensor of size 96x96x3
@return: tensor of size config.num_z_channels
"""
if reuse_variables:
tf.compat.v1.get_variable_scope().reuse_variables()
with tf.compat.v1.variable_scope("E") as scope:
# -- transposed convolutional layer 1-4
for index, num_filters in enumerate([64,128,256,512]):
name = 'E_conv' + str(index)
current = conv2d(current, num_filters, name=name, reuse=reuse_variables)
current = tf.nn.relu(current)
# reshape
current = tf.reshape(current, [size_batch, -1])
# -- fc layer
name = 'E_fc'
current = dense(current, num_z_channels, name=name, reuse=reuse_variables)
return tf.nn.tanh(current)
def d_img(current, valence, arousal, reuse_variables=False):
"""
Creates discriminator network on generated image + desired emotion.
@param current: tensor of size 96x96x3
@param valence: tensor of size 1
@param arousal: tensor of size 1
@return: sigmoid(output), output
(output tensor is of size 1)
"""
if reuse_variables:
tf.compat.v1.get_variable_scope().reuse_variables()
with tf.compat.v1.variable_scope("D_img") as scope:
# -- convolutional blocks (= convolution+batch_norm+relu) 1-4
for index, num_filters in enumerate([16, 32, 64, 128]):
# convolution
name = 'D_img_conv' + str(index+1)
current = conv2d(current, num_filters, name=name, reuse=reuse_variables)
# batch normalization
name = 'D_img_bn' + str(index+1)
current = batch_norm(current, name, reuse=reuse_variables)
# relu activation
current = tf.nn.relu(current)
if index==0:
current = concat_label(current, valence, 16)
current = concat_label(current, arousal, 16)
# reshape
current = tf.reshape(current, [size_batch, -1])
# -- fc 1
name = 'D_img_fc1'
current = lrelu(dense(current,1024, name=name, reuse=reuse_variables))
# -- fc 2
name = 'D_img_fc2'
current = dense(current,1, name=name, reuse=reuse_variables)
return tf.nn.sigmoid(current), current
def d_prior(current, reuse_variables=False):
if reuse_variables:
tf.compat.v1.get_variable_scope().reuse_variables()
with tf.compat.v1.variable_scope("D_prior") as scope:
        # FC blocks 1-3 (fully connected + batch_norm + relu)
for index, num_filters in enumerate([64,32,16]):
name = 'D_prior_fc' + str(index+1)
current = dense(current, num_filters, name=name)
# batch normalization
name = 'D_prior_bn' + str(index+1)
current = batch_norm(current, name, reuse=reuse_variables)
# relu activation
current = tf.nn.relu(current)
# FC block 4
name = 'D_prior_fc' + str(index+2)
current = dense(current, 1, name=name)
return tf.nn.sigmoid(current), current
def d_em(current, reuse_variables=False):
if reuse_variables:
tf.compat.v1.get_variable_scope().reuse_variables()
with tf.compat.v1.variable_scope("d_em") as scope:
        # FC blocks 1-3 (fully connected + batch_norm + relu)
# name = 'D_em_fc1'
# current = dense(current, 64, name=name)
# # batch normalization
# name = 'D_em_bn1'
# current = batch_norm(current, name)
# # relu activation
# current = tf.nn.relu(current)
for index, num_filters in enumerate([64,32,16]):
name = 'D_em_fc1' + str(index+1)
current = dense(current, num_filters, name=name)
# batch normalization
name = 'D_em_bn1' + str(index+1)
current = batch_norm(current, name, reuse=reuse_variables)
# relu activation
current = tf.nn.relu(current)
# FC block 2_Arousal
name = 'D_em_fc2_arousal'
current_arousal = dense(current, 1, name=name)
# FC block 2_Valence
        name = 'D_em_fc2_valence'
current_valence = dense(current, 1, name=name)
return tf.nn.sigmoid(current_arousal), tf.nn.sigmoid(current_valence), current_arousal, current_valence
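# Illustrative sketch (added; not part of the original module): shape behaviour of
# concat_label for the two cases documented in its docstring. Assumes TF2 eager
# execution and that size_batch is a plain integer; the tensors below are
# hypothetical examples, not values used by the networks above.
if __name__ == '__main__':
    t2d = tf.zeros([size_batch, 8])           # case (1): [batch_size, length]
    t4d = tf.zeros([size_batch, 6, 6, 16])    # case (2): [batch_size, x, x, length]
    lab = tf.ones([size_batch, 1])            # a one-dimensional label per sample
    print(concat_label(t2d, lab, duplicate=3).shape)  # (size_batch, 8 + 3)
    print(concat_label(t4d, lab, duplicate=3).shape)  # (size_batch, 6, 6, 16 + 3)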
|
# https://leetcode.com/problems/count-sorted-vowel-strings/
class Solution:
def countVowelStrings(self, n: int) -> int:
        # Stars and bars: the count is C(n+5-1, n) = C(n+4, 4) = (n+4)(n+3)(n+2)(n+1)/24
return (n+4)*(n+3)*(n+2)*(n+1)//24
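# Quick sanity check (added sketch, not part of the submission): the closed form
# above should equal a brute-force count of sorted vowel strings, i.e. multisets
# of size n drawn from the 5 vowels.
if __name__ == '__main__':
    from itertools import combinations_with_replacement
    for n in range(1, 6):
        brute = sum(1 for _ in combinations_with_replacement('aeiou', n))
        assert Solution().countVowelStrings(n) == brute
    print('closed form matches brute force for n = 1..5')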
|
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
def img_add_number(image_path,sign="46"):
im=Image.open(image_path)
width,height=im.size
draw=ImageDraw.Draw(im)
font=ImageFont.truetype("Arial.ttf",min(width//6,height//6))
draw.text((width*0.75,height*0.075),sign,font=font,fill=(255,33,33,255))
left,right=image_path.rsplit(".",1)
new_image_path=left+"_"+sign+"."+right
im.save(new_image_path)
if __name__=='__main__':
img_add_number("./sample.jpg")
    print("Finished.")
|
import os
import sys
import pickle
import warnings
from typing import Dict
import numpy as np
import pandas as pd
from autosklearn.estimators import AutoSklearnClassifier
from sklearn.exceptions import DataConversionWarning
from surfboard.sound import Waveform
from surfboard.feature_extraction import extract_features as get_voice_features
from tqdm import tqdm
from functools import partialmethod
tqdm.__init__ = partialmethod(tqdm.__init__, disable=True)
def get_input_data():
model_path = input()
file_path = input()
mp3 = Waveform(file_path)
return model_path, mp3
def load_model(model_path):
with open(model_path, 'rb') as file:
return pickle.load(file)
def extract_features(sample: Waveform):
features = ['mfcc', 'log_melspec', 'chroma_stft', 'spectral_slope', 'intensity', 'kurtosis_slidingwindow']
df_features: pd.DataFrame = get_voice_features([sample], features, statistics_list=['mean'])
return df_features.to_numpy()
def test_model(feature: np.ndarray, model_meta):
model: AutoSklearnClassifier = model_meta['model']
label_mapping: Dict = model_meta['labels']
label = model.predict(feature)[0]
person = label_mapping[label]
probability = model.predict_proba(feature)[0][label]
return person, probability
def print_output(person, probability):
print(person)
sys.stdout.flush()
print(probability)
sys.stdout.flush()
def main():
model_path, mp3 = get_input_data()
model_meta = load_model(model_path)
features = extract_features(mp3)
person, probability = test_model(features, model_meta)
print_output(person, probability)
if __name__ == '__main__':
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-18 09:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('app', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Business',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('email', models.EmailField(blank=True, max_length=70)),
],
),
migrations.CreateModel(
name='Neighborhood',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Locality', models.CharField(max_length=30)),
('name', models.CharField(max_length=30)),
('occupants_count', models.IntegerField(blank=True, default=0)),
('date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.RemoveField(
model_name='post',
name='profile',
),
migrations.RemoveField(
model_name='post',
name='user_profile',
),
migrations.RemoveField(
model_name='ratings',
name='post_rated',
),
migrations.RemoveField(
model_name='profile',
name='bio',
),
migrations.AddField(
model_name='profile',
name='email',
field=models.EmailField(blank=True, max_length=70),
),
migrations.DeleteModel(
name='Post',
),
migrations.DeleteModel(
name='Ratings',
),
migrations.AddField(
model_name='neighborhood',
name='profile',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Profile'),
),
migrations.AddField(
model_name='neighborhood',
name='user_profile',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='business',
name='biz_hood',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Neighborhood'),
),
migrations.AddField(
model_name='business',
name='biz_owner',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Profile'),
),
]
|
"""
Plugin to create a quick panel lookup that lets you jump between comment
titles
"""
import os
import imp
import time
import sys
import sublime
import sublime_plugin
import re
#
# > Plugin command
#
class table_of_comments_command(sublime_plugin.TextCommand):
def run(self, edit, move=None, fold=None, unfold=None, generate=None):
toc = TableOfComments(self.view, edit)
if move is not None:
self.traverse_comments(toc, move)
elif fold is not None or unfold is not None:
self.fold_comments(toc, fold, unfold)
elif generate is not None:
toc.create_toc()
else:
self.show_quick_panel(toc)
# >> Quick panel
def show_quick_panel(self, toc):
view = self.view
toc._debug_start('Show quick panel')
toc.create_toc()
# Get current section from cursor
show_index = 0
current_section = toc.get_section_from_cursor()
if current_section:
show_index = current_section['index']
# Store positions for returning to
return_to = []
for each in view.sel():
return_to.append(each)
toc.return_to = return_to
# Pop up the panel
titles = toc.get_comment_titles('string')
self.window = sublime.active_window()
if sys.version_info < (3, 0):
self.window.show_quick_panel(titles, toc.on_list_selected_done)
else:
self.window.show_quick_panel( # Pass on_highlighted callback
titles, toc.on_list_selected_done, False, show_index,
toc.on_list_selected_done)
toc._debug_stop('Show quick panel')
# >> Up down
# Allows moving up and down through comments
def traverse_comments(self, toc, move):
view = self.view
titles = toc.get_comment_titles()
sel = view.sel()
if len(sel) == 1:
current_line_no, col_no = view.rowcol(sel[0].b)
for x in range(len(titles)):
item = titles[x]
if move == 'up': # moving up
if item['line'] < current_line_no:
if x+1 < len(titles):
if titles[x+1]['line'] >= current_line_no:
return toc.on_list_selected_done(x)
else:
return toc.on_list_selected_done(x)
else: # moving down
if item['line'] > current_line_no:
return toc.on_list_selected_done(x)
# >> Fold comments
def fold_comments(self, toc, fold, unfold):
comments = self.view.find_by_selector('comment')
is_all = fold == 'all' or unfold == 'all'
# Get the content regions to fold
fold_regions = []
if is_all:
sections = toc.get_sections()
for s in sections:
content_region = s['content_region']
fold_regions.append(content_region)
else:
section = toc.get_section_from_cursor()
fold_regions.append(section['content_region'])
# Fold, unfold or toggle
if fold is not None:
self.view.fold(fold_regions)
elif unfold is not None:
self.view.unfold(fold_regions)
elif self.view.fold(fold_regions) is False:
self.view.unfold(fold_regions)
#
# > Plugin class
#
class TableOfComments:
def __init__(self, view, edit):
self.view = view
self.edit = edit
#
# Debug timing functions
#
#
timers = {}
def _debug_start(self, ref):
self.timers[ref] = time.time()
def _debug_stop(self, ref):
start_time = self.timers[ref]
duration = time.time() - start_time
self.timers[ref] = duration
#
# Table TOC tag
#
def get_toc_region(self, view):
title = get_setting('toc_title', str)
pattern = r'\/\*(\s|\*)*'+title+r'[^\/]*\/'
matches = view.find_all(pattern)
for region in (matches):
if self.is_scope_or_comment(view, region):
return region
return None
def is_in_toc_region(self, view, region):
toc_region = self.get_toc_region(view)
if toc_region:
if region.a > toc_region.a and region.a < toc_region.b:
return True
return False
def create_toc(self):
view = self.view
edit = self.edit
region = self.get_toc_region(view)
if region:
toc = self.compile_toc(view)
existing = view.substr(region)
if existing != toc:
view.replace(edit, region, toc)
def compile_toc(self, view):
self._debug_start('compile-toc')
titles = self.get_comment_titles('string')
title = get_setting('toc_title', str)
start = get_setting('toc_start', str)
line = get_setting('toc_line', str)
end = get_setting('toc_end', str)
front = "\n" + line
output = start + front + title + front.rstrip()
for title in titles:
comment_level = title.count('-') + 1
try:
level = int(get_setting('toc_level', int))
if level >= comment_level:
output += front + title
except TypeError:
output += front + title
output += "\n"+end
self._debug_stop('compile-toc')
return output
#
# >> Quick panel
#
# Jump list quick menu selected
def on_list_selected_done(self, picked):
if picked == -1:
self.view.sel().clear()
for each in self.return_to:
self.view.sel().add(each)
self.view.show(self.view.sel())
else:
titles = self.get_comment_titles()
title = titles[picked]
row = title['line']
point = self.view.text_point(row, 0)
line_region = self.view.line(point)
# Reference the 'text' within the line only
text = title['text']
text = re.escape(text)
text = text.replace('\>', '>') # ">" does not work when escaped
text_region = self.view.find(text, line_region.a)
# view.rowcol() returns a zero based line number
line = int(title['line'])+1
# Use goto_line to move the document then highlight
if sublime.active_window().active_view():
sublime.active_window().active_view().run_command(
"goto_line", {"line": line}
)
self.view.sel().clear()
self.view.sel().add(text_region)
#
# >> Parse
#
# Core parse function (returned as dict or list)
def get_comment_titles(self, format='dict', test=None):
self._debug_start('get-comment-titles')
view = self.view
level_char = get_setting('level_char', str)
comment_chars = get_setting('comment_chars', str)
comment = list(comment_chars)
comment = 'DIV'.join(comment_chars)
start = r'\s|'+re.escape(comment).replace('DIV', '|')
# build the pattern to match the comment
pattern = r'^('+start+')*?('+format_pattern(level_char)+'+)\s*' + \
r'(.+)('+start+')*?$'
matches = view.find_all(pattern)
results = []
toc_title = get_setting('toc_title', str)
for match in matches:
bits = view.lines(match) # go through each line
for region in bits:
# Ensure it's comment or source
if not self.is_scope_or_comment(view, region):
continue
# Ensure not in toc region already
if self.is_in_toc_region(view, region):
continue
line = view.substr(region)
line_match = re.match(pattern, line)
if not line_match:
continue
if level_char in line:
# Add the level chars
label = line_match.group(2)
# Replace level char with toc char
label = self.replace_level_chars(label)
level = len(label)
if label != '':
label += ' '
# append the heading text, remove trailing comment chars
text = line_match.group(3).strip(comment_chars+' ')
label += text
# Get the position
if line != '' and line != toc_title:
line_no, col_no = view.rowcol(region.b)
if format == 'dict':
results.append(
{'label': label,
'text': text,
'level': level,
'region': region,
'line': line_no})
else:
results.append(label)
self._debug_stop('get-comment-titles')
return results
#
# >> Plugin sections (regions)
#
# Returns list of sections dicts with all related values
def get_sections(self):
comments = self.view.find_by_selector('comment')
titles = self.get_comment_titles()
# Only get comment blocks with titles within them
sections = []
for i in range(len(comments)):
# we need to get the whole lines in order to match
# indented title regions correctly
comment = self.view.line(comments[i])
# If multiple lines returned check for valid lines
comment_lines = self.view.split_by_newlines(comment)
if len(comment_lines) > 0:
fixed_comment_lines = []
for x in range(len(comment_lines)):
if self.is_scope_or_comment(self.view, comment_lines[x]):
fixed_comment_lines.append(comment_lines[x])
if len(fixed_comment_lines) > 0:
comment = sublime.Region(
fixed_comment_lines[0].a,
fixed_comment_lines[len(fixed_comment_lines)-1].b
)
# Append to sections
for title in titles:
if comment.contains(title['region']):
title['title_region'] = comment
sections.append(title)
break
# Get the fold regions (content blocks)
s_no = len(sections)
view_size = self.view.size()
for i in range(s_no):
section = sections[i]
section['index'] = i
region = section['title_region']
# content_region = the area that will be hidden when folded
fold_start = region.b + 1
fold_end = view_size
# get the next section of equal or lower level
for j in range(i+1, s_no):
if sections[j]['level'] <= section['level']:
fold_end = sections[j]['title_region'].a - 1
break
content_region = sublime.Region(fold_start, fold_end)
section['content_region'] = content_region
return sections
# Returns the title and content region from cursor
def get_section_from_cursor(self):
# Current selection
sel = self.view.sel()[0]
line_no, col_no = self.view.rowcol(sel.b)
# Find within sections
sections = self.get_sections()
for section in reversed(sections):
if section['line'] <= line_no:
return section
return False
# Only find titles within genuine comments
# This will no doubt need to be improved over time for various syntaxes
# ('string.quoted' makes python """ comments """ not trigger)
def is_scope_or_comment(self, view, region):
line = view.substr(region)
# Trim to scope
# If line starts with whitespace, the syntax returned is "source" not
# "comment" for the initial char
trimmed = line.lstrip()
diff = len(line) - len(trimmed)
scope = view.scope_name(region.a + diff)
# Check out scope
comments_scope = ['comment']
disallow = ['string.quoted']
for each in comments_scope:
if scope.find(each) < 0:
return False
for each in disallow:
if scope.find(each) > 0:
return False
return True
def replace_level_chars(self, line):
level_char = get_setting('level_char', str)
toc_char = get_setting('toc_char', str)
# remove the last char so level one has no indent
line = line[:-1].replace(level_char, toc_char)
return line
#
# Helpers
#
def format_pattern(pattern):
pattern = re.escape(pattern)
pattern = pattern.replace('\>', '>')
return pattern
def get_setting(name, typeof=str):
settings = sublime.load_settings('tableofcomments.sublime-settings')
setting = settings.get(name)
if setting:
if typeof == str:
return setting
if typeof == bool:
return setting is True
elif typeof == int:
return int(settings.get(name, 500))
else:
if typeof == str:
return ''
else:
return None
#
# Testing infrastructure
#
if sys.version_info < (3, 0):
import tests
else:
from . import tests
class table_of_comments_run_tests_command(sublime_plugin.TextCommand):
def run(self, edit):
reload_test_bootstrap()
tests.run(self.view, edit)
# For developing, reload tests.* which in turn reloads it's sub packages
basedir = os.getcwd()
def reload_test_bootstrap():
os.chdir(basedir)
path = 'tests'
if sys.version_info < (3, 0):
__import__(path)
sys.modules[path] = reload(sys.modules[path])
else:
imp.reload(eval(path))
class table_of_comments_auto_runner(sublime_plugin.EventListener):
def on_pre_save(self, view):
if get_setting('toc_generate_on_save', bool):
view.run_command('table_of_comments', { 'generate': True })
|
# coding=utf-8
import numpy as np
import scipy.stats
from .cobsampler import ChangeOfBasisSampler
class Test(object):
"""
Super class implementing tests for CoBSampler. Sub-classes should specify
target distribution.
"""
def __init__(self, ndim, target, nsteps, cobparams={}):
self.ndim = ndim
self.targetdist = target
self.niterations = nsteps
self.firscob = cobparams.pop('firstcob', 1000)
self.ncob = cobparams.pop('ncob', 1000)
self.updatecob = cobparams.pop('updatecob', 1000)
self.sampler = ChangeOfBasisSampler
def run(self, p0):
# initialise sampler
sampler = self.sampler(self.ndim, self.targetdist.logpdf, (),
{}, startpca=self.firscob,
nupdatepca=self.updatecob,
npca=self.ncob)
# p0 = np.zeros(self.ndim)
sampler.run_mcmc(self.niterations, p0)
return sampler
class MultinormalTest(Test):
"""
Class implementing test on multinormal distribution.
"""
def __init__(self, nsteps, ndim=2, mean=None, cov=None, cobparams={}):
"""
:param int nsteps: number of MCMC iterations.
:param int ndim: target dimension
:param np.array cov: covariance matrix. If None, random covariance is
constructed.
"""
target = Multinormal(ndim, mean, cov)
super(MultinormalTest, self).__init__(ndim, target, nsteps, cobparams)
class RosenbrockTest(Test):
"""
Class implementing test on Rosenbrock density.
"""
def __init__(self, nsteps, a=1, b=100, cobparams={}):
target = Rosenbrock(a, b, 2)
super(RosenbrockTest, self).__init__(2, target, nsteps, cobparams)
class TargetDistribution(object):
"""
Class for test target distributions.
"""
class Multinormal(TargetDistribution):
def __init__(self, ndim=2, mean=None, cov=None):
self.ndim = ndim
if mean is None:
mean = np.zeros(ndim)
else:
            assert len(mean) == ndim, 'Dimensions of mean array do not match the number of dimensions.'
self.mean = mean
if cov is not None:
            assert cov.shape == (ndim, ndim), 'Dimensions of covariance matrix do not match.'
self.cov = cov
else:
# If covariance is not given, initialise at random.
self.cov = 0.5 - np.random.rand(self.ndim ** 2).reshape((self.ndim,
self.ndim))
self.cov = np.triu(self.cov)
self.cov += self.cov.T - np.diag(self.cov.diagonal())
self.cov = np.dot(self.cov, self.cov)
self.dist = scipy.stats.multivariate_normal(mean=self.mean,
cov=self.cov)
def pdf(self, x):
"""
Return value of pdf at point x
:param np.array x: Position in parameter space.
:return:
"""
return self.dist.pdf(x)
def logpdf(self, x):
return self.dist.logpdf(x)
class Rosenbrock(TargetDistribution):
"""
Class implementing the Rosenbrock density.
"""
def __init__(self, a=1, b=100, ndim=2):
self.a = a
self.b = b
self.ndim = ndim
def pdf(self, x):
if (np.abs(x[0]) > 10) or (np.abs(x[1]) > 10):
return 0
return np.exp(-(self.a - x[0])**2 - self.b*(x[1] - x[0]**2)**2)
def logpdf(self, x):
if (np.abs(x[0]) > 30) or (np.abs(x[1]) > 30):
return -np.inf
else:
return -(self.a - x[0])**2 - self.b*(x[1] - x[0]*x[0])**2
def contour(self, k, n=1000):
"""
:param float k: constant identifying contour.
:param int n: number of points used to construct contour.
"""
x = np.linspace(self.a - k, self.a + k, n)
yplus = x**2 + np.sqrt( (k**2 - (x - self.a)**2)/self.b )
yminus = x ** 2 - np.sqrt((k ** 2 - (x - self.a) ** 2) / self.b)
xx = np.concatenate((x, x[::-1]))
yy = np.concatenate((yminus, yplus[::-1]))
return np.array([xx, yy])
def rvs(self, size=1):
"""
Draw samples from the Rosenbrock density.
Uses the fact that p(x1,x2) = p(x2|x1)*p(x1) and that:
        1) p(x1) is N(a, sd = 1/sqrt(2))
        2) p(x2|x1) is N(x1**2, sd = 1/sqrt(2*b))
        """
        # Draw samples from the marginal p(x1); its standard deviation is 1/sqrt(2)
        x1 = np.random.randn(size) / np.sqrt(2) + self.a
# Draw samples from conditional, p(x2 | x1)
sigma = 1./np.sqrt(2 * self.b)
x2 = np.random.randn(size) * sigma + x1**2
return np.array([x1, x2]).T
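# Illustrative sketch (added; not part of the original module): draw direct samples
# from the Rosenbrock density via rvs() and spot-check logpdf at the mode (a, a**2).
# Because of the relative import at the top, this file is meant to run inside its
# package, so the block below is only a usage illustration.
if __name__ == '__main__':
    rb = Rosenbrock(a=1, b=100)
    draws = rb.rvs(size=1000)
    print('sample shape:', draws.shape)                              # (1000, 2)
    print('logpdf at the mode:', rb.logpdf(np.array([1.0, 1.0])))    # ~0.0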
|
#!/usr/bin/env python
#
# MIT License
#
# Copyright (c) 2018-2019 Groupe Allo-Media
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import argparse
from typing import List, Tuple
import pika
from pika.exceptions import (
ChannelClosed,
ConnectionClosed,
AMQPConnectionError,
AMQPHeartbeatTimeout,
)
class Resurrection:
def __init__(
self, url: str, queue: str, batch_size: int = 1, count: int = 0
) -> None:
connection = pika.BlockingConnection(pika.URLParameters(url))
channel = connection.channel()
self._batch_size = batch_size
result = channel.queue_declare(queue, passive=True)
channel.basic_qos(prefetch_count=self._batch_size)
queue_name = result.method.queue
self._count = result.method.message_count if count == 0 else count
self._seen = 0
self.messages: List[
Tuple[pika.spec.Basic.Deliver, pika.spec.BasicProperties, bytes]
] = []
channel.basic_consume(
queue=queue_name, on_message_callback=self.callback, auto_ack=False
)
self._channel = channel
self._connection = connection
def callback(
self,
ch: pika.channel.Channel,
method: pika.spec.Basic.Deliver,
properties: pika.spec.BasicProperties,
body: bytes,
) -> None:
# we cache the message to avoid loops if
# some resurrected messages come back dead again.
self.messages.append((method, properties, body))
print("Buffering message", method)
self._seen += 1
if self._seen == self._count:
print("replay")
self.replay()
print("stop consuming")
self._channel.stop_consuming()
elif self._seen % self._batch_size == 0:
print("replay batch")
self.replay()
def replay(self):
for method, properties, body in self.messages:
print("Replaying", method)
print()
self._channel.basic_publish(
exchange=properties.headers["x-first-death-exchange"],
routing_key=method.routing_key,
body=body,
properties=pika.BasicProperties(
delivery_mode=2, # make message persistent
content_type=properties.content_type,
reply_to=properties.reply_to,
correlation_id=properties.correlation_id,
headers=properties.headers,
),
)
            # Confirm consumption only if successfully resent
self._channel.basic_ack(method.delivery_tag)
self.messages.clear()
def run(self):
try:
self._channel.start_consuming()
except KeyboardInterrupt:
return True
except (
ChannelClosed,
ConnectionClosed,
AMQPConnectionError,
AMQPHeartbeatTimeout,
) as e:
print(e)
return False
else:
return True
finally:
if not self._connection.is_closed:
self._connection.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Resend dead letters.")
parser.add_argument("amqp_url", help="URL of the broker, including credentials.")
parser.add_argument("queue", help="Name of dead-letter queue.")
parser.add_argument(
"--count",
        help="Number of messages to resurrect (default is 0 = all).",
type=int,
default=0,
)
parser.add_argument(
"--batch_size",
help="for more efficiency, if the messages are small, process them in batches of this size (default is 1).",
type=int,
default=1,
)
# parser.add_argument(
# "--filter",
# help="Log patterns to subscribe to (default to all)",
# nargs="*",
# default=["#"],
# )
args = parser.parse_args()
expected_stop = False
print("Ctrl-C to quit.")
print("Resurrecting from:", args.queue)
inspector = Resurrection(args.amqp_url, args.queue, args.batch_size, args.count)
if inspector.run():
print("Done!")
else:
print("connection error (closed)")
|
# pylint: disable-msg=no-name-in-module,import-error
from distutils.cmd import Command
from os import getcwd
from subprocess import check_call
from setuptools import setup
class ComputePerformingFundsCommand(Command):
description = 'calculate performing funds'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
command = ['python', '-m', 'scripts.compute_performing_funds']
return check_call(command)
class ComputeReturnsCommand(Command):
description = 'compute returns from db'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
command = ['python', '-m', 'scripts.compute_returns']
return check_call(command)
class PylintCommand(Command):
description = 'run pylint on Python source files'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
command = ['pylint', getcwd()]
return check_call(command)
class ListCollectionsCommand(Command):
description = 'list all the collections in db'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
command = ['python', '-m', 'scripts.list_collections']
return check_call(command)
setup(cmdclass={
'compute_performing_funds': ComputePerformingFundsCommand,
'compute_returns': ComputeReturnsCommand,
'lint': PylintCommand,
'list_collections': ListCollectionsCommand
})
|
"""User Expression module
=============================
Contains the definition of user defined expressions.
It has the class UserExpression that evaluates an expression defined in an string.
Example:
Creating an expression::
true_value = UserExpression("1==1").evaluate()
false_expression = UserExpression("1==2").evaluate()
true_value = 3 == UserExpression("1+2").evaluate()
.. warning::
Currently the use of an object with this class is unsafe.
    It is very dangerous to accept strings to evaluate from untrusted input because
    they can execute malicious code.
"""
from typing import Any
from gsf.core.expressions import Expression
class UserExpression(Expression):
value: Any
"""Value of the expression."""
def __init__(self, expression_value: Any):
self.value = expression_value
def evaluate(self):
return self.value
def __str__(self):
return str(self.evaluate())
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 19 19:33:17 2019
@author: Mysia
"""
import numpy as np
def nnloss(x,t,dzdy):
instanceWeights=np.ones(len(x))
res=x-t
if(dzdy==0):
y = (1/2) * instanceWeights * np.power(res,2)
else:
y = res
return y
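# Minimal usage sketch (added; not part of the original file): with dzdy == 0 the
# function returns the per-element half squared error, otherwise it returns the
# raw residual that would be back-propagated.
if __name__ == '__main__':
    x = np.array([1.0, 2.0, 3.0])
    t = np.array([1.0, 1.0, 1.0])
    print(nnloss(x, t, 0))  # -> [0.  0.5 2. ]
    print(nnloss(x, t, 1))  # -> [0. 1. 2.]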
|
import time, DAN, requests, random, datetime
import sys
from threading import Thread
import re
import json
import traceback
name = ""
light = 0
interval = 50
changeinterval = 10 #s
ledmesseges = {}
ledindex = 0
skip = 0
def sendMessegeToLEDMatrix(msg):
DAN.push('LEDCommandSender', json.dumps({'model':'LEDMatrix', 'data':[msg]}))
def loop():
global changeinterval
global ledindex
global skip
while 1:
if skip == 0:
now = datetime.datetime.now()
for index in list(ledmesseges.keys()):
if index < now:
if list(ledmesseges.keys()).index(index) < ledindex:
ledindex = ledindex - 1
del ledmesseges[index]
if len(ledmesseges):
print("ledsent:"+ledmesseges[list(ledmesseges.keys())[ledindex]])
sendMessegeToLEDMatrix(ledmesseges[list(ledmesseges.keys())[ledindex]]);
if ledindex < len(ledmesseges)-1:
ledindex = ledindex+1
else:
ledindex = 0
else:
skip = 0
time.sleep(changeinterval)
def push2queue(msg, sec):
global ledmesseges
ledmesseges[datetime.datetime.now() + datetime.timedelta(seconds = sec)] = msg
def interrupt(msg):
global skip
skip = 1
DAN.push('LEDCommandSender', json.dumps({'model':'LEDMatrix', 'data':[msg]}))
loopthread = Thread(target=loop)
loopthread.start()
ServerURL = 'http://140.113.199.189:9999' #with no secure connection
#ServerURL = 'https://DomainName' #with SSL connection
Reg_addr = None #if None, Reg_addr = MAC address
DAN.profile['dm_name']='LEDMatrixManager'
DAN.profile['df_list']=['LEDCommandSender', 'LEDManagerReciever']
DAN.profile['d_name']= None # None for autoNaming
DAN.device_registration_with_retry(ServerURL, Reg_addr)
while True:
try:
Msg=DAN.pull('LEDManagerReciever')
if Msg != None:
msg = json.loads(Msg[0])
model = msg['model']
if model == 'TextSender':
print(msg['data'][0])
m = re.match("^.*現在(.*)天氣.*$", msg['data'][0])
if m != None:
DAN.push('LEDCommandSender', json.dumps({'model':'weather', 'data':[m.group(1)]}))
m = re.match("^.*現在(時間|幾點).*$", msg['data'][0])
if m != None:
interrupt('{0:%H:%M}'.format(datetime.datetime.now()))
m = re.match("^.*今天.*(幾月|幾號|幾日|日期).*$", msg['data'][0])
if m != None:
push2queue('{0:today: %m-%d-%Y}'.format(datetime.datetime.now()), 60)
m = re.match("^"+name+".*亮度([0-9]*).*$", msg['data'][0])
if m != None:
light = eval(m.group(1))
DAN.push('LEDCommandSender', json.dumps({'model':'LEDMatrix', 'data':['!intensity '+str(light)]}))
m = re.match("^"+name+".*亮.?點.*$", msg['data'][0])
if m != None:
if light < 15:
light += 1
DAN.push('LEDCommandSender', json.dumps({'model':'LEDMatrix', 'data':['!intensity '+str(light)]}))
m = re.match("^"+name+".*暗.?點.*$", msg['data'][0])
if m != None:
if light > 0:
light -= 1
DAN.push('LEDCommandSender', json.dumps({'model':'LEDMatrix', 'data':['!intensity '+str(light)]}))
m = re.match("^"+name+".*快.?點.*$", msg['data'][0])
if m != None:
interval /= 2
DAN.push('LEDCommandSender', json.dumps({'model':'LEDMatrix', 'data':['!interval '+str(interval)]}))
m = re.match("^"+name+".*慢.?點.*$", msg['data'][0])
if m != None:
interval *= 2
DAN.push('LEDCommandSender', json.dumps({'model':'LEDMatrix', 'data':['!interval '+str(interval)]}))
m = re.match("^"+name+".*間隔.*(久|長).?點.*$", msg['data'][0])
if m != None:
if changeinterval <3:
changeinterval += 3
m = re.match("^"+name+".*間隔.*(短|小).?點.*$", msg['data'][0])
if m != None:
if changeinterval > 3:
changeinterval -= 3
m = re.match("^"+name+".*向左滑.*$", msg['data'][0])
if m != None:
DAN.push('LEDCommandSender', json.dumps({'model':'LEDMatrix', 'data':['!mode scrollLeft']}))
m = re.match("^"+name+".*向右滑.*$", msg['data'][0])
if m != None:
DAN.push('LEDCommandSender', json.dumps({'model':'LEDMatrix', 'data':['!mode scrollRight']}))
m = re.match("^.*你.*名字.*$", msg['data'][0])
if m != None:
interrupt(name)
m = re.match("^"+name+".*插播(.*)$", msg['data'][0])
if m != None:
interrupt(m.group(1))
m = re.match("^"+name+".*顯示([0-9]*)秒(.*)$", msg['data'][0])
if m != None:
push2queue(m.group(2), eval(m.group(1)))
m = re.match("^"+name+".*改名.*([a-z]*)$", msg['data'][0])
if m != None:
name = m.group(1)
m = re.match("^"+name+"笑臉$", msg['data'][0])
if m != None:
interrupt("O wO")
m = re.match("^"+name+"哭臉$", msg['data'][0])
if m != None:
interrupt("Q wQ")
m = re.match("^"+name+"眨眼$", msg['data'][0])
if m != None:
interrupt("> w<")
m = re.match("^(!.*)$", msg['data'][0])
if m != None:
DAN.push('LEDCommandSender', json.dumps({'model':'LEDMatrix', 'data':[m.group(1)]}))
elif model == 'weather':
push2queue(msg['data'][0], 60)
except Exception as e:
print(e)
print(traceback.format_exception(None, e, e.__traceback__), file=sys.stderr, flush=True)
if str(e).find('mac_addr not found:') != -1:
print('Reg_addr is not found. Try to re-register...')
DAN.device_registration_with_retry(ServerURL, Reg_addr)
else:
            print('Connection failed due to unknown reasons.')
time.sleep(1)
time.sleep(0.2)
|
import argparse
import sys
from subprocess import call
import numpy as np
def parseVals(fname):
f = open(fname)
line = f.readline()
rtvals = []
schemes = []
while line:
if (line == '' or line == '\n'):
break
tmp = line.split()
if (tmp[0] == 'matrix'):
schemes = tmp[1:]
# print 'number of schemes:', len(schemes)
# print 'scheme names:', ' '.join(schemes)
line = f.readline()
continue
mat = tmp[0]
tmp = tmp[1:]
assert len(tmp) == len(schemes)
for (rt, sch) in zip(tmp, schemes):
rtvals.append((sch, float(rt), mat, 1.0 / float(rt)))
line = f.readline()
f.close()
return schemes, rtvals
def perfProfVals(schemes, rtvals, args):
    '''
    One partitioning instance per matrix (mat); values are normalized per
    instance before the performance profile is built.
    '''
inst = {}
for x in rtvals:
t = x[2]
if (t not in inst):
inst[t] = []
inst[t].append((x[0], x[3]))
# normalize
instNew = {}
for k, v in inst.iteritems():
if args.lines == '':
best = min([x[1] for x in v])
else:
best = min([v[int(i)][1] for i in args.lines.split(',')])
instNew[k] = []
for i in range(len(v)):
x = list(v[i])
x[1] = float(x[1]) / float(best)
instNew[k].append(tuple(x))
inst = instNew
ninst = len(inst)
sys.stdout.write('Number of instances: %d\n' % (ninst))
# pp.pprint(inst)
xmax = max([x[3] for x in rtvals])
xticks = [round(x, 3) for x in np.arange(1.00, args.xmax * 10, args.xstep)]
# xticks = [round(x,3) for x in np.arange(1.00, args.xmax, args.xstep)]
# xticks.extend(np.linspace(args.xmax, xmax, 10000))
perfvals = {}
for sch in schemes:
perfvals[sch] = [0 for x in range(len(xticks))]
for k, v in inst.iteritems():
for x in v:
sch = x[0]
normval = x[1]
for i in range(len(xticks)):
if (normval <= xticks[i]):
perfvals[sch][i] += 1
# percentages
for k, v in perfvals.iteritems():
for i in range(len(v)):
v[i] = round(float(v[i]) / float(ninst), 3)
return perfvals, xticks
def genPlot(schemes, perfvals, xticks, args):
outdatafile = open((args.name + '-perf-prof.dat'), 'w')
outgpfile = open((args.name + '-perf-prof.p'), 'w')
s = '# within%\t'
for sch in schemes:
s += str(sch) + '\t'
s += '\n'
outdatafile.write(s)
yticks = [round(y, 3) for y in np.arange(0.05, 1.05, args.ystep)]
yticksVals = []
for y_idx in range(len(yticks)):
y = yticks[y_idx]
yticksVals.append([])
for sch in schemes:
vals = perfvals[sch]
x = '?'
for i in range(len(xticks)):
if (y <= vals[i]):
x = xticks[i]
break
yticksVals[y_idx].append(x) # one value per scheme
for x_idx in range(len(xticks)):
x = xticks[x_idx]
for y_idx in range(len(yticks)):
y = yticks[y_idx]
s = str(x) + '\t'
for schVal in yticksVals[y_idx]:
if (str(schVal) == '?'):
s += '?\t'
elif (schVal == x):
s += str(y) + '\t'
else:
# sys.stdout.write('this should not happen.\n')
s += '?\t'
s += '\n'
outdatafile.write(s)
outdatafile.close()
outgpfile.write('set term postscript eps enhanced color\n')
outgpfile.write('set output \'' +
args.name + '-perf-prof.eps' +
'\'\n')
outgpfile.write("set size 2.00,2.75\n")
outgpfile.write("unset log\n")
outgpfile.write("unset label\n")
outgpfile.write("set xrange [%f:%f]\n" %
(0.98, args.xmax))
outgpfile.write("set yrange [0:1.0]\n")
xstr = "set xtics ("
for x in np.arange(1.00, args.xmax, 0.20):
xstr += "\"" + str(round(x, 2)) + "\"" + " " + str(round(x, 2)) + ", "
xstr = xstr[0:len(xstr) - 2]
xstr += ") font \", 18\""
outgpfile.write("%s\n" % (xstr))
ystr = "set ytics ("
for y in np.arange(0.00, 1.10, 0.10):
ystr += "\"" + str(round(y, 2)) + "\"" + " " + str(round(y, 2)) + ", "
ystr = ystr[0:len(ystr) - 2]
ystr += ") font \", 18\""
outgpfile.write("%s\n" % (ystr))
# outgpfile.write('set title \"' +
# args.title +
# '\" font \", 24\"\n')
outgpfile.write('set xlabel \"Parallel runtime relative to the best'
'\" font \", 24\"\n')
outgpfile.write('set ylabel \"fraction of test cases\" font \", 24\"\n')
# outgpfile.write("set key inside right bottom spacing 1.40 font \", 20\"\n")
# outgpfile.write("set key horizontal outside center bottom spacing 1.40 font \", 24\"\n")
if args.style == 1:
pointIds = [5, 4, 7, 6, 13, 12, 9, 8, 11, 10, 15, 14]
pointSizes = [1.5] * 12
lineTypes = [1] * 12
colorIds = ['#DC143C', '#DC143C', '#0000FF', '#0000FF', '#FF7F50', '#FF7F50',
'#DA70D6', '#DA70D6', '#3CB371', '#3CB371', '#808080', '#808080']
outgpfile.write("set bmargin 16\n")
# outgpfile.write("set key horizontal outside bottom center maxcolumns 3 font \",12\n")
# outgpfile.write("unset key\n")
outgpfile.write("set key horizontal inside right bottom maxcolumns 2\n")
# outgpfile.write("set key box\n")
elif args.style == 2:
pointIds = [5, 7, 13, 1, 2, 3]
pointSizes = [1] * 12
lineTypes = [1] * 12
colorIds = ['#DC143C', '#0000FF', '#FF7F50', '#DA70D6', '#3CB371', '#808080']
outgpfile.write("set key horizontal outside bottom center maxcolumns 4 font \",24\n")
elif args.style == 3:
pointIds = [5, 7, 4, 6, 1]
pointSizes = [1] * 12
lineTypes = [1] * 12
colorIds = ['#DC143C', '#0000FF', '#DC143C', '#0000FF', '#DA70D6']
outgpfile.write("set key horizontal inside right bottom maxcolumns 1 font \",24\n")
elif args.style == 4:
pointIds = [5, 7, 13, 15, 1, 2, 3]
pointSizes = [1] * 12
lineTypes = [1] * 12
colorIds = ['#DC143C', '#0000FF', '#FF7F50', '#808080', '#DA70D6', '#3CB371', '#808080']
outgpfile.write("set key horizontal outside center bottom maxcolumns 2 font \",24\n")
else:
exit(1)
# pointIds = [0, 1, 2, 3, 4, 6, 8, 10, 12, 14, 5, 7] # count = 12
# pointIds = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] # count = 12
pointIds = [4, 6, 8, 10, 12, 14, 1, 2]
pointSizes = [1.35, 1.60, 1.90, 1.90, 1.80, 1.70, 1.80, 1.80]
colorIds = ['#DC143C', '#FF7F50', '#DA70D6', '#3CB371', '#808080', '#0000FF', '#B8860B', '#008B8B']
# colorIds = ['#000000'] * len(schemes)
lineTypes = [1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1]
outgpfile.write('set bmargin 14\n')
outgpfile.write("set key horizontal outside center bottom\n")
colorIds = ['#DC143C', '#FF7F50', '#DA70D6', '#3CB371', '#808080', '#0000FF']
pointIds = [-1, 0, 1, 2, 3, -1, 4, 6, 8, 10, 12, 4, 5, 7, 9, 11, 13, 5]
oldLen = len(colorIds)
while len(colorIds) < len(schemes):
colorIds.append(colorIds[len(colorIds) % oldLen])
oldLen = len(lineTypes)
while len(lineTypes) < len(schemes):
lineTypes.append(lineTypes[len(lineTypes) % oldLen])
oldLen = len(pointIds)
while len(pointIds) < len(schemes):
pointIds.append(pointIds[len(pointIds) % oldLen])
oldLen = len(pointSizes)
while len(pointSizes) < len(schemes):
pointSizes.append(pointSizes[len(pointSizes) % oldLen])
if args.lines == '':
lines = [i for i in range(len(schemes))]
else:
lines = [int(i) for i in args.lines.split(',')]
for i in range(len(lines)):
outgpfile.write('set style line %d lc rgb \'%s\' '
'lt %d lw 2 pt %d ps %.2f\n' %
(i + 1, colorIds[i], lineTypes[i],
pointIds[i], pointSizes[i]))
outgpfile.write('plot ')
first = True
for i, v in enumerate(lines):
if first:
first = False
else:
outgpfile.write(', \\\n')
outgpfile.write('\t"' +
args.name + '-perf-prof.dat' +
'\" u 1:%d t \'%s\' '
'w linespoints ls %d' %
(v + 2, '%s' % (schemes[v]), i + 1))
outgpfile.write("\nset output\n")
outgpfile.close()
call(["gnuplot", args.name + '-perf-prof.p'])
return
parser = argparse.ArgumentParser()
parser.add_argument('result_file')
parser.add_argument('--xstep', type=float, default=0.010)
parser.add_argument('--xmax', type=float, default=2.55)
parser.add_argument('--ystep', type=float, default=0.025)
parser.add_argument('--title', type=str, default="TODO: title")
parser.add_argument('--style', type=int, default=0)
parser.add_argument('--lines', type=str, default="")
parser.add_argument('--name', type=str, default="a")
args = parser.parse_args()
schemes, rtvals = parseVals(args.result_file)
perfvals, xticks = perfProfVals(schemes, rtvals, args)
genPlot(schemes, perfvals, xticks, args)
# genPlot(schemes, perfvals, xticks, "Two-phase schemes (cori-KNL)", args)
# genPlot(schemes, perfvals, xticks, "One-phase schemes (cori-KNL)", args)
|
from setuptools import setup
setup(
name='deepgp_approxep',
version='1.0',
description='Approximate Expectation Propagation for Deep GPs',
author='Thang Bui et al (2016)',
author_email='thang.buivn@gmail.com',
packages=['deepgp_approxep'], #same as name
install_requires=['numpy'], #external packages as dependencies
)
|
# The rand7() API is already defined for you.
# def rand7():
# @return a random integer in the range 1 to 7
class Solution:
def rand10(self):
"""
:rtype: int
"""
        # Rejection sampling: keep drawing until a is uniform over 1..5.
        a = rand7()
        while a > 5:
            a = rand7()
        # Reject 4 so b is uniform over {1, 2, 3, 5, 6, 7}; then P(b > 4) = 1/2,
        # giving a fair coin that maps a into 1..5 or 6..10.
        b = rand7()
        while b == 4:
            b = rand7()
        if b > 4:
            return 5 + a
        else:
            return a
# @lc code=end
|
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ config.py ]
# Synopsis [ configuration settings ]
# Author [ Ting-Wei Liu (Andi611) ]
# Copyright [ Copyleft(c), NTUEE, NTU, Taiwan ]
"""*********************************************************************************************"""
import argparse
def get_config():
parser = argparse.ArgumentParser()
parser.add_argument('mode', type=str, choices=['train', 'moments', 'preview', 'incept', 'infer', 'generate'])
parser.add_argument('--conditional', action='store_true', help='Train a conditional SpecGAN')
data_args = parser.add_argument_group('Data')
data_args.add_argument('--data_dir', type=str, help='Data directory')
data_args.add_argument('--data_tfrecord_prefix', type=str, help='Prefix of the .tfrecord files')
data_args.add_argument('--data_first_window', action='store_true', help='If set, only use the first window from each audio example')
data_args.add_argument('--data_moments_fp', type=str, help='Path to store and retrieve the data moments .pkl file')
SpecGAN_args = parser.add_argument_group('SpecGAN')
SpecGAN_args.add_argument('--SpecGAN_kernel_len', type=int, help='Length of square 2D filter kernels')
SpecGAN_args.add_argument('--SpecGAN_dim', type=int, help='Dimensionality multiplier for model of G and D')
SpecGAN_args.add_argument('--SpecGAN_batchnorm', action='store_true', help='Enable batchnorm')
SpecGAN_args.add_argument('--SpecGAN_disc_nupdates', type=int, help='Number of discriminator updates per generator update')
SpecGAN_args.add_argument('--SpecGAN_loss', type=str, choices=['dcgan', 'lsgan', 'wgan', 'wgan-gp'], help='Which GAN loss to use')
SpecGAN_args.add_argument('--SpecGAN_genr_upsample', type=str, choices=['zeros', 'nn', 'lin', 'cub'], help='Generator upsample strategy')
SpecGAN_args.add_argument('--SpecGAN_ngl', type=int, help='Number of Griffin-Lim iterations')
SpecGAN_args.add_argument('--SpecGAN_word_embedding_dim', type=int, help='Dimension for word conditional vectors')
SpecGAN_args.add_argument('--SpecGAN_model_initializer', type=str, choices=['orthogonal', 'default'], help='GAN model initializer')
SpecGAN_args.add_argument('--SpecGAN_prior_noise', type=str, choices=['uniform', 'normal'], help='GAN prior distribution')
train_args = parser.add_argument_group('Train')
train_args.add_argument('--train_dir', type=str, help='Training directory')
train_args.add_argument('--train_batch_size', type=int, help='Batch size')
train_args.add_argument('--train_max_step', type=int, help='Maximum training steps before terminating training')
train_args.add_argument('--train_save_secs', type=int, help='How often to save model')
train_args.add_argument('--train_summary_secs', type=int, help='How often to report summaries')
train_args.add_argument('--train_display_step', type=int, help='How often to display training log')
train_args.add_argument('--train_save_log_step', type=int, help='How often to save training log')
preview_args = parser.add_argument_group('Preview')
preview_args.add_argument('--preview_n', type=int, help='Number of samples to preview')
incept_args = parser.add_argument_group('Incept')
incept_args.add_argument('--incept_metagraph_fp', type=str, help='Inference model for inception score')
incept_args.add_argument('--incept_ckpt_fp', type=str, help='Checkpoint for inference model')
incept_args.add_argument('--incept_n', type=int, help='Number of generated examples to test')
incept_args.add_argument('--incept_k', type=int, help='Number of groups to test')
    generate_args = parser.add_argument_group('Generate')
    generate_args.add_argument('--generate_dir', type=str, help='Generation directory')
    generate_args.add_argument('--generate_num', type=int, help='Number of samples to generate')
    generate_args.add_argument('--generate_visualize_num', type=int, help='Number of samples to generate and visualize')
constant_args = parser.add_argument_group('Constants')
constant_args.add_argument('--_VOCAB_SIZE', type=int, help='Expected vocabulary size')
constant_args.add_argument('--_FS', type=int, help='Frequency')
constant_args.add_argument('--_WINDOW_LEN', type=int, help='Window length')
    constant_args.add_argument('--_D_Z', type=int, help='Dimension of noise z')
constant_args.add_argument('--_LOG_EPS', type=float, help='Log eps constant')
constant_args.add_argument('--_CLIP_NSTD', type=float, help='Clip stantard normalization')
parser.set_defaults(
#---data---#
data_dir='../data/sc09_preprocess_energy',
data_tfrecord_prefix='sc09',
data_first_window=False,
        data_moments_fp='moments.pkl',
#---SpecGAN---#
SpecGAN_kernel_len=5,
SpecGAN_dim=64,
SpecGAN_batchnorm=False,
SpecGAN_disc_nupdates=5,
SpecGAN_loss='wgan-gp',
SpecGAN_genr_upsample='zeros',
SpecGAN_ngl=16,
SpecGAN_word_embedding_dim=10,
SpecGAN_model_initializer='default',
SpecGAN_prior_noise='normal',
#---train---#
train_dir='../train_energy',
train_batch_size=64,
train_max_step=300000,
train_save_secs=300,
train_summary_secs=300,
train_display_step=20,
train_save_log_step=100,
#---preview---#
preview_n=32,
#---incept---#
incept_metagraph_fp='../eval/inception/infer.meta',
incept_ckpt_fp='../eval/inception/best_acc-103005',
incept_n=5000,
incept_k=10,
#---generate---#
generate_dir='../generate_energy',
generate_num=50,
generate_visualize_num=50,
#---constant---#
_VOCAB_SIZE=10,
_FS=16000,
_WINDOW_LEN=16384,
_D_Z=100,
_LOG_EPS=1e-6,
_CLIP_NSTD=3.0)
args = parser.parse_args()
return args
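# Usage sketch (assuming this module is importable as ``config``):
#   from config import get_config
#   args = get_config()             # e.g. invoked as: python main.py train --conditional
#   print(args.train_batch_size)    # 64 unless overridden on the command line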
|
import time
import twitter
import twitter_tokens
api = twitter.Api(
consumer_key=twitter_tokens.api_key,
consumer_secret=twitter_tokens.api_secret,
access_token_key=twitter_tokens.access_token,
access_token_secret=twitter_tokens.access_token_secret
)
# api.PostUpdate("This is the first tweet")
def temp_request(current_temp):
try:
latest_status = api.GetHomeTimeline(count=1)
latest_status_text = latest_status[0].text.lower()
latest_status_id = latest_status[0].id
if "what" in latest_status_text and "temperature" in latest_status_text:
api.DestroyStatus(latest_status_id)
print "Getting and tweeting temp!"
api.PostUpdate("The current temperature is %d deg. (%s)" % (current_temp, time.ctime()))
except Exception as e:
print "There is a problem with connecting to twitter at %s, please check your internet connection. The exception is %s" % (time.ctime(), e)
pass
def tweet(tweet):
try:
api.PostUpdate("%s (%s)" % (tweet, time.ctime()))
except Exception as e:
print "There is a problem connecting to twitter, could not tweet '%s' at %s due to exception %s" % (tweet, time.ctime(), e)
pass
# from twython import Twython
# twy = Twython(
# twitter_tokens.api_key,
# twitter_tokens.api_secret,
# twitter_tokens.access_token,
# twitter_tokens.access_token_secret
# )
# photo = open("/Users/macuser/Desktop/Programming/python/data_analysis/temp.png", 'rb')
# twy.update_status_with_media(media=photo, status="Testing temp graphs.")
|
__copyright__ = "Copyright (c) 2020-2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from PIL import Image
from jina import Flow
from ...pdf_segmenter import PDFSegmenter
def test_flow(test_dir, doc_generator_img_text, expected_text):
flow = Flow().add(uses=PDFSegmenter)
doc_array = doc_generator_img_text
for doc in doc_array:
with flow:
results = flow.post(
on='/test',
inputs=doc,
return_results=True
)
assert len(results[0].docs) == 1
chunks = results[0].docs[0].chunks
assert len(chunks) == 3
for idx, c in enumerate(chunks[:2]):
with Image.open(os.path.join(test_dir, f'data/test_img_{idx}.jpg')) as img:
blob = chunks[idx].blob
assert chunks[idx].mime_type == 'image/*'
                assert (blob.shape[1], blob.shape[0]) == img.size
if idx == 0:
assert blob.shape == (660, 1024, 3)
if idx == 1:
assert blob.shape == (626, 1191, 3)
# Check text
assert chunks[2].text == expected_text
assert chunks[2].mime_type == 'text/plain'
|
from setuptools import setup
setup(
name='pypythia',
packages=['pypythia'],
license='MIT',
version='0.0.3',
author='Christian Schulze',
author_email='chris@andinfinity.de',
url='https://github.com/ChristianSch/PyPythia'
)
|
import asyncio
import audioop
import functools
import ipaddress
import threading
import traceback
import urllib.parse
from typing import Coroutine, Union
import av
from ..config import Config
from ..natives import AudioFifo, AudioFilter
from ..utils.threadLock import withLock
AVOption = {
"err_detect": "ignore_err",
"reconnect": "1",
"reconnect_streamed": "1",
"reconnect_delay_max": "5",
}
class PyAVSource:
def __init__(self, Source: str) -> None:
self.loop = asyncio.get_event_loop()
self.Source = Source
self.AVOption = AVOption
self.Container: av.StreamContainer = None
self.selectAudioStream = self.FrameGenerator = None
self._end = threading.Event()
self._haveToReloadResampler = threading.Event()
self._waitforread = threading.Lock()
self._loading = threading.Lock()
self._seeking = threading.Lock()
self.BufferLoader = None
self.AudioFifo = AudioFifo()
self._duration = None
self._position = 0.0
self._volume = 1.0
self._filter = {}
self.stopped = False
def __del__(self):
self.cleanup()
@property
def volume(self) -> float:
return self._volume
@volume.setter
def volume(self, value: float) -> None:
self._volume = max(value, 0.0)
@property
def filter(self) -> dict:
return self._filter
@filter.setter
def filter(self, value: dict) -> None:
self._filter = value
@property
def duration(self) -> float:
return self._duration
@property
def position(self) -> float:
return round(
self._position
- (
self.AudioFifo.samples
/ 960
/ 50
* (self.filter["atempo"] if "atempo" in self.filter else 1.0)
),
2,
)
def read(self) -> bytes:
if not self.BufferLoader:
self.start()
if not self.AudioFifo:
return
Data = self.AudioFifo.read()
if not Data and self._loading.locked():
while self._loading.locked():
if not self._waitforread.locked():
self._waitforread.acquire()
self._waitforread.acquire()
Data = self.AudioFifo.read()
if Data:
break
if Data and self.volume != 1.0:
Data = audioop.mul(Data, 2, min(self._volume, 2.0))
return Data
def _seek(self, offset: float, *args, **kwargs) -> None:
with withLock(self._seeking):
if not self.Container:
if not self._loading.locked():
                    self.Container = av.open(
                        self.Source, options=self.AVOption
                    )
else:
while not self.Container:
pass
kwargs["any_frame"] = True
self.Container.seek(round(max(offset, 1) * 1000000), *args, **kwargs)
self.reload()
def seek(self, offset: float, *args, **kwargs) -> Coroutine:
return self.loop.run_in_executor(
None, functools.partial(self._seek, offset, *args, **kwargs)
)
def reload(self) -> None:
self._haveToReloadResampler.set()
if not self._loading.locked():
if self._end.is_set():
self._end.clear()
self.start()
def start(self) -> None:
self.BufferLoader = Loader(self)
self.BufferLoader.start()
def stop(self) -> bool:
self.stopped = True
return self.stopped
def is_opus(self) -> bool:
return False
def cleanup(self) -> None:
self._end.set()
if self.AudioFifo and not self.AudioFifo.haveToFillBuffer.is_set():
self.AudioFifo.haveToFillBuffer.clear()
self.AudioFifo = None
class Loader(threading.Thread):
def __init__(self, AudioSource: PyAVSource) -> None:
threading.Thread.__init__(self)
self.daemon = True
self.Source = AudioSource
self.Resampler = None
self.Filter = {}
self.FilterGraph = None
def _do_run(self) -> None:
with withLock(self.Source._loading):
if not self.Source.Container:
self.Source.Container = av.open(
self.Source.Source, options=self.Source.AVOption
)
self.Source._duration = round(self.Source.Container.duration / 1000000, 2)
self.Source.selectAudioStream = self.Source.Container.streams.audio[0]
self.Source.FrameGenerator = self.Source.Container.decode(
self.Source.selectAudioStream
)
while not self.Source._end.is_set():
if self.Source.filter != self.Filter:
self.Filter = self.Source.filter
if self.Source.filter:
self.FilterGraph = AudioFilter()
self.FilterGraph.selectAudioStream = (
self.Source.selectAudioStream
)
self.FilterGraph.setFilters(self.Filter)
else:
self.FilterGraph = None
if not self.Resampler or self.Source._haveToReloadResampler.is_set():
self.Resampler = av.AudioResampler(
format=av.AudioFormat("s16").packed, layout="stereo", rate=48000
)
self.Source._haveToReloadResampler.clear()
_seek_locked = False
if self.Source._seeking.locked():
self.Source._seeking.acquire()
_seek_locked = True
Frame = next(self.Source.FrameGenerator, None)
if _seek_locked:
self.Source._seeking.release()
self.Source.AudioFifo.reset()
if not Frame:
self.Source.stop()
break
_current_position = float(Frame.pts * Frame.time_base)
if self.FilterGraph:
self.FilterGraph.push(Frame)
Frame = self.FilterGraph.pull()
if not Frame:
continue
Frame.pts = None
try:
Frame = self.Resampler.resample(Frame)
except ValueError:
self.Source._haveToReloadResampler.set()
continue
if self.Source.AudioFifo:
if not self.Source.AudioFifo.haveToFillBuffer.is_set():
self.Source.AudioFifo.haveToFillBuffer.wait()
self.Source.AudioFifo.write(Frame)
self.Source._position = _current_position
if self.Source._waitforread.locked():
self.Source._waitforread.release()
def run(self) -> None:
try:
self._do_run()
except:
traceback.print_exc()
finally:
if self.Source.Container:
self.Source.Container.close()
self.Source.Container = None
self.Source.stop()
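# Rough usage sketch (illustrative; assumes PyAV/FFmpeg can open the given source):
#   source = PyAVSource("https://example.com/audio.mp3")
#   pcm = source.read()    # lazily starts the Loader thread and returns s16
#                          # stereo 48 kHz PCM bytes from the AudioFifo
#   source.seek(30.0)      # schedules a seek on the event loop's executor
#   source.cleanup()       # stops buffering and releases the FIFO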
|
#!/usr/bin/env python
#____________________________________________________________
#
#
# A very simple way to make plots with ROOT via an XML file
#
# Francisco Yumiceva
# yumiceva@fnal.gov
#
# Fermilab, 2010
#
#____________________________________________________________
"""
ntuplemaker
A very simple script to plot the beam spot data stored in condDB
usage: %prog -t <tag name>
-a, --auth = AUTH: DB authorization path. online(/nfshome0/popcondev/conddb).
-b, --batch : Run ROOT in batch mode.
-c, --create = CREATE: name for beam spot data file.
-d, --data = DATA: input beam spot data file.
-D, --destDB = DESTDB: destination DB string. online(oracle://cms_orcon_prod/CMS_COND_31X_BEAMSPOT).
-i, --initial = INITIAL: First IOV. Options: run number, or run:lumi, eg. \"133200:21\"
-f, --final = FINAL: Last IOV. Options: run number, or run:lumi
-o, --output = OUTPUT: filename of ROOT file with plots.
-x, --xcrossing = XCROSSING : Bunch crossing number.
Francisco Yumiceva (yumiceva@fnal.gov)
Fermilab 2010
"""
from __future__ import print_function
from builtins import range
import os, string, re, sys, math
import commands, time
from BeamSpotObj import BeamSpot
from IOVObj import IOV
from CommonMethods import *
try:
import ROOT
except:
print("\nCannot load PYROOT, make sure you have setup ROOT in the path")
print("and pyroot library is also defined in the variable PYTHONPATH, try:\n")
if (os.getenv("PYTHONPATH")):
print(" setenv PYTHONPATH ${PYTHONPATH}:$ROOTSYS/lib\n")
else:
print(" setenv PYTHONPATH $ROOTSYS/lib\n")
sys.exit()
from ROOT import *
from array import array
def getFill( json, run ):
    """Return the LHC fill number whose [first run, last run] range contains run."""
thefill = 0
run = int(run)
keys = json.keys()
for i in keys:
run0 = int(json[i][0])
run1 = int(json[i][1])
if run>= run0 and run<=run1:
thefill = i
return int(thefill)
if __name__ == '__main__':
# fill and runs
FillList = {}
runsfile = open("FillandRuns.txt")
for line in runsfile:
if line.find('fill:') != -1:
aline = line.split()
afill = aline[1]
run0 = aline[3]
run1 = aline[5]
FillList[int(afill)] = [int(run0),int(run1)]
#print FillList
# create ntuple
gROOT.ProcessLine(
"struct spot {\
Float_t position[3];\
Float_t posError[3];\
Float_t width[3];\
Float_t widthError[3];\
Float_t slope[2];\
Float_t slopeError[2];\
Float_t time[2];\
Int_t run;\
Int_t lumi[2];\
Int_t fill;\
};" );
bntuple = spot()
fntuple = TFile( 'bntuple.root', 'RECREATE' )
tbylumi = TTree( 'bylumi', 'beam spot data lumi by lumi' )
tbylumi.Branch('fill', AddressOf( bntuple, 'fill'), 'fill/I' )
tbylumi.Branch('run', AddressOf( bntuple, 'run'), 'run/I' )
tbylumi.Branch('lumi', AddressOf( bntuple, 'lumi'), 'lumi[2]/I' )
tbylumi.Branch('position', AddressOf( bntuple, 'position'),'position[3]/F')
tbylumi.Branch('posErr', AddressOf( bntuple, 'posError'),'posError[3]/F')
tbylumi.Branch('width', AddressOf( bntuple, 'width'),'width[3]/F')
tbylumi.Branch('widthErr', AddressOf( bntuple, 'widthError'),'widthError[3]/F')
tbylumi.Branch('slope', AddressOf( bntuple, 'slope'),'slope[2]/F')
tbylumi.Branch('slopeErr', AddressOf( bntuple, 'slopeError'),'slopeError[2]/F')
tbylumi.Branch('time', AddressOf( bntuple, 'time'),'time[2]/F')
tbyIOV = TTree( 'byIOV', 'beam spot data by IOV' )
tbyIOV.Branch('fill', AddressOf( bntuple, 'fill'), 'fill/I' )
tbyIOV.Branch('run', AddressOf( bntuple, 'run'), 'run/I' )
tbyIOV.Branch('lumi', AddressOf( bntuple, 'lumi'), 'lumi[2]/I' )
tbyIOV.Branch('position', AddressOf( bntuple, 'position'),'position[3]/F')
tbyIOV.Branch('posErr', AddressOf( bntuple, 'posError'),'posError[3]/F')
tbyIOV.Branch('width', AddressOf( bntuple, 'width'),'width[3]/F')
tbyIOV.Branch('widthErr', AddressOf( bntuple, 'widthError'),'widthError[3]/F')
tbyIOV.Branch('slope', AddressOf( bntuple, 'slope'),'slope[2]/F')
tbyIOV.Branch('slopeErr', AddressOf( bntuple, 'slopeError'),'slopeError[2]/F')
tbyIOV.Branch('time', AddressOf( bntuple, 'time'),'time[2]/F')
tbyrun = TTree( 'byrun', 'beam spot data by run' )
tbyrun.Branch('fill', AddressOf( bntuple, 'fill'), 'fill/I' )
tbyrun.Branch('run', AddressOf( bntuple, 'run'), 'run/I' )
tbyrun.Branch('lumi', AddressOf( bntuple, 'lumi'), 'lumi[2]/I' )
tbyrun.Branch('position', AddressOf( bntuple, 'position'),'position[3]/F')
tbyrun.Branch('posErr', AddressOf( bntuple, 'posError'),'posError[3]/F')
tbyrun.Branch('width', AddressOf( bntuple, 'width'),'width[3]/F')
tbyrun.Branch('widthErr', AddressOf( bntuple, 'widthError'),'widthError[3]/F')
tbyrun.Branch('slope', AddressOf( bntuple, 'slope'),'slope[2]/F')
tbyrun.Branch('slopeErr', AddressOf( bntuple, 'slopeError'),'slopeError[2]/F')
tbyrun.Branch('time', AddressOf( bntuple, 'time'),'time[2]/F')
# COMMAND LINE OPTIONS
#################################
option,args = parse(__doc__)
if not args and not option: exit()
if not option.data:
print(" need to provide beam spot data file")
exit()
if option.batch:
ROOT.gROOT.SetBatch()
datafilename = "tmp_beamspot.dat"
if option.create:
datafilename = option.create
getDBdata = True
if option.data:
getDBdata = False
IOVbase = 'lumibase'
firstRun = "0:0"
lastRun = "4999999999:4999999999"
if option.initial:
firstRun = option.initial
if option.final:
lastRun = option.final
# GET IOVs
################################
if getDBdata:
print(" read DB to get list of IOVs for the given tag")
acommand = 'cmscond_list_iov -c frontier://PromptProd/CMS_COND_31X_BEAMSPOT -P /afs/cern.ch/cms/DB/conddb -t '+ tag
tmpstatus = commands.getstatusoutput( acommand )
tmplistiov = tmpstatus[1].split('\n')
#print tmplistiov
iovlist = []
passline = False
iline = jline = 0
totlines = len(tmplistiov)
for line in tmplistiov:
if line.find('since') != -1:
passline = True
jline = iline
if passline and iline > jline and iline < totlines-1:
linedata = line.split()
#print linedata
aIOV = IOV()
aIOV.since = int(linedata[0])
aIOV.till = int(linedata[1])
iovlist.append( aIOV )
iline += 1
print(" total number of IOVs = " + str(len(iovlist)))
# GET DATA
################################
otherArgs = ''
if option.destDB:
otherArgs = " -d " + option.destDB
if option.auth:
otherArgs = otherArgs + " -a "+ option.auth
print(" get beam spot data from DB for IOVs. This can take a few minutes ...")
tmpfile = open(datafilename,'w')
for iIOV in iovlist:
passiov = False
tmprunfirst = firstRun
tmprunlast = lastRun
tmplumifirst = 1
tmplumilast = 9999999
if IOVbase=="lumibase":
#tmprunfirst = int(firstRun.split(":")[0])
#tmprunlast = int(lastRun.split(":")[0])
#tmplumifirst = int(firstRun.split(":")[1])
#tmplumilast = int(lastRun.split(":")[1])
tmprunfirst = pack( int(firstRun.split(":")[0]) , int(firstRun.split(":")[1]) )
                tmprunlast = pack( int(lastRun.split(":")[0]) , int(lastRun.split(":")[1]) )
#print "since = " + str(iIOV.since) + " till = "+ str(iIOV.till)
if iIOV.since >= int(tmprunfirst) and int(tmprunlast) < 0 and iIOV.since <= int(tmprunfirst):
print(" IOV: " + str(iIOV.since))
passiov = True
if iIOV.since >= int(tmprunfirst) and int(tmprunlast) > 0 and iIOV.till <= int(tmprunlast):
print(" IOV: " + str(iIOV.since) + " to " + str(iIOV.till))
passiov = True
if iIOV.since >= int(tmprunlast) and iIOV.till >= 4294967295:
print(" IOV: " + str(iIOV.since) + " to " + str(iIOV.till))
passiov = True
if passiov:
acommand = 'getBeamSpotDB.py -t '+ tag + " -r " + str(iIOV.since) +otherArgs
if IOVbase=="lumibase":
tmprun = unpack(iIOV.since)[0]
tmplumi = unpack(iIOV.since)[1]
acommand = 'getBeamSpotDB.py -t '+ tag + " -r " + str(tmprun) +" -l "+tmplumi +otherArgs
status = commands.getstatusoutput( acommand )
tmpfile.write(status[1])
print(" beam spot data collected and stored in file " + datafilename)
tmpfile.close()
# PROCESS DATA
###################################
# check if input data exists if given
if option.data:
if os.path.isdir(option.data):
tmp = commands.getstatusoutput("ls "+option.data)
files = tmp[1].split()
datafilename = "combined_all.txt"
output = open(datafilename,"w")
for f in files:
if os.path.isdir(option.data+"/"+f) is False:
input = open(option.data +"/"+f)
output.writelines(input.readlines())
output.close()
print(" data files have been collected in "+datafilename)
elif os.path.exists(option.data):
datafilename = option.data
else:
print(" input beam spot data DOES NOT exist, file " + option.data)
exit()
listbeam = []
if option.xcrossing:
listmap = readBeamSpotFile(datafilename,listbeam,IOVbase,firstRun,lastRun)
# bx
print("List of bunch crossings in the file:")
print(listmap.keys())
        listbeam = listmap[option.xcrossing]
else:
readBeamSpotFile(datafilename,listbeam,IOVbase,firstRun,lastRun)
sortAndCleanBeamList(listbeam,IOVbase)
###################################
for ii in range(0,len(listbeam)):
ibeam = listbeam[ii]
bntuple.position = array('f', [float(ibeam.X), float(ibeam.Y), float(ibeam.Z)])
bntuple.posError = array('f', [float(ibeam.Xerr),float(ibeam.Yerr),float(ibeam.Zerr)])
bntuple.width = array('f', [float(ibeam.beamWidthX), float(ibeam.beamWidthY), float(ibeam.sigmaZ)])
bntuple.widthError = array('f',[float(ibeam.beamWidthXerr),float(ibeam.beamWidthYerr),float(ibeam.sigmaZerr)])
bntuple.run = int(ibeam.Run)
bntuple.fill = int( getFill( FillList, int(ibeam.Run) ) )
bntuple.lumi = array('i', [int(ibeam.IOVfirst),int(ibeam.IOVlast)])
line = ibeam.IOVBeginTime
begintime = time.mktime( time.strptime(line.split()[0] + " " + line.split()[1] + " " + line.split()[2],"%Y.%m.%d %H:%M:%S %Z") )
line = ibeam.IOVEndTime
endtime = time.mktime( time.strptime(line.split()[0] + " " + line.split()[1] + " " + line.split()[2],"%Y.%m.%d %H:%M:%S %Z") )
bntuple.time = array('f', [begintime, endtime])
tbylumi.Fill()
iovlist = listbeam
iovlist = createWeightedPayloads("tmp.txt",iovlist,False)
for ii in range(0,len(iovlist)):
ibeam = iovlist[ii]
bntuple.position = array('f', [float(ibeam.X), float(ibeam.Y), float(ibeam.Z)])
bntuple.posError = array('f', [float(ibeam.Xerr),float(ibeam.Yerr),float(ibeam.Zerr)])
bntuple.width = array('f', [float(ibeam.beamWidthX), float(ibeam.beamWidthY), float(ibeam.sigmaZ)])
bntuple.widthError = array('f',[float(ibeam.beamWidthXerr),float(ibeam.beamWidthYerr),float(ibeam.sigmaZerr)])
bntuple.run = int(ibeam.Run)
bntuple.fill = int( getFill( FillList, int(ibeam.Run) ) )
bntuple.lumi = array('i', [int(ibeam.IOVfirst),int(ibeam.IOVlast)])
line = ibeam.IOVBeginTime
begintime = time.mktime( time.strptime(line.split()[0] + " " + line.split()[1] + " " + line.split()[2],"%Y.%m.%d %H:%M:%S %Z") )
line = ibeam.IOVEndTime
endtime = time.mktime( time.strptime(line.split()[0] + " " + line.split()[1] + " " + line.split()[2],"%Y.%m.%d %H:%M:%S %Z") )
bntuple.time = array('f', [begintime, endtime])
tbyIOV.Fill()
weightedlist = listbeam
weightedlist = createWeightedPayloads("tmp.txt",weightedlist,True)
for ii in range(0,len(weightedlist)):
ibeam = weightedlist[ii]
bntuple.position = array('f', [float(ibeam.X), float(ibeam.Y), float(ibeam.Z)])
bntuple.posError = array('f', [float(ibeam.Xerr),float(ibeam.Yerr),float(ibeam.Zerr)])
bntuple.width = array('f', [float(ibeam.beamWidthX), float(ibeam.beamWidthY), float(ibeam.sigmaZ)])
bntuple.widthError = array('f',[float(ibeam.beamWidthXerr),float(ibeam.beamWidthYerr),float(ibeam.sigmaZerr)])
bntuple.run = int(ibeam.Run)
bntuple.fill = int( getFill( FillList, int(ibeam.Run) ) )
bntuple.lumi = array('i', [int(ibeam.IOVfirst),int(ibeam.IOVlast)])
line = ibeam.IOVBeginTime
begintime = time.mktime( time.strptime(line.split()[0] + " " + line.split()[1] + " " + line.split()[2],"%Y.%m.%d %H:%M:%S %Z") )
line = ibeam.IOVEndTime
endtime = time.mktime( time.strptime(line.split()[0] + " " + line.split()[1] + " " + line.split()[2],"%Y.%m.%d %H:%M:%S %Z") )
bntuple.time = array('f', [begintime, endtime])
tbyrun.Fill()
os.system('rm tmp.txt')
fntuple.cd()
tbylumi.Write()
tbyIOV.Write()
tbyrun.Write()
fntuple.Close()
# CLEAN temporal files
###################################
#os.system('rm tmp_beamspotdata.log')
|
"""A package (for Nevow) for defining the schema, validation and rendering of
HTML forms.
"""
version_info = (0, 9, 3)
version = '.'.join([str(i) for i in version_info])
from nevow import static
from formal.types import *
from formal.validation import *
from formal.widget import *
from formal.widgets.restwidget import *
from formal.widgets.multiselect import *
from formal.form import Form, Field, Group, ResourceMixin, renderForm
from formal import iformal
def widgetFactory(widgetClass, *a, **k):
def _(original):
return widgetClass(original, *a, **k)
return _
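# Example (illustrative): widgetFactory binds extra constructor arguments so the
# form machinery can still build the widget from the field's type alone, e.g.
#   Field('notes', String(), widgetFactory(SomeWidget, someOption=True))
# where SomeWidget/someOption stand in for a real widget class and its keyword
# arguments.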
try:
import pkg_resources
except ImportError:
import os.path
defaultCSS = static.File(os.path.join(os.path.split(__file__)[0], 'formal.css'))
formsJS = static.File(os.path.join(os.path.split(__file__)[0], 'js'))
else:
from formal.util import LazyResource
defaultCSS = LazyResource(lambda: static.File(pkg_resources.resource_filename('formal', 'formal.css')))
formsJS = LazyResource(lambda: static.File(pkg_resources.resource_filename('formal', 'js')))
del LazyResource
# Register standard adapters
from twisted.python.components import registerAdapter
from formal import converters
from formal.util import SequenceKeyLabelAdapter
registerAdapter(TextInput, String, iformal.IWidget)
registerAdapter(TextInput, Integer, iformal.IWidget)
registerAdapter(TextInput, Float, iformal.IWidget)
registerAdapter(Checkbox, Boolean, iformal.IWidget)
registerAdapter(DatePartsInput, Date, iformal.IWidget)
registerAdapter(TextInput, Time, iformal.IWidget)
registerAdapter(FileUploadRaw, File, iformal.IWidget)
registerAdapter(SequenceKeyLabelAdapter, tuple, iformal.IKey)
registerAdapter(SequenceKeyLabelAdapter, tuple, iformal.ILabel)
registerAdapter(converters.NullConverter, String, iformal.IStringConvertible)
registerAdapter(converters.DateToDateTupleConverter, Date, iformal.IDateTupleConvertible)
registerAdapter(converters.BooleanToStringConverter, Boolean, iformal.IBooleanConvertible)
registerAdapter(converters.BooleanToStringConverter, Boolean, iformal.IStringConvertible)
registerAdapter(converters.IntegerToStringConverter, Integer, iformal.IStringConvertible)
registerAdapter(converters.FloatToStringConverter, Float, iformal.IStringConvertible)
registerAdapter(converters.DateToStringConverter, Date, iformal.IStringConvertible)
registerAdapter(converters.TimeToStringConverter, Time, iformal.IStringConvertible)
registerAdapter(converters.NullConverter, File, iformal.IFileConvertible)
registerAdapter(converters.NullConverter, Sequence, iformal.ISequenceConvertible)
try:
Decimal
except NameError:
pass
else:
registerAdapter(TextInput, Decimal, iformal.IWidget)
registerAdapter(converters.DecimalToStringConverter, Decimal, iformal.IStringConvertible)
del SequenceKeyLabelAdapter
del registerAdapter
|
# !/usr/bin/env python3
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
import argparse
import logging
import paddle
class UnpackDataLoader(paddle.io.DataLoader):
def __init__(self, *args, **kwargs):
super(UnpackDataLoader, self).__init__(*args, batch_size=1, **kwargs)
    def __iter__(self):
        # Strip the leading batch dimension (batch_size=1) from every tensor in the batch.
        return ([yy[0] for yy in y]
                for y in super(UnpackDataLoader, self).__iter__())
def create_if_not_exists(dir):
try:
dir.mkdir(parents=True)
except FileExistsError:
pass
return dir
def get_warmup_and_linear_decay(max_steps, warmup_steps):
return lambda step: min(step / warmup_steps, 1. - (step - warmup_steps) / (max_steps - warmup_steps))
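# Illustrative check of the schedule above (plain Python, no Paddle objects needed):
# the multiplier ramps linearly from 0 to 1 over warmup_steps, then decays
# linearly back to 0 at max_steps.
if __name__ == "__main__":
    sched = get_warmup_and_linear_decay(max_steps=100, warmup_steps=10)
    print([round(sched(s), 2) for s in (0, 5, 10, 55, 100)])
    # expected: [0.0, 0.5, 1.0, 0.5, 0.0]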
|
import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class ColorgradingGetGlobalNode(Node, ArmLogicTreeNode):
'''Colorgrading Get Global node'''
bl_idname = 'LNColorgradingGetGlobalNode'
bl_label = 'Colorgrading Get Global'
bl_icon = 'QUESTION'
def init(self, context):
self.outputs.new('NodeSocketFloat', 'Whitebalance')
self.outputs.new('NodeSocketVector', 'Tint')
self.outputs.new('NodeSocketVector', 'Saturation')
self.outputs.new('NodeSocketVector', 'Contrast')
self.outputs.new('NodeSocketVector', 'Gamma')
self.outputs.new('NodeSocketVector', 'Gain')
self.outputs.new('NodeSocketVector', 'Offset')
add_node(ColorgradingGetGlobalNode, category='Postprocess')
|
import base64
import random
import re
from random import randint
import discord
from discord.ext.commands import Cog, Context, command, group, cooldown, BucketType, CommandError
import missile
from dimsecret import debug
max_pp_size = 69
guild_id = 675477913411518485
spam_ch_id = 723153902454964224
bot_ch_id = 718210372561141771
def encode(text: str) -> str:
"""Converts the given string to base64"""
b: bytes = text.encode()
encoded: bytes = base64.b64encode(b)
return encoded.decode()
def decode(text: str) -> str:
    """Converts the given base64 string back to text"""
b: bytes = text.encode()
decoded: bytes = base64.b64decode(b)
return decoded.decode()
class BasePPException(CommandError):
def __init__(self, message=None, *args):
self.message = message
super().__init__(message, args)
def __str__(self):
return self.message
class PPNotFound(BasePPException):
def __init__(self, target_is_sender: bool):
if target_is_sender:
super().__init__("Please set up your pp by `{0}pp`!")
else:
super().__init__('Target has no pp.')
class PPStunned(BasePPException):
def __init__(self, target_is_sender: bool):
if target_is_sender:
super().__init__('Your pp is stunned! Please use `{0}pp sf` to remove the effect!')
else:
super().__init__('Target is stunned!')
class PPLocked(BasePPException):
def __init__(self, target_is_sender: bool):
if target_is_sender:
super().__init__('Your pp is locked! Please use `{0}pp lock` to unlock!')
else:
super().__init__('Target has enabled lock!')
class PP:
def __init__(self, size: int, viagra, sesami, stun=0):
self.size: int = size
self.viagra: int = viagra # -1: Not available 0: Not activated 1-3: rounds left
self.score = 0
self.sesami_oil: bool = sesami
self.stun: int = stun
self.lock: bool = False
def draw(self) -> str:
"""Returns the string for displaying pp"""
description = f'Ɛ{"Ξ" * self.size}>'
if self.lock:
description = f"🔒Locked\n{description}"
if self.viagra > 0:
description = f'**{description}**\nViagra rounds left: {self.viagra}'
elif self.viagra == 0:
description += '\nViagra available!'
if self.sesami_oil:
description += '\nSesami oil'
if self.size == max_pp_size:
description += '\n**MAX POWER**'
if self.stun:
description += f'\n**STUNNED:** {self.stun} rounds left'
return description
def check_lock(self, b):
if self.lock:
raise PPLocked(b)
return self
class BitBay(Cog):
"""Utilities for 128BB
Version 1.4"""
def __init__(self, bot):
self.bot: missile.Bot = bot
self.organs: dict = {} # Dict for storing pp size
@Cog.listener()
async def on_message(self, msg: discord.Message):
"""Message Pattern Matching logic"""
if msg.guild and (msg.guild.id == guild_id or debug) and not msg.author.bot:
if re.search(r".*BSoD", msg.content):
await msg.reply('https://discord.com/channels/675477913411518485/675477914019430423/825823145315270687')
return
if re.search(r"^((?!n('?t|o)).)*(play.*(3D All Star|3d?AS))$", msg.content, re.IGNORECASE):
await msg.reply('YOU WHAT??? <:pepestab:725176431121793084>')
return
if re.search(r".*(no one|who|don'?t) care", msg.content, re.IGNORECASE):
await msg.reply('I CARE, BITCH')
return
match = re.search(r".*(where|how) .*?(get|download|find|obtain|acquire) ", msg.content, re.IGNORECASE)
if match: # Download-related
match = msg.content
# match = match.string[match.span()[1]:]
if re.search(r"(.* |^)(switch|yuzu|ryu)", match, re.IGNORECASE):
if re.search(r"(.* |^)(game|nsp|xci|rom)", match, re.IGNORECASE):
await msg.reply("Please use <#730596209701421076>, don't use FitGirl repacks.")
elif re.search(r"(.* |^)shader", match, re.IGNORECASE):
await msg.reply("<#709944999399260190>")
elif re.search(r"(.* |^)key", match, re.IGNORECASE):
await msg.reply("<#702908846565490708>")
elif re.search(r"(.* |^)change ?log", match, re.IGNORECASE):
await msg.reply("<#749927995183202376>")
elif re.search(r"(.* |^).*mod", match, re.IGNORECASE):
await msg.reply("Please check pins in <#702621234835226744>")
elif re.search(r"(.* |^)save", match, re.IGNORECASE):
await msg.reply("<#718565804345393182>")
elif re.search(r"(.* |^)mii", match, re.IGNORECASE):
await msg.reply("<#731478871823613962>")
elif re.search(r"(.* |^)firmware", match, re.IGNORECASE):
await msg.reply("Yuzu doesn't need firmware. Unsubscribe the guy that said it.\nSwitch firmware"
" link is in the oldest pin at <#718990080387317850> but I PMed you")
await msg.author.send(decode('aHR0cHM6Ly9kYXJ0aHN0ZXJuaWUubmV0L3N3aXRjaC1maXJtd2FyZXMv'))
elif re.search(r"(.* |^)(cemu|wii ?u)", match, re.IGNORECASE):
await msg.reply("May I suggest you <#718989936837263450> pins?")
elif re.search(r"(.* |^)(citra|3ds) ", match, re.IGNORECASE):
await msg.reply("May I suggest you <#750213635975938112> pins?")
elif re.search(r"(.* |^)(gc|gamecube|wii|dolphin) ", match, re.IGNORECASE):
await msg.reply("May I suggest you <#750178026704207944> pins?")
elif re.search(r"(.* |^)n?ds", match, re.IGNORECASE):
await msg.reply("May I suggest you <#749996667511767090> pins?")
elif re.search(r"(.* |^)(rom|game|shader|mod|key|save|mii|firmware)", match, re.IGNORECASE):
await msg.reply('Please specify the emulator you want e.g. `Where download switch games`\n'
'Tips: You can send `d.dec <base64>` to decode all those aHxxxx text!')
elif re.search(r"(.* |^)amiibo", match, re.IGNORECASE):
await msg.reply('<#796160202067017789>')
@command(aliases=('enc',))
async def encode(self, ctx: Context, *, url: str):
"""Encodes base64 via command"""
if ctx.channel.type == discord.ChannelType.text:
await ctx.message.delete()
if missile.is_url(url):
await ctx.send(f'<{self.bot.ip}b64d?s={encode(url)}>')
else:
url = ctx.author.mention + ': ' + url
await ctx.send(encode(url))
@command(aliases=('dec',))
async def decode(self, ctx: Context, content: str):
"""Decodes base64 via command"""
import binascii
try:
await ctx.author.send(decode(content))
await ctx.message.add_reaction('✅')
except (UnicodeDecodeError, binascii.Error):
await ctx.send('Malformed base64 string.')
@staticmethod
def pp_embed(user: discord.User, pp: PP):
return missile.Embed(user.display_name + "'s pp", pp.draw())
def get_pp(self, ctx: Context, target_id: int):
if self.organs.get(target_id, None):
return self.organs[target_id]
raise PPNotFound(ctx.author.id == target_id)
def get_pp_checked(self, ctx: Context, target_id: int):
pp = self.get_pp(ctx, target_id)
b = ctx.author.id == target_id
if pp.stun:
raise PPStunned(b)
pp.check_lock(b)
return pp
@group(invoke_without_command=True)
async def pp(self, ctx: Context, user: discord.User = None):
"""
Wiki for the d.pp commands: https://github.com/TCLRainbow/DimBot/wiki/pp
"""
if user: # If target already has pp, allows modifying. Else throw PPNotFound as you can't initialise others
pp = self.get_pp(ctx, user.id)
pp.check_lock(ctx.author == user)
else: # Check if sender has pp as no target is specified
user = ctx.author
pp = self.organs.get(ctx.author.id, None)
if pp and pp.stun: # Checks whether the to-be-rolled PP is stunned
raise PPStunned(ctx.author.id == user.id)
# Randomises user's pp properties
size = randint(0, max_pp_size)
viagra = (randint(0, 100) < 25) - 1
sesami = randint(0, 100) < 10
if pp: # Updates a PP if exist
pp.size = size
pp.viagra = viagra
if sesami:
pp.sesami_oil = True
else: # Creates PP if not exist
pp = self.organs[user.id] = PP(size, viagra, sesami)
await ctx.reply(embed=self.pp_embed(user, pp))
@pp.command()
async def info(self, ctx: Context, user: discord.User = None):
"""Shows the pp info"""
user = user if user else ctx.author
pp = self.get_pp(ctx, user.id)
await ctx.reply(embed=missile.Embed(f'pp size: {pp.size}', pp.draw()))
@pp.command()
async def slap(self, ctx: Context, user: discord.User):
"""Use pp to slap others"""
pp = self.get_pp(ctx, ctx.author.id)
await ctx.send(embed=missile.Embed(description=pp.draw(), thumbnail=user.avatar_url))
@pp.command()
@missile.is_rainbow()
async def max(self, ctx: Context, target: discord.User = None, viagra=True, sesami=True):
target = target if target else ctx.author
viagra -= 1
self.organs[target.id] = PP(max_pp_size, viagra, sesami)
await ctx.reply(embed=self.pp_embed(target, self.organs[target.id]))
@pp.command()
async def min(self, ctx: Context):
"""Minimises your pp strength"""
pp = self.get_pp(ctx, ctx.author.id)
        pp = self.organs[ctx.author.id] = PP(0, -1, False, pp.stun)
await ctx.reply(embed=self.pp_embed(ctx.author, pp))
@pp.command()
async def cut(self, ctx: Context):
"""Cuts your pp"""
# Internally this removes the user from self.organs
pp = self.get_pp(ctx, ctx.author.id)
if await self.bot.ask_reaction(ctx, '⚠Cutting your pp also resets your score! Are you sure?'):
self.organs.pop(ctx.author.id)
await ctx.send(embed=discord.Embed(
title=ctx.author.display_name + "'s penis",
description=f"Ɛ\n{'Ξ' * pp.size}>",
color=discord.Color.red()))
@pp.command(aliases=('sf',))
@cooldown(rate=1, per=10.0, type=BucketType.user) # Each person can only call this once per 10s
async def swordfight(self, ctx: Context, user: discord.User = None):
"""Use your pp as a weapon and fight"""
if not user:
user = self.bot.get_user(random.choice(list(self.organs.keys())))
my = self.get_pp(ctx, ctx.author.id).check_lock(True)
his = self.get_pp(ctx, user.id).check_lock(ctx.author == user)
content = ''
if my.stun:
stun_msg = 'Focusing energy on your muscle, your hand is slowly moving.'
my.stun -= 1
if not my.stun:
stun_msg += '\nWith a masculine roar, you are wielding your light saber again.'
await ctx.reply(stun_msg)
return
if his.sesami_oil:
his.sesami_oil = False
await ctx.reply('Your opponent instantly deflects your attack.')
return
xp = my.size - his.size
my.score += xp
if my.viagra > 1:
my.viagra -= 1
elif my.viagra == 1:
my.viagra = -1
my.size //= 2
content = f"{ctx.author} ran out of ammo!"
if my.size > his.size:
title = "VICTORY"
gain_msg = f"You gained **{xp}** score!"
elif my.size == his.size:
title = "TIE"
gain_msg = ''
else:
title = "LOST"
gain_msg = f"You lost **{-xp}** score!"
await ctx.send(
content=content,
embed=missile.Embed(title, f"**{ctx.author.name}'s pp:**\n{my.draw()}\n"
f"**{user.name}'s pp:**\n{his.draw()}\n\n{gain_msg}"))
@pp.command(aliases=('lb',))
async def leaderboard(self, ctx: Context):
"""Shows the pp leaderboard"""
self.organs = dict(
sorted(self.organs.items(), key=lambda item: item[1].score, reverse=True)) # Sort self.xp by score
base = 'pp score leaderboard:\n'
for key in self.organs.keys():
base += f"{self.bot.get_user(key).name}: **{self.organs[key].score}** "
await ctx.reply(base)
@pp.command()
async def viagra(self, ctx: Context):
"""In your pp, WE TRUST"""
pp = self.get_pp_checked(ctx, ctx.author.id)
if pp.viagra:
await ctx.reply('You are already one with your pp! Rounds left: ' + str(pp.viagra))
elif pp.viagra == 0:
pp.viagra = 3
pp.size *= 2
await ctx.send(f'{ctx.author.mention} has faith in his pp!!! New length: {pp.size}')
else:
await ctx.reply("You don't have viagra yet!")
@pp.command(aliases=('zen',))
async def zenitsu(self, ctx: Context, user: discord.User = None):
"""Stuns your opponent"""
my = self.get_pp_checked(ctx, ctx.author.id)
if my.sesami_oil and my.viagra == 0:
if not user:
user = self.bot.get_user(random.choice(list(self.organs.keys())))
his = self.get_pp_checked(ctx, user.id)
his.stun = 2
my.sesami_oil = my.viagra_available = False
await ctx.reply(
"https://i.pinimg.com/originals/0e/20/37/0e2037b27580b13d9141bc9cf0162b71.gif\n"
f"Inhaling thunder, you stunned {user}!")
else:
await ctx.reply("You need to have viagra available and sesami oil!")
@pp.command()
async def changelog(self, ctx: Context):
"""Shows the latest changelog of the PP command"""
await ctx.reply("""
**__May 8, 3:58AM GMT+1__** (Rocket Update 2)\n
Fixes a glitch where you can still attack others with lock on\n
Lock command now has a cool down of 30s
""")
@pp.command()
@cooldown(rate=1, per=30.0, type=BucketType.user) # Each person can only call this once per 30s
async def lock(self, ctx: Context):
pp = self.get_pp(ctx, ctx.author.id)
pp.lock = not pp.lock
await ctx.reply(f'Your pp is now {"" if pp.lock else "un"}locked.')
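# Loading sketch (assumes the usual discord.py extension pattern; the surrounding
# bot code is not shown here):
#   def setup(bot):
#       bot.add_cog(BitBay(bot))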
|
# -*- coding: utf-8 -*-
from .expr import *
def_Topic(
Title("Landau's function"),
Section("Definitions"),
Entries(
"32e430",
),
Section("Tables"),
Entries(
"177218",
),
Section("Arithmetic representations"),
Entries(
"7932c3",
),
Section("Asymptotics"),
Entries(
"a3ab2a",
),
Section("Bounds and inequalities"),
Entries(
"9697b8",
"3d5019",
"87d19b",
),
Section("Riemann hypothesis"),
Entries(
"65fa9f",
),
)
make_entry(ID("32e430"),
SymbolDefinition(LandauG, LandauG(n), "Landau's function"),
Description("Landau's function", LandauG(n), "gives the largest order of an element of the symmetric group", Subscript(S, n), "."),
Description("It can be defined arithmetically as the maximum least common multiple of the partitions of", n, ", as in", EntryReference("7932c3"), "."),
Description("The following table lists conditions such that", SourceForm(LandauG(n)), "is defined in Fungrim."),
Table(TableRelation(Tuple(P, Q), Implies(P, Q)),
TableHeadings(Description("Domain"), Description("Codomain")),
List(
Tuple(Element(n, ZZGreaterEqual(0)), Element(LandauG(n), ZZGreaterEqual(1))))),
References("https://oeis.org/A000793"))
# Tables
make_entry(ID("177218"),
Description("Table of", LandauG(n), "for", LessEqual(0, n, 100)),
Table(TableRelation(Tuple(n, y), Equal(LandauG(n), y)),
TableHeadings(n, LandauG(n)), TableSplit(4),
List(
Tuple(0, 1),
Tuple(1, 1),
Tuple(2, 2),
Tuple(3, 3),
Tuple(4, 4),
Tuple(5, 6),
Tuple(6, 6),
Tuple(7, 12),
Tuple(8, 15),
Tuple(9, 20),
Tuple(10, 30),
Tuple(11, 30),
Tuple(12, 60),
Tuple(13, 60),
Tuple(14, 84),
Tuple(15, 105),
Tuple(16, 140),
Tuple(17, 210),
Tuple(18, 210),
Tuple(19, 420),
Tuple(20, 420),
Tuple(21, 420),
Tuple(22, 420),
Tuple(23, 840),
Tuple(24, 840),
Tuple(25, 1260),
Tuple(26, 1260),
Tuple(27, 1540),
Tuple(28, 2310),
Tuple(29, 2520),
Tuple(30, 4620),
Tuple(31, 4620),
Tuple(32, 5460),
Tuple(33, 5460),
Tuple(34, 9240),
Tuple(35, 9240),
Tuple(36, 13860),
Tuple(37, 13860),
Tuple(38, 16380),
Tuple(39, 16380),
Tuple(40, 27720),
Tuple(41, 30030),
Tuple(42, 32760),
Tuple(43, 60060),
Tuple(44, 60060),
Tuple(45, 60060),
Tuple(46, 60060),
Tuple(47, 120120),
Tuple(48, 120120),
Tuple(49, 180180),
Tuple(50, 180180),
Tuple(51, 180180),
Tuple(52, 180180),
Tuple(53, 360360),
Tuple(54, 360360),
Tuple(55, 360360),
Tuple(56, 360360),
Tuple(57, 471240),
Tuple(58, 510510),
Tuple(59, 556920),
Tuple(60, 1021020),
Tuple(61, 1021020),
Tuple(62, 1141140),
Tuple(63, 1141140),
Tuple(64, 2042040),
Tuple(65, 2042040),
Tuple(66, 3063060),
Tuple(67, 3063060),
Tuple(68, 3423420),
Tuple(69, 3423420),
Tuple(70, 6126120),
Tuple(71, 6126120),
Tuple(72, 6846840),
Tuple(73, 6846840),
Tuple(74, 6846840),
Tuple(75, 6846840),
Tuple(76, 8953560),
Tuple(77, 9699690),
Tuple(78, 12252240),
Tuple(79, 19399380),
Tuple(80, 19399380),
Tuple(81, 19399380),
Tuple(82, 19399380),
Tuple(83, 38798760),
Tuple(84, 38798760),
Tuple(85, 58198140),
Tuple(86, 58198140),
Tuple(87, 58198140),
Tuple(88, 58198140),
Tuple(89, 116396280),
Tuple(90, 116396280),
Tuple(91, 116396280),
Tuple(92, 116396280),
Tuple(93, 140900760),
Tuple(94, 140900760),
Tuple(95, 157477320),
Tuple(96, 157477320),
Tuple(97, 232792560),
Tuple(98, 232792560),
Tuple(99, 232792560),
Tuple(100, 232792560),
)))
# Arithmetic representations
# todo: semantic markup for variable-length tuples (or better, partitions?)
make_entry(ID("7932c3"),
Formula(Equal(LandauG(n),
Maximum(SetBuilder(LCM(Subscript(s, 1), Ellipsis, Subscript(s, k)), Tuple(k, Subscript(s, i)),
And(Element(k, ZZGreaterEqual(0)), Element(Subscript(s, i), ZZGreaterEqual(1)), Equal(Sum(Subscript(s, i), Tuple(i, 1, k)), n)))))),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(1))))
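# Illustrative sketch (not a Fungrim entry): a brute-force check of the arithmetic
# representation in entry 7932c3, computing Landau's function as the maximum LCM
# over all integer partitions of n. Only the Python standard library is assumed.
from math import gcd
from functools import reduce

def _partitions(n, smallest=1):
    # yield every integer partition of n with parts >= smallest
    if n == 0:
        yield ()
        return
    for part in range(smallest, n + 1):
        for rest in _partitions(n - part, part):
            yield (part,) + rest

def landau_g_bruteforce(n):
    # g(n) is the maximum lcm over all partitions of n; g(0) = 1 (empty partition)
    def lcm(a, b):
        return a * b // gcd(a, b)
    return max(reduce(lcm, p, 1) for p in _partitions(n))

# Check against the table in entry 177218:
# [landau_g_bruteforce(n) for n in range(13)] == [1, 1, 2, 3, 4, 6, 6, 12, 15, 20, 30, 30, 60]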
# Asymptotics
make_entry(ID("a3ab2a"),
Formula(Equal(SequenceLimit(Log(LandauG(n)) / Sqrt(n * Log(n)), n, Infinity), 1)))
# Bounds and inequalities
make_entry(ID("9697b8"),
Formula(LessEqual(Log(LandauG(n)), Sqrt(n*Log(n)) * (1 + (Log(Log(n))-Decimal("0.975"))/(2*Log(n))))),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(4))),
References("Jean-Pierre Massias, Jean-Louis Nicolas and Guy Robin (1989), Effective bounds for the maximal order of an element in the symmetric group, Mathematics of Computation, 53, 118, 665--665, https://doi.org/10.1090/s0025-5718-1989-0979940-4"))
make_entry(ID("3d5019"),
Formula(GreaterEqual(Log(LandauG(n)), Sqrt(n*Log(n)))),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(906))),
References("Jean-Pierre Massias, Jean-Louis Nicolas and Guy Robin (1989), Effective bounds for the maximal order of an element in the symmetric group, Mathematics of Computation, 53, 118, pp. 665-665, https://doi.org/10.1090/s0025-5718-1989-0979940-4"))
make_entry(ID("87d19b"),
Formula(LessEqual(Maximum(SetBuilder(p, p, And(Element(p, PP), Divides(p, LandauG(n))))), Decimal("1.328") * Sqrt(n*Log(n)))),
Variables(n),
Assumptions(Element(n, ZZGreaterEqual(5))),
References("Jon Grantham (1995), The largest prime dividing the maximal order of an element of S_n, 64, 209, pp. 407--210, https://doi.org/10.2307/2153344"))
# Riemann hypothesis
make_entry(ID("65fa9f"),
Formula(Equivalent(RiemannHypothesis,
ForAll(n, Element(n, ZZGreaterEqual(1)),
Less(Log(LandauG(n)), Where(Sqrt(f(n)), Equal(f(y), UniqueSolution(Brackets(Equal(LogIntegral(x), y)), x, Element(x, OpenInterval(1, Infinity))))))))),
References("Marc Deleglise, Jean-Louis Nicolas, The Landau function and the Riemann Hypothesis, https://arxiv.org/abs/1907.07664"))
|
# -*- coding: utf-8 -*-
"""
@author: Adam Reinhold Von Fisher - https://www.linkedin.com/in/adamrvfisher/
"""
#This is a graphical display tool for candlestick charts + indicators
#It has a trading model inside.
#Import modules
from YahooGrabber import YahooGrabber
from YahooSourceDailyGrabber import YahooSourceDailyGrabber
import matplotlib.pyplot as plt
from matplotlib.finance import candlestick_ohlc
import matplotlib.dates as mdates
import numpy as np
#Assign ticker
string = 'STX'
#Request data
Asset = YahooGrabber(string)
#Trimmer
Asset = Asset[-100:]
#Make column that represents X axis
Asset['Index'] = Asset.index
#Format for mpl
Asset['IndexToNumber'] = Asset['Index'].apply(mdates.date2num)
#Format Dataframe to feed candlestick_ohlc()
AssetCopy = Asset[['IndexToNumber', 'Open', 'High', 'Low', 'Close', 'Adj Close']].copy()
#Create axe and define X and Y axis scale
figure, axe = plt.subplots(figsize = (10,5))
#Assign labels
plt.ylabel(string + ' Price')
plt.xlabel('Date')
#Variable windows
#donchianwindow is used to find the min/max of the price range to make the long/short signal
#Smaller donchian window = more likely double days
donchianwindow = 55
#ATRwindow is used for volatility position sizing
ATRwindow = 20
#stopwindow is used for trailing high/low used for long/short exits
stopwindow = 13
#Counter tracks iteration progress
Counter = 0
#SubIndex column is a secondary index, it only exists to help identify exits
AssetCopy['SubIndex'] = range(0,len(AssetCopy))
#Log Returns
AssetCopy['LogRet'] = np.log(AssetCopy['Adj Close']/AssetCopy['Adj Close'].shift(1))
AssetCopy['LogRet'] = AssetCopy['LogRet'].fillna(0)
#ATR calculation using ATRwindow
AssetCopy['Method1'] = AssetCopy['High'] - AssetCopy['Low']
AssetCopy['Method2'] = abs((AssetCopy['High'] - AssetCopy['Close'].shift(1)))
AssetCopy['Method3'] = abs((AssetCopy['Low'] - AssetCopy['Close'].shift(1)))
AssetCopy['Method1'] = AssetCopy['Method1'].fillna(0)
AssetCopy['Method2'] = AssetCopy['Method2'].fillna(0)
AssetCopy['Method3'] = AssetCopy['Method3'].fillna(0)
AssetCopy['TrueRange'] = AssetCopy[['Method1','Method2','Method3']].max(axis = 1)
#ATR in points; not %
AssetCopy['ATR'] = AssetCopy['TrueRange'].rolling(window = ATRwindow,
center=False).mean()
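#A minimal sketch of ATR-based (volatility) position sizing, which is what the ATRwindow
#comment above refers to; 'account_equity' and 'risk_fraction' are hypothetical inputs,
#not variables used elsewhere in this script
def ATRPositionSize(account_equity, risk_fraction, atr_points):
    #Risk a fixed fraction of equity per trade, with the stop distance measured in ATR points
    dollar_risk = account_equity * risk_fraction
    #Number of shares such that a one ATR adverse move loses roughly dollar_risk
    return int(dollar_risk / atr_points) if atr_points > 0 else 0
#Example: ATRPositionSize(100000, .01, AssetCopy['ATR'].iloc[-1])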
#Market top and bottom calculation
#Technical calculations
#Donchian Channel
AssetCopy['RollingMax'] = AssetCopy['High'].rolling(20).max()
AssetCopy['RollingMin'] = AssetCopy['Low'].rolling(20).min()
#SMA
AssetCopy['SMA5'] = AssetCopy['Close'].rolling(5).mean()
AssetCopy['SMA20'] = AssetCopy['Close'].rolling(20).mean()
#N period ATR Setup
AssetCopy['Method1'] = AssetCopy['High'] - AssetCopy['Low']
AssetCopy['Method2'] = abs((AssetCopy['High'] - AssetCopy['Close'].shift(1)))
AssetCopy['Method3'] = abs((AssetCopy['Low'] - AssetCopy['Close'].shift(1)))
AssetCopy['Method1'] = AssetCopy['Method1'].fillna(0)
AssetCopy['Method2'] = AssetCopy['Method2'].fillna(0)
AssetCopy['Method3'] = AssetCopy['Method3'].fillna(0)
AssetCopy['TrueRange'] = AssetCopy[['Method1','Method2','Method3']].max(axis = 1)
AssetCopy['4wkATRPoints'] = AssetCopy['TrueRange'].rolling(window = 20, center=False).mean()
AssetCopy['4wkATRPercent'] = AssetCopy['4wkATRPoints'] / AssetCopy['Close']
AssetCopy['ATRRollingMax'] = AssetCopy['4wkATRPercent'].rolling(20).max()
AssetCopy['ATRRollingMin'] = AssetCopy['4wkATRPercent'].rolling(20).min()
#Signal = Price </> min/max
#if price is greater than the max go long
AssetCopy['LongSignal'] = np.where(AssetCopy['High'] >= AssetCopy['RollingMax'].shift(1), 1, 0)
#if price is less than the min go short
AssetCopy['ShortSignal'] = np.where(AssetCopy['Low'] <= AssetCopy['RollingMin'].shift(1), 1, 0)
#If double signal days exist, then entry and P/L on those days will not be reflected correctly, spurious return stream
AssetCopy['DoubleDay'] = np.where(AssetCopy['LongSignal'] + AssetCopy['ShortSignal'] == 2, 1, 0)
#Next two lines combines long signal and short signal columns into a single column
#If there is a double day then a short entry is recorded
AssetCopy['Signal'] = np.where(AssetCopy['LongSignal'] == 1, 1, 0)
AssetCopy['Signal'] = np.where(AssetCopy['ShortSignal'] == 1, -1, AssetCopy['Signal'])
#if Rolling Min/Max is still being computed, stay out of market
AssetCopy['Signal'] = np.where(AssetCopy['RollingMax'].isna(), 0, AssetCopy['Signal'])
#Index values for segmenting data for trade analysis
SignalDates = list(AssetCopy['Signal'].loc[(AssetCopy['Signal'] != 0)].index)
#Trade ATR on signal day
AssetCopy['TradeATR'] = np.where(AssetCopy['Signal'] != 0, AssetCopy['ATR'].shift(1), np.nan)
#Exits other than initial 2 ATR stop, stopwindow is used here
#Asset1['LimitExitPrice'] = np.nan
AssetCopy['ShortExitPrice'] = AssetCopy['High'].rolling(window=stopwindow, center=False).max()
AssetCopy['LongExitPrice'] = AssetCopy['Low'].rolling(window=stopwindow, center=False).min()
#Declare columns to record entry price and initial 2 ATR stop for unit one
AssetCopy['EntryPriceUnitOne'] = np.nan
AssetCopy['StopPriceUnitOne'] = np.nan
#Be sure to check for double signal days, gaps on first unit entry, and gaps on exits.
#Default stops and entries
#Find the first trade of the signal period, so we can document entry prices
#Long entry first unit // enter one cent above previous high
AssetCopy['EntryPriceUnitOne'] = np.where(AssetCopy['Signal'] == 1,
AssetCopy['RollingMax'].shift(1) + .01, np.nan)
#Short entry first unit // enter one cent below previous low
AssetCopy['EntryPriceUnitOne'] = np.where(AssetCopy['Signal'] == -1,
AssetCopy['RollingMin'].shift(1) - .01, AssetCopy['EntryPriceUnitOne'])
#Overlay
axe.plot(AssetCopy['IndexToNumber'], AssetCopy['RollingMax'], color = 'green', label = 'RollingMax')
axe.plot(AssetCopy['IndexToNumber'], AssetCopy['RollingMin'], color = 'red', label = 'RollingMin')
axe.plot(AssetCopy['IndexToNumber'], AssetCopy['SMA5'], color = 'black', label = 'SMA5')
axe.plot(AssetCopy['IndexToNumber'], AssetCopy['SMA20'], color = 'yellow', label = 'SMA20')
#Signal triangles..
axe.scatter(AssetCopy.loc[AssetCopy['LongSignal'] == 1, 'IndexToNumber'].values, AssetCopy.loc[AssetCopy['LongSignal'] == 1, 'EntryPriceUnitOne'].values, label='skitscat', color='green', s=50, marker="^")
axe.scatter(AssetCopy.loc[AssetCopy['ShortSignal'] == 1, 'IndexToNumber'].values, AssetCopy.loc[AssetCopy['ShortSignal'] == 1, 'EntryPriceUnitOne'].values, label='skitscat', color='red', s=50, marker="v")
#Plot the DF values with the figure, object
candlestick_ohlc(axe, AssetCopy.values, width=.6, colorup='green', colordown='red')
axe.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
#For ATR
figure2, axe2 = plt.subplots(figsize = (10,2))
#Labels
plt.ylabel(string + ' ATR')
plt.xlabel('Date')
#ATR line graphs and rolling min/max
axe2.plot(AssetCopy['IndexToNumber'], AssetCopy['4wkATRPercent'], color = 'black', label = '4wkATRPercent')
axe2.plot(AssetCopy['IndexToNumber'], AssetCopy['ATRRollingMax'], color = 'green', label = 'ATRRollingMax')
axe2.plot(AssetCopy['IndexToNumber'], AssetCopy['ATRRollingMin'], color = 'red', label = 'ATRRollingMin')
#Date formatting
axe2.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
#Save image to CWD..
#plt.savefig('TestingTesting.png')
#Display figure
#plt.show()
|
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.FATAL)
import sys
sys.path.append('../')
import numpy as np
import gpflow
from dgps_with_iwvi.layers import LatentVariableLayer, GPLayer
from dgps_with_iwvi.models import DGP_VI, DGP_IWVI
# class DirectlyParameterizedEncoder(gpflow.Parameterized):
# """
# No amortization is used; each datapoint element has an
# associated mean and variance of its latent variable.
#
# IMPORTANT: Not compatible with minibatches
# """
#
# def __init__(self, latent_dim, num_data,
# num_samples=None,
# mean=None,
# std=None,
# broadcast_axis=None,
# name=None):
# gpflow.Parameterized.__init__(self, name=name)
# self.latent_dim = latent_dim
# self.num_data = num_data
#
# if mean is None:
# mean = np.random.randn(num_data, latent_dim)
#
# if mean.shape != (num_data, latent_dim):
# raise ValueError("mean must have shape (num_data={}, latent_dim={})"
# .format(num_data, latent_dim))
#
# if std is None:
# std = np.ones((num_data, latent_dim)) * 1e-4
#
# self.mean = gpflow.Param(mean)
# self.std = gpflow.Param(std, transform=gpflow.transforms.positive)
# self.num_samples = num_samples
# self.broadcast_axis = broadcast_axis
#
# @gpflow.params_as_tensors
# def __call__(self, Z):
# if self.broadcast_axis is None:
# m = tf.tile(self.mean, [self.num_samples, 1])
# s = tf.tile(self.std, [self.num_samples, 1])
# elif self.broadcast_axis == 0:
# m = tf.tile(self.mean[None, :, :], [self.num_samples, 1, 1])
# s = tf.tile(self.std[None, :, :], [self.num_samples, 1, 1])
# elif self.broadcast_axis == 1:
# m = tf.tile(self.mean[:, None, :], [1, self.num_samples, 1])
# s = tf.tile(self.std[:, None, :], [1, self.num_samples, 1])
#
# return m, s
#
#
# class Data:
# N = 10
# Ns = 100
# Dx = 1
# Dy = 2
# M = 25
#
# np.random.seed(0)
#
# Xs = np.random.randn(Ns, Dx)
#
# X_mean = np.random.randn(N, Dx)
# X_var = np.random.uniform(low=1e-4, high=1e-1, size=(N, Dx))
#
# Z = np.random.randn(M, Dx)
#
# Y = np.concatenate([np.sin(10*X_mean), np.cos(10*X_mean)], 1)
#
#
# class Fixtures:
# kern = gpflow.kernels.RBF(1, lengthscales=0.1)
# lik = gpflow.likelihoods.Gaussian() # var = 1
# custom_config = gpflow.settings.get_settings()
# custom_config.numerics.jitter_level = 1e-18
# with gpflow.settings.temp_settings(custom_config):
#
# class ReferenceBGPLVM:
# model = gpflow.models.BayesianGPLVM(Data.X_mean,
# Data.X_var,
# Data.Y,
# Fixtures.kern,
# Data.M,
# Z=Data.Z)
#
# # get inducing point distribution
# mu, cov = model.predict_f_full_cov(Data.Z)
#
# # unwhiten
# std = np.linalg.cholesky(np.transpose(cov, [2, 0, 1]))
# K = Fixtures.kern.compute_K_symm(Data.Z)
# L = np.linalg.cholesky(K)
# L_inv = np.linalg.inv(L)
# cov_white = np.einsum('ab,bcp,dc->pad', L_inv, cov, L_inv)
#
# q_mu = np.linalg.solve(L, mu)
# q_sqrt = np.linalg.cholesky(cov_white + 1e-12 * np.eye(Data.M)[None, :, :])
#
# # predictions
# pred_m, pred_v = model.predict_f(Data.Xs)
# pred_m_full_cov, pred_v_full_cov = model.predict_f_full_cov(Data.Xs)
#
# # bound
# L = model.compute_log_likelihood()
#
#
# def test_bound_vs_gplvm():
# custom_config = gpflow.settings.get_settings()
# custom_config.numerics.jitter_level = 1e-18
# with gpflow.settings.temp_settings(custom_config):
# encoder_vi = DirectlyParameterizedEncoder(Data.Dx, Data.N,
# mean=Data.X_mean,
# std=Data.X_var**0.5,
# num_samples=1,
# broadcast_axis=None)
#
# encoder_iw = DirectlyParameterizedEncoder(Data.Dx, Data.N,
# mean=Data.X_mean,
# std=Data.X_var ** 0.5,
# num_samples=1,
# broadcast_axis=1)
#
# layers_vi = [LatentVariableLayer(Data.Dx, encoder=encoder_vi),
# GPLayer(Fixtures.kern, Data.Z, Data.Dy)]
#
# layers_iw = [LatentVariableLayer(Data.Dx, encoder=encoder_iw),
# GPLayer(Fixtures.kern, Data.Z, Data.Dy)]
#
# m_dgp_vi = DGP_VI(np.zeros((Data.N, 0)), Data.Y, layers_vi, Fixtures.lik, num_samples=1)
# m_dgp_iw = DGP_IWVI(np.zeros((Data.N, 0)), Data.Y, layers_iw, Fixtures.lik, num_samples=1)
#
# for model in [m_dgp_vi, m_dgp_iw]:
# model.layers[1].q_mu = ReferenceBGPLVM.q_mu
# model.layers[1].q_sqrt = ReferenceBGPLVM.q_sqrt
#
# L = [model.compute_log_likelihood() for _ in range(1000)]
#
# L_mean = np.average(L)
# L_stderr = np.std(L) / len(L) ** 0.5
#
# # check ground truth is within +-3 std deviation CI
# assert L_mean + 3 * L_stderr > ReferenceBGPLVM.L
# assert L_mean - 3 * L_stderr < ReferenceBGPLVM.L
#
#
# m, v = model.predict_f(Data.Xs)
# np.testing.assert_allclose(m, ReferenceBGPLVM.pred_m, atol=1e-6, rtol=1e-6)
# np.testing.assert_allclose(v, ReferenceBGPLVM.pred_v, atol=1e-6, rtol=1e-6)
#
# m_full, v_full = model.predict_f_full_cov(Data.Xs)
# v_full = np.transpose(v_full, [1, 2, 0])
# np.testing.assert_allclose(m_full, ReferenceBGPLVM.pred_m_full_cov, atol=1e-6, rtol=1e-6)
# np.testing.assert_allclose(v_full, ReferenceBGPLVM.pred_v_full_cov, atol=1e-6, rtol=1e-6)
#
#
# def test_IW_vs_VI():
# K = 10
# encoder_vi = DirectlyParameterizedEncoder(Data.Dx, Data.N,
# mean=Data.X_mean,
# std=Data.X_var ** 0.5,
# num_samples=K,
# broadcast_axis=None)
#
# encoder_iw = DirectlyParameterizedEncoder(Data.Dx, Data.N,
# mean=Data.X_mean,
# std=Data.X_var ** 0.5,
# num_samples=K,
# broadcast_axis=1)
#
# layers_vi = [LatentVariableLayer(Data.Dx, encoder=encoder_vi),
# GPLayer(Fixtures.kern, Data.Z, Data.Dy)]
#
# layers_iw = [LatentVariableLayer(Data.Dx, encoder=encoder_iw),
# GPLayer(Fixtures.kern, Data.Z, Data.Dy)]
#
# m_dgp_vi = DGP_VI(np.zeros((Data.N, 0)), Data.Y, layers_vi, Fixtures.lik, num_samples=K)
# m_dgp_iw = DGP_IWVI(np.zeros((Data.N, 0)), Data.Y, layers_iw, Fixtures.lik, num_samples=K)
#
# for model in [m_dgp_vi, m_dgp_iw]:
# model.layers[1].q_mu = ReferenceBGPLVM.q_mu
# model.layers[1].q_sqrt = ReferenceBGPLVM.q_sqrt
#
# L_vi = [m_dgp_vi.compute_log_likelihood() for _ in range(1000)]
# L_iw = [m_dgp_iw.compute_log_likelihood() for _ in range(1000)]
#
# L_vi_mean = np.average(L_vi)
# L_iw_mean = np.average(L_iw)
#
# # for K > 1 the IW estimate should be greater than the VI estimator
# assert L_vi_mean < L_iw_mean
#
#
# def test_IW_var_vs_VI_single_sample():
# K = 1
# encoder_vi = DirectlyParameterizedEncoder(Data.Dx, Data.N,
# mean=Data.X_mean,
# std=Data.X_var ** 0.5,
# num_samples=K,
# broadcast_axis=None)
#
# encoder_iw = DirectlyParameterizedEncoder(Data.Dx, Data.N,
# mean=Data.X_mean,
# std=Data.X_var ** 0.5,
# num_samples=K,
# broadcast_axis=1)
#
# layers_vi = [LatentVariableLayer(Data.Dx, encoder=encoder_vi),
# GPLayer(Fixtures.kern, Data.Z, Data.Dy)]
#
# layers_iw = [LatentVariableLayer(Data.Dx, encoder=encoder_iw),
# GPLayer(Fixtures.kern, Data.Z, Data.Dy)]
#
# m_dgp_vi = DGP_VI(np.zeros((Data.N, 0)), Data.Y, layers_vi, Fixtures.lik, num_samples=K)
# m_dgp_iw = DGP_IWVI(np.zeros((Data.N, 0)), Data.Y, layers_iw, Fixtures.lik, num_samples=K)
#
#
# for model in [m_dgp_vi, m_dgp_iw]:
# model.layers[1].q_mu = ReferenceBGPLVM.q_mu
# model.layers[1].q_sqrt = ReferenceBGPLVM.q_sqrt
#
# L_vi = [m_dgp_vi.compute_log_likelihood() for _ in range(1000)]
# L_iw = [m_dgp_iw.compute_log_likelihood() for _ in range(1000)]
#
# L_vi_std = np.std(L_vi)
# L_iw_std = np.std(L_iw)
#
# # in the 1 sample case the variance of the VI estimator should be strictly less than the IW
# assert L_vi_std < L_iw_std
def test_pos_def():
# N = 10
# Dx = 3
# Dy = 1
# K = 5
from bayesian_benchmarks.data import get_regression_data
data = get_regression_data('wilson_3droad')
X = data.X_train
Y = data.Y_train
M = 128
from scipy.cluster.vq import kmeans2
Z = kmeans2(X, M, minit='points')[0]
N, Dx = X.shape
Dy = Y.shape[1]
K = 1
lik = gpflow.likelihoods.Gaussian(variance=0.1)
kern = gpflow.kernels.RBF(Dx, lengthscales=0.1)
X = np.random.randn(N, Dx)
Y = np.random.randn(N, Dy)
layers_vi = [LatentVariableLayer(Dx, XY_dim=Dx+Dy),
GPLayer(kern, Z, Dy)]
layers_iw = [LatentVariableLayer(Dx, XY_dim=Dx+Dy),
GPLayer(kern, Z, Dy)]
m_dgp_vi = DGP_VI(X, Y, layers_vi, lik, num_samples=K, minibatch_size=512)
m_dgp_iw = DGP_IWVI(X, Y, layers_iw, lik, num_samples=K, minibatch_size=512)
for model in [m_dgp_vi, m_dgp_iw]:
model.layers[-1].q_mu.set_trainable(False)
model.layers[-1].q_sqrt.set_trainable(False)
optimizer_adam = gpflow.train.AdamOptimizer(0.005)
adam_op = optimizer_adam.make_optimize_tensor(model)
optimizer_ng = gpflow.train.NatGradOptimizer(gamma=0.01)
ng_op = optimizer_ng.make_optimize_tensor(model,
var_list=[[model.layers[-1].q_mu,
model.layers[-1].q_sqrt]])
sess = model.enquire_session()
for _ in range(10):
print('{} {:.2f}'.format(_, sess.run(model.likelihood_tensor)))
sess.run(ng_op)
sess.run(adam_op)
L_vi = [m_dgp_vi.compute_log_likelihood() for _ in range(100)]
L_iw = [m_dgp_iw.compute_log_likelihood() for _ in range(100)]
L_vi = np.average(L_vi)
L_iw = np.average(L_iw)
print(L_vi, L_iw)
test_pos_def()
|
"""
Loading a pretrained model and using it for superresolution and denoising purposes
on the test-set.
Denoising_in_superresolution/src
@author: Angel Villar-Corrales
"""
import os
import json
from tqdm import tqdm
import torch
import lib.utils as utils
import lib.metrics as metrics
import lib.arguments as arguments
import lib.model_setup as model_setup
class Evaluate:
"""
Loading the model and performing superresolution in a few images
Args:
-----
exp_path: string
path to the experiment directory
checkpoint: integer
number of epochs corresponding to the checkpoint to load. -1 means trained model
"""
def __init__(self, exp_path, checkpoint=-1):
"""
Initializer of the evaluator object
"""
self.exp_path = exp_path
self.models_path = os.path.join(self.exp_path, "models")
self.plots_path = os.path.join(self.exp_path, "plots")
self.exp_data = utils.load_configuration_file(self.exp_path)
self.exp_data["training"]["batch_size"] = 1
self.exp_data["dataset"]["patches_per_img"] = 1
self.train_logs = utils.load_train_logs(self.exp_path)
self.checkpoint = checkpoint
utils.set_random_seed()
return
def load_dataset(self):
"""
Loading dataset and data loaders
"""
self.dataset, _, _, self.test_loader, self.num_channels = model_setup.load_dataset(self.exp_data)
return
def load_generalization_dataset(self, noise, std):
"""
Loading dataset and data loaders to evaluate the generalization capabilities of a model
Args:
-----
noise: string
type of noise used to corrupt the test images
std: float
power of the corruption noise
"""
self.dataset, self.test_loader, self.num_channels = \
model_setup.load_generalization_dataset(exp_data=self.exp_data, noise=noise, std=std)
return
def load_model(self):
"""
Creating a model and loading the pretrained network parameters from the saved
state dictionary. Setting the model to use a GPU
"""
# getting model name given checkpoint
if(self.checkpoint < 0):
model_name = "model_trained"
else:
model_name = f"model_epoch_{self.checkpoint}"
path_to_model = os.path.join(self.models_path, model_name)
# making sure the model exists
if(not os.path.exists(path_to_model)):
print("ERROR!")
print(f"Model: {model_name} was not found in path {self.models_path}")
exit()
# creating model architecture
# setting up the device
torch.backends.cudnn.fastest = True
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# initializing the model and loading the state dictionary
model = model_setup.setup_model(exp_data=self.exp_data, exp_path=self.exp_path)
model.load_state_dict(torch.load(path_to_model, map_location=self.device))
self.model = model.to(self.device)
# setting up model hyper-parameters
self.optimizer, self.loss_function, self.scheduler = model_setup.hyperparameter_setup(self.exp_data, self.model)
return
@torch.no_grad()
def test_model(self):
"""
Using the pretrained model to perform denoising and superresolution in the test
set measuring the metric values
Returns:
--------
results: dictionary
dict containing the average result for each of the metrics: loss, mse, mae, psnr, ssim, ms_ssim
"""
self.model.eval()
loss_list = []
mae_list = []
mse_list = []
psnr_list = []
ssim_list = []
ms_ssim_list = []
for _, (hr_imgs, lr_imgs, labels) in enumerate(tqdm(self.test_loader)):
hr_imgs = hr_imgs.to(self.device).float()
lr_imgs = lr_imgs.to(self.device).float()
# pretrained model expects input in range [-0.5, 0.5] and we were using [-1,1]
recovered_images = self.model(lr_imgs * 0.5) * 2
# setting images to the range [0,1]
hr_imgs, lr_imgs = metrics.denorm_img(hr_imgs), metrics.denorm_img(lr_imgs)
recovered_images = metrics.denorm_img(recovered_images)
loss = self.loss_function(hr_imgs, recovered_images)
loss_list.append(loss)
metric_vals = metrics.compute_metrics(original_img=hr_imgs, resoluted_img=recovered_images)
mae_list.append(metric_vals["mae"])
mse_list.append(metric_vals["mae"])
psnr_list.append(metric_vals["psnr"])
ssim_list.append(metric_vals["ssim"])
ms_ssim_list.append(metric_vals["ms_ssim"])
loss = metrics.get_loss_stats(loss_list, message="Test Loss Stats")
results = {
"loss": loss,
"mse": torch.mean(torch.stack(mse_list)),
"mae": torch.mean(torch.stack(mae_list)),
"psnr": torch.mean(torch.stack(psnr_list)),
"ssim": torch.mean(torch.stack(ssim_list)),
"sm_ssim": torch.mean(torch.stack(ms_ssim_list)),
}
return results
if __name__ == "__main__":
os.system("clear")
exp_directory, noise, std = arguments.get_directory_argument(generalization=True)
checkpoint = 90
evaluator = Evaluate(exp_path=exp_directory, checkpoint=checkpoint)
if(noise == ""):
evaluator.load_dataset()
else:
evaluator.load_generalization_dataset(noise=noise, std=std)
evaluator.load_model()
results = evaluator.test_model()
print(f"Test Loss: {results['loss']}")
print(f"Test MAE: {results['mae']}")
print(f"Test MSE: {results['mse']}")
print(f"Test PSNR: {results['psnr']}")
print(f"Test SSIM: {results['ssim']}")
print(f"Test SM-SSIM: {results['sm_ssim']}")
# creating/saving generalization results
if(noise != ""):
gen_logs_path = os.path.join(exp_directory, "generalization_logs.json")
if(not os.path.exists(gen_logs_path)):
gen_logs = utils.create_generalization_logs(exp_directory)
else:
gen_logs = utils.load_generalization_logs(exp_directory)
exp_name = f"noise={noise}__std={std}"
gen_logs[exp_name] = {}
gen_logs[exp_name]["MAE"] = float(results['mae'])
gen_logs[exp_name]["MSE"] = float(results['mse'])
gen_logs[exp_name]["PSNR"] = float(results['psnr'])
gen_logs[exp_name]["SSIM"] = float(results['ssim'])
gen_logs[exp_name]["SM-SSIM"] = float(results['sm_ssim'])
with open(gen_logs_path, "w") as file:
json.dump(gen_logs, file)
#
|
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse
from django.views.generic import TemplateView
from .forms import PostForm, CommentForm
from .models import Post, Comment
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
@login_required
def get_posts(request):
if request.method == 'POST':
if 'post' in request.POST:
postForm = PostForm(request.POST)
if postForm.is_valid():
post = postForm.save(commit=False)
post.author = request.user
post.save()
return redirect('cabPosts:posts')
else:
commentForm=CommentForm(request.POST)
if commentForm.is_valid():
post_id = request.POST['post_id']
post_instance = get_object_or_404(Post, id=post_id)
comment = commentForm.save(commit=False)
comment.name = request.user
comment.post = post_instance
comment.email = request.user.email
comment.save()
return redirect('cabPosts:posts')
else:
return render(request,'500.html',{})
else:
postForm = PostForm()
posts = Post.objects.all()
commentForm = CommentForm()
comments=Comment.objects.all()
args = {'postForm':postForm, 'posts':posts ,'commentForm':commentForm,'comments':comments}
return render(request, 'cabPosts/posts.html', args)
|
# GET
import os
db_login = os.environ['db_login']
db_password = os.environ['db_password']
#
# MONGO STUFF
from pymongo import MongoClient
# from pymongo import MongoClient
# from bson.json_util import dumps
# from bson.objectid import ObjectId
client = MongoClient(
f"mongodb+srv://{db_login}:{db_password}@cluster0.qxw2m.mongodb.net/myFirstDatabase?retryWrites=true&w=majority"
)
db = client.test_database
OSTAS = db.OSTAS
USERS = db.USERS
SHIPS = db.SHIPS
TICKETS = db.TICKETS
LOGS = db.LOGS
# print(*db.USERS.find({}))
#
#
from uuid import uuid4 # generate unique id
from datetime import date, datetime, timedelta
from flask import Flask, jsonify, redirect, render_template, request, session
app = Flask(__name__, static_url_path="/")
app.secret_key = str(uuid4())
@app.route("/")
def index():
if "user" in session and session["user"]["privledge"] == "ADMIN":
return render_template("admin.html")
return render_template("index.html", logined="user" in session)
@app.route("/stats/")
def stats():
if "user" in session and session["user"]["privledge"] == "ADMIN":
return render_template("stats.html")
return redirect("/login")
@app.route("/stats/csv/")
def csv_stats():
if "user" in session and session["user"]["privledge"] == "ADMIN":
return render_template("/components/html_to_csv.html")
return redirect("/login")
@app.route("/stats/json/")
def json_stats():
if "user" in session and session["user"]["privledge"] == "ADMIN":
return render_template("/components/html_to_json.html")
return redirect("/login")
@app.route("/cart/")
@app.route("/cart/<string:id>")
def cart(id=None):
if "user" in session and session["user"]["privledge"] != "ADMIN":
if not id:
return render_template("bag.html")
user = USERS.find_one({"_id": session["user"]["_id"]})
cart_item = list(filter(lambda c: c["_id"] == id, user["cart"]))
if len(cart_item) == 0:
return redirect("/cart/")
return render_template(
"components/html_ticket.html", cart=cart_item[0], user=user
)
return redirect("/login")
import auth, get_data, add_data, delete_data
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
|
# -*- coding: utf-8 -*-
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import posixpath
import re
import subprocess
import sys
import gslib
import gslib.tests.testcase as testcase
from gslib.tests.util import ObjectToURI as suri
from gslib.wildcard_iterator import ContainsWildcard
class TestStat(testcase.GsUtilIntegrationTestCase):
"""Integration tests for stat command."""
def test_stat_output(self):
object_uri = self.CreateObject(contents='z')
stdout = self.RunGsUtil(['stat', suri(object_uri)], return_stdout=True)
self.assertIn(object_uri.uri, stdout)
self.assertIn('Creation time:', stdout)
self.assertIn('Cache-Control:', stdout)
self.assertIn('Content-Encoding:', stdout)
self.assertIn('Content-Length:', stdout)
self.assertIn('Content-Type:', stdout)
self.assertIn('Hash (crc32c):', stdout)
self.assertIn('Hash (md5):', stdout)
self.assertIn('ETag:', stdout)
self.assertIn('Generation:', stdout)
self.assertIn('Metageneration:', stdout)
def test_minus_q_stat(self):
object_uri = self.CreateObject(contents='z')
stdout = self.RunGsUtil(['-q', 'stat', suri(object_uri)],
return_stdout=True)
self.assertEquals(0, len(stdout))
stdout = self.RunGsUtil(['-q', 'stat', suri(object_uri, 'junk')],
return_stdout=True, expected_status=1)
self.assertEquals(0, len(stdout))
def test_stat_of_non_object_uri(self):
self.RunGsUtil(['-q', 'stat', 'gs://'], expected_status=1)
self.RunGsUtil(['-q', 'stat', 'gs://bucket/object'], expected_status=1)
self.RunGsUtil(['-q', 'stat', 'file://tmp/abc'], expected_status=1)
|
#!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
import doctest
import nodepy
import unittest
import os
import subprocess
import tempfile
import sys
import nbformat
if sys.version_info >= (3,0):
kernel = 'python3'
else:
kernel = 'python2'
def _notebook_run(path):
"""Execute a notebook via nbconvert and collect output.
:returns (parsed nb object, execution errors)
"""
with tempfile.NamedTemporaryFile(suffix=".ipynb") as fout:
args = ["jupyter", "nbconvert", "--to", "notebook", "--execute",
"--ExecutePreprocessor.timeout=120",
"--ExecutePreprocessor.kernel_name="+kernel,
"--output", fout.name, path]
subprocess.check_call(args)
fout.seek(0)
nb = nbformat.reads(fout.read().decode('utf-8'), nbformat.current_nbformat)
errors = [output for cell in nb.cells if "outputs" in cell
for output in cell["outputs"]
if output.output_type == "error"]
return nb, errors
def run_tests():
for filename in os.listdir('./examples'):
if (filename.split('.')[-1] == 'ipynb' and
filename not in ['Internal_stability_SO.ipynb',
'Introduction to NodePy.ipynb',
'stability_polynomial_speed.ipynb']):
print('running notebook: '+ filename)
_, errors = _notebook_run('./examples/'+filename)
if errors != []:
raise Exception('Errors found in notebook: ' + filename)
for module_name in ['runge_kutta_method',
'linear_multistep_method',
'twostep_runge_kutta_method',
'downwind_runge_kutta_method',
'ivp',
'low_storage_rk',
'rooted_trees',
'snp',
'stability_function',
'general_linear_method',
'ode_solver',
'semidisc',
'strmanip',
'utils',
'graph',
'convergence',
'loadmethod']:
module = nodepy.__getattribute__(module_name)
doctest.testmod(module)
unittest.main(module='nodepy.unit_tests',exit=False)
if __name__ == '__main__':
run_tests()
|
import sys
import time
import argparse
import requests
import bs4
# Douban house renting groups
MAIN_URLS = {
'hangzhou': 'https://www.douban.com/group/145219/',
'beijing' : 'https://www.douban.com/group/fangzi/',
'shanghai': 'https://www.douban.com/group/shanghaizufang/'
}
# Open URL, return HTML data
def open_url(url):
# fake User-Agent
ua_headers = {
"User-Agent":
"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
}
res = requests.get(url, headers = ua_headers)
return res.text
# Find target
def find_target(html):
soup = bs4.BeautifulSoup(html, "html.parser")
target_table = soup.find_all("table", class_ = "olt")
target_td = target_table[0].find_all("td", class_ = "title")
return target_td
# Search keywords in target
def find_keyword(target_td, keyword = ''):
for i in target_td:
# keyword fit
if(i.a['title'].find(keyword) != -1):
print(i.a['title'])
print(i.a['href'])
else:
continue
# Main process
def main():
# Parse CLI arguments
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--search', help='search keywords')
parser.add_argument('-l', '--limit', help='records limitation')
parser.add_argument('-r', '--region', help='regions')
args = parser.parse_args()
# Get the CLI arguments
# s = args.search; l = args.limit; r = args.region;
# Set default arguments
s = ''
l = 1000
r = 'hangzhou'
# Set CLI arguments
if(args.search != None):
s = args.search
if(args.limit != None):
l = args.limit
if(args.region != None):
r = args.region
# from 0 to limit, step 25
for i in range(0, int(l), 25):
html = open_url(MAIN_URLS[r] + 'discussion' + '?start=' + str(i))
target = find_target(html)
find_keyword(target, s)
if __name__ == '__main__':
main()
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
from PostgreSQLConnector import PostgresConnector
import json
# Program that connects to Urban Dictionary.com & oxford dictionary.com
# to search for words and determine if each word is sexist or not
class UrbanDictCrawler:
def __init__(self, fileName): # format = 'json' or 'csv"
self.session_urban = requests.session()
self.fileName = fileName
self.psql = PostgresConnector()
self.conn = self.psql.connect()
self.cur = self.conn.cursor()
self.english_word_list = []
# with open('words_dictionary.json') as english_dict:
if fileName[-4:] == 'json':
with open(fileName) as english_dict:
d = json.load(english_dict)
english_dict.close()
self.english_word_list = d.keys()
else: # csv
with open(fileName) as english_dict:
self.csvFile = pd.read_csv(fileName)
def crawl(self):
url_urban = "https://www.urbandictionary.com/define.php?term="
isSexist = 0
# print("fine until line 32")
if self.fileName[-4:] == 'json':
for word in self.english_word_list:
# print("fine until line 35")
res = self.session_urban.get(url_urban + word) # This is where we actually connect to the URL
# print(res)
soup = BeautifulSoup(res.text, "html.parser") # this is where we use the html parser to parse
if soup.find_all('a',href="/category.php?category=sex"): # if there exists at least 1 definition of the word that is labeled as sexist, (the anchor tags <a> that has the attr href= ~)
stmt= "UPDATE \"LabeledWords\" SET urban_sexist = 1 WHERE word = \'{}\'".format(word)
isSexist = 1
# else :
# stmt= "UPDATE \"LabeledWords\" SET urban_sexist = 0 WHERE word = {}".format(word)
# print(word +": "+ str(isSexist))
self.cur.execute(stmt)
self.conn.commit()
time.sleep(0.2)
else: # in case of the csv file,
for row_index, key_val in self.csvFile.iterrows():
# url_urban += key_val
# url_oxford += key_val
res = self.session_urban.get(url_urban + str(key_val[0])) # This is where we actually connect to the URL
soup = BeautifulSoup(res.text, "html.parser") # this is where we use the html parser to parse
if soup.find_all('a',href="/category.php?category=sex"): # if there exists at least 1 definition of the word that is labeled as sexist, (the anchor tags <a> that has the attr href= ~)
stmt= "UPDATE \"LabeledWords\" SET urban_sexist = 1 WHERE word = \'{}\'".format(key_val[0])
isSexist = 1
# else :
# stmt= "UPDATE \"LabeledWords\" SET urban_sexist = 0 WHERE word = {}".format(key_val[0])
print(key_val[0] + ": " + isSexist)
self.cur.execute(stmt)
self.conn.commit()
time.sleep(0.2)
|
from ds_discovery import Controller
import os
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=DeprecationWarning)
__author__ = 'Darryl Oatridge'
def domain_controller():
# Controller
uri_pm_repo = os.environ.get('HADRON_PM_REPO', None)
controller = Controller.from_env(uri_pm_repo=uri_pm_repo, default_save=False, has_contract=True)
run_book = os.environ.get('HADRON_CONTROLLER_RUNBOOK', None)
repeat = os.environ.get('HADRON_CONTROLLER_REPEAT', None)
sleep = os.environ.get('HADRON_CONTROLLER_SLEEP', None)
controller.run_controller(run_book=run_book, repeat=repeat, sleep=sleep)
if __name__ == '__main__':
domain_controller()
|
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import Callback, ModelCheckpoint
from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import save_model
import tensorflow as tf
import tempfile
# Authenticate your Google account (this will open a window in a web browser)
from pydrive.auth import GoogleAuth
gauth = GoogleAuth()
gauth.LocalWebserverAuth()
# Create GoogleDrive instance
from pydrive.drive import GoogleDrive
drive = GoogleDrive(gauth)
# Callback class for saving to GDrive
class GoogleDriveSaver(Callback):
def __init__(self, folder_name, frequency=1):
super().__init__()
self.frequency = frequency
# Search for folder to save in
file_list = drive.ListFile({'q': f"title='{folder_name}' and trashed=false and mimeType='application/vnd.google-apps.folder'"}).GetList()
if len(file_list) > 1:
raise ValueError('There are multiple folders with that specified folder name')
elif len(file_list) == 0:
raise ValueError('No folders match that specified folder name')
# Save the folder's ID
self.folder_id = file_list[0]['id']
def on_epoch_end(self, epoch, logs=None):
if epoch % self.frequency == 0:
# Unfortunately we can't get the raw save file output of model.save, so we need
# to save it to a tempfile and store that tempfile in Google Drive
temp_save = tempfile.NamedTemporaryFile(suffix='.hdf5')
self.model.save(temp_save.name)
file = drive.CreateFile({'title': f'model-save_epoch-{epoch}.hdf5', 'parents': [{'id': self.folder_id}]})
file.SetContentFile(temp_save.name)
file.Upload()
temp_save.close()
google_drive_saver = GoogleDriveSaver('test-saves')
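# Note: the optional frequency argument controls how often a checkpoint is uploaded,
# e.g. GoogleDriveSaver('test-saves', frequency=5) uploads on every 5th epoch.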
### Regular setup for MNIST ###
# Adapted from: https://keras.io/examples/mnist_cnn/
batch_size = 128
num_classes = 10
epochs = 12
img_rows, img_cols = 28, 28
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=tf.keras.losses.categorical_crossentropy,
optimizer=tf.keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test),
callbacks=[google_drive_saver]) # callback for saving in Google Drive
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
|
import inspect
# from collections import OrderedDict as odict
import numpy as np
from galry import Manager, TextVisual, get_color, NavigationEventProcessor, \
DefaultEventProcessor, EventProcessor, GridEventProcessor, ordict, \
log_debug, log_info, log_warn
__all__ = ['InteractionManager']
class InteractionManager(Manager):
"""This class implements the processing of the raised interaction events.
To be overridden.
"""
# Initialization methods
# ----------------------
def __init__(self, parent):
super(InteractionManager, self).__init__(parent)
self.cursor = None
self.prev_event = None
self.processors = ordict()
self.initialize_default(
constrain_navigation=self.parent.constrain_navigation,
momentum=self.parent.momentum)
self.initialize()
def initialize(self):
"""Initialize the InteractionManager.
To be overridden.
"""
pass
def initialize_default(self, **kwargs):
pass
# Processor methods
# -----------------
def get_processors(self):
"""Return all processors."""
return self.processors
def get_processor(self, name):
"""Return a processor from its name."""
if name is None:
name = 'processor0'
return self.processors.get(name, None)
def add_processor(self, cls, *args, **kwargs):
"""Add a new processor, which handles processing of interaction events.
Several processors can be defined in an InteractionManager instance.
One event can be handled by several processors.
"""
# get the name of the visual from kwargs
name = kwargs.pop('name', 'processor%d' % (len(self.get_processors())))
if self.get_processor(name):
raise ValueError("Processor name '%s' already exists." % name)
activated = kwargs.pop('activated', True)
processor = cls(self, *args, **kwargs)
self.processors[name] = processor
processor.activate(activated)
return processor
def add_default_processor(self):
"""Add a default processor, useful to add handlers for events
in the InteractionManager without explicitly creating a new
processor."""
return self.add_processor(EventProcessor, name='default_processor')
def register(self, event, method):
"""Register a new handler for an event, using the manager's default
processor."""
processor = self.get_processor('default_processor')
if processor is None:
processor = self.add_default_processor()
processor.register(event, method)
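# Illustrative example (hypothetical names): a subclass can register a custom
# handler through the default processor in its initialize() method, e.g.
#
#   class MyInteractionManager(InteractionManager):
#       def initialize(self):
#           self.register('MyEvent', self.process_my_event)
#       def process_my_event(self, parameter):
#           log_info("MyEvent parameter: %s" % (parameter,))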
# Event processing methods
# ------------------------
def process_event(self, event, parameter):
"""Process an event.
This is the main method of this class. It is called as soon as an
interaction event is raised by an user action.
Arguments:
* event: the event to process, an InteractionEvent string.
* parameter: the parameter returned by the param_getter function
specified in the related binding.
"""
# process None events in all processors
if event is None and self.prev_event is not None:
for name, processor in self.get_processors().iteritems():
processor.process_none()
self.cursor = None
# process events in all processors
if event is not None:
for name, processor in self.get_processors().iteritems():
if processor.activated and processor.registered(event):
# print name, event
processor.process(event, parameter)
cursor = processor.get_cursor()
if self.cursor is None:
self.cursor = cursor
self.prev_event = event
def get_cursor(self):
return self.cursor
|
#!/usr/bin/env python3
import os
import sys
thispath = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(os.path.dirname(thispath),"helper"))
from MiscFxns import *
from StandardModules import *
import traceback  # used in the exception handler below
def CompareEgy(EgyIn):
return EgyIn+224.912529687124<0.00001
def CompareGrad(GradIn):
CorrectGrad=[
0.00631057813355, 0.00571458363554, 0.05476152065996,
0.02287072160272, -0.0002840915734, -0.03359062789176,
-0.02457654725095, -0.00435313214139, -0.02443656592336,
-0.02033326759132, -0.04939904659428, -0.00601012407546,
0.01536321804528, 0.02452313009004, -0.01889869345071,
0.0056070168479, 0.02707750704665, 0.03157680066598,
0.01965867456494, 0.03636269982351, -0.03762798149958,
-0.03166475907529, -0.02714461080685, 0.00193798500615,
0.00676436472219, -0.01249703947853, 0.03228768650336]
AllGood=True
for i in range(0,27):
AllGood=AllGood and CorrectGrad[i]-GradIn[i]<0.00001
return AllGood
def Run(mm):
try:
tester = psr.testing.Tester("Testing SCF")
tester.print_header()
LoadDefaultModules(mm)
mm.change_option("PSR_SCF","BASIS_SET","sto-3g")
MyMod=mm.get_module("PSR_SCF",0)
mol=psr.system.MakeSystem("""
0 1
O 1.2361419 1.0137761 -0.0612424
H 0.5104418 0.8944555 0.5514190
H 1.9926927 1.1973129 0.4956931
O -0.9957202 0.0160415 1.2422556
H -1.4542703 -0.5669741 1.8472817
H -0.9377950 -0.4817912 0.4267562
O -0.2432343 -1.0198566 -1.1953808
H 0.4367536 -0.3759433 -0.9973297
H -0.5031835 -0.8251492 -2.0957959
""")
mol = ApplyBasis(mol,"sto-3g","sto-3g")
wfn=psr.datastore.Wavefunction()
wfn.system=mol
NewWfn,Egy=MyMod.Deriv(0,wfn)
tester.test("Testing Energy via Deriv(0)", True, CompareEgy, Egy[0])
NewWfn,Egy=MyMod.Energy(wfn)
tester.test("Testing Energy via Energy()", True, CompareEgy, Egy)
NewWfn,Grad=MyMod.Deriv(1,wfn)
tester.test("Testing Gradient via Deriv(1)", True, CompareGrad, Grad)
NewWfn,Grad=MyMod.Gradient(wfn)
tester.test("Testing Gradient via Gradient()", True, CompareGrad, Grad)
tester.print_results()
except Exception as e:
psr.output.Output("Caught exception in main handler\n")
traceback.print_exc()
with psr.ModuleAdministrator() as mm:
Run(mm)
psr.finalize()
|
from .wide_resnet import wide_resnet_28_2
|
# -*- coding: utf-8 -*-
# date: 2018-12-02 15:47
from torch.autograd import Variable
from .functional import subsequent_mask
class Batch(object):
"""
Object for holding a batch of data with mask during training.
"""
def __init__(self, src, trg=None, pad=0):
self.src = src
self.src_mask = (src != pad).unsqueeze(-2)
if trg is not None:
self.trg = trg[:, :-1]
self.trg_y = trg[:, 1:]
self.trg_mask = self.make_std_mask(self.trg, pad)
self.ntokens = (self.trg_y != pad).data.sum().item()
@staticmethod
def make_std_mask(tgt, pad):
"""
Create a mask to hide padding and future words.
"""
tgt_mask = (tgt != pad).unsqueeze(-2)
tgt_mask = tgt_mask & Variable(subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))
return tgt_mask
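# Illustrative usage (toy batch with pad index 0; shapes noted for clarity):
#
#   import torch
#   src = torch.tensor([[1, 2, 3, 0]])
#   trg = torch.tensor([[1, 4, 5, 0]])
#   batch = Batch(src, trg, pad=0)
#   # batch.src_mask: (1, 1, 4), batch.trg_mask: (1, 3, 3), batch.ntokens == 2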
|
from django.shortcuts import render
from .models import language
# Create your views here.
def Serach(req):
Input = req.GET
key = Input.get('key')
result = language.objects.filter(name__contains=key)
return render(req,'Serach.html',{'data':result,'title':'方言搜索'})
def Test(req):
return render(req,'Test.html',context={})
|
from math import e, exp, log
print(pow(e, 1) == exp(log(e)))
print(pow(2, 2) == exp(2 * log(2)))
print(log(e, e) == exp(0))
|
"""removed user_x_branch
Revision ID: 182718a0a9ae
Revises: f7f61b0fadff
Create Date: 2020-11-26 02:10:39.739942
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '182718a0a9ae'
down_revision = 'f7f61b0fadff'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('user_x_branch')
op.add_column(
'users', sa.Column('branch_id', sa.Integer(), nullable=True)
)
op.create_foreign_key(
'fk_users_branches',
'users', 'branches',
['branch_id'],
['id']
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('fk_users_branches', 'users', type_='foreignkey')
op.drop_column('users', 'branch_id')
op.create_table(
'user_x_branch',
sa.Column(
'user_id', sa.INTEGER(), autoincrement=False, nullable=False
),
sa.Column(
'branch_id', sa.INTEGER(), autoincrement=False, nullable=False
),
sa.ForeignKeyConstraint(
tuple(['branch_id']),
['branches.id'],
name='user_x_branch_branch_id_fkey')
,
sa.ForeignKeyConstraint(
tuple(['user_id']),
['users.id'],
name='user_x_branch_user_id_fkey'
)
)
# ### end Alembic commands ###
|
""" Test the bss module.
MIT License
(C) Copyright [2020] Hewlett Packard Enterprise Development LP
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# pylint: disable=invalid-name
# pylint: disable=too-many-arguments, unused-argument
import json
from ..utils.runner import cli_runner # pylint: disable=unused-import
from ..utils.rest import rest_mock # pylint: disable=unused-import
# pylint: disable=redefined-outer-name
def test_cray_bss_help_info(cli_runner, rest_mock):
""" Test `cray bss` to make sure the expected commands are available """
runner, cli, _ = cli_runner
result = runner.invoke(cli, ['bss'])
outputs = [
"cli bss [OPTIONS] COMMAND [ARGS]...",
"Boot Script Service API",
"bootparameters",
"bootscript",
"dumpstate",
"hosts",
]
for out in outputs:
assert out in result.output
assert result.exit_code == 0
# pylint: disable=redefined-outer-name
def test_cray_bss_bootparameters(cli_runner, rest_mock):
""" Test `cray bss bootparameters` to make sure the expected commands are available """
runner, cli, _ = cli_runner
result = runner.invoke(cli, ['bss', 'bootparameters'])
outputs = [
"cli bss bootparameters [OPTIONS] COMMAND [ARGS]...",
"create",
"delete",
"list",
"replace",
"update",
]
for out in outputs:
assert out in result.output
assert result.exit_code == 0
# pylint: disable=redefined-outer-name
def test_cray_bss_bootscript(cli_runner, rest_mock):
""" Test `cray bss bootscript` to make sure the expected commands are available """
runner, cli, _ = cli_runner
result = runner.invoke(cli, ['bss', 'bootscript'])
outputs = [
"cli bss bootscript [OPTIONS] COMMAND [ARGS]...",
"list"
]
for out in outputs:
assert out in result.output
assert result.exit_code == 0
# pylint: disable=redefined-outer-name
def test_cray_bss_dumpstate(cli_runner, rest_mock):
""" Test `cray bss dumpstate` to make sure the expected commands are available """
runner, cli, _ = cli_runner
result = runner.invoke(cli, ['bss', 'dumpstate'])
outputs = [
"cli bss dumpstate [OPTIONS] COMMAND [ARGS]...",
"list",
]
for out in outputs:
assert out in result.output
assert result.exit_code == 0
# pylint: disable=redefined-outer-name
def test_cray_bss_hosts(cli_runner, rest_mock):
""" Test `cray capmc get_xname_status` to make sure the expected commands are available """
runner, cli, _ = cli_runner
result = runner.invoke(cli, ['bss', 'hosts'])
outputs = [
"cli bss hosts [OPTIONS] COMMAND [ARGS]...",
"create",
"list",
]
for out in outputs:
assert out in result.output
assert result.exit_code == 0
kernel = '/test/kernel'
initrd = '/test/initrd'
params = 'foo bar params'
host = 'foo'
nid = 42
mac = "11_22_33_44_55_66"
ts = '12345678'
arch = 'x86_64'
retry = '1'
# pylint: disable=redefined-outer-name
def test_cray_bss_rest_call_create(cli_runner, rest_mock):
""" Test `cray bss create` with various params """
# pylint: disable=protected-access
runner, cli, opts = cli_runner
url_template = '/apis/bss/boot/v1/bootparameters'
config = opts['default']
hostname = config['hostname']
result = runner.invoke(cli, ['bss', 'bootparameters', 'create',
'--hosts', host,
'--kernel', kernel,
'--initrd', initrd,
'--nids', str(nid),
'--macs', mac,
'--params', params])
print(result.output)
assert result.exit_code == 0
data = json.loads(result.output)
assert data['method'].lower() == 'post'
assert data.get('body')
body = data.get('body')
assert body['kernel'] == kernel
assert body['initrd'] == initrd
assert body['params'] == params
assert host in body['hosts']
assert nid in body['nids']
assert mac in body['macs']
uri = data['url'].split(hostname)[-1]
assert uri == url_template
# pylint: disable=redefined-outer-name
def test_cray_bss_rest_call_replace(cli_runner, rest_mock):
""" Test `cray bss create` with various params """
# pylint: disable=protected-access
runner, cli, opts = cli_runner
url_template = '/apis/bss/boot/v1/bootparameters'
config = opts['default']
hostname = config['hostname']
result = runner.invoke(cli, ['bss', 'bootparameters', 'replace',
'--hosts', host,
'--kernel', kernel,
'--initrd', initrd,
'--nids', str(nid),
'--macs', mac,
'--params', params])
print(result.output)
assert result.exit_code == 0
data = json.loads(result.output)
assert data['method'].lower() == 'put'
assert data.get('body')
body = data.get('body')
assert body['kernel'] == kernel
assert body['initrd'] == initrd
assert body['params'] == params
assert host in body['hosts']
assert nid in body['nids']
assert mac in body['macs']
uri = data['url'].split(hostname)[-1]
assert uri == url_template
# pylint: disable=redefined-outer-name
def test_cray_bss_bootscript_call(cli_runner, rest_mock):
""" Test `cray bss create` with various params """
# pylint: disable=protected-access
runner, cli, opts = cli_runner
url_template = '/apis/bss/boot/v1/bootscript'
config = opts['default']
hostname = config['hostname']
result = runner.invoke(cli, ['bss', 'bootscript', 'list',
'--name', host,
'--arch', arch,
'--mac', mac,
'--nid', str(nid),
'--retry', retry,
'--ts', str(ts)])
print(result.output)
assert result.exit_code == 0
data = json.loads(result.output)
assert data['method'].lower() == 'get'
assert data.get('body') is None
uri = data['url'].split(hostname)[-1]
assert url_template in uri
assert "name=" + host in uri
assert "arch=" + arch in uri
assert "mac=" + mac in uri
assert "nid=" + str(nid) in uri
assert "retry=" + retry in uri
assert "ts=" + ts in uri
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes, to_text
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
terminal_stderr_re = [
re.compile(br"% ?Error"),
re.compile(br"% User not present"),
re.compile(br"% ?Bad secret"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
# Strings like this regarding VLANs are not errors
re.compile(br"[^\r\n]+ not found(?! in current VLAN)", re.I),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"[^\r\n](?<! shell )\/bin\/(?:ba)?sh"),
re.compile(br"% More than \d+ OSPF instance", re.I),
re.compile(br"% Subnet [0-9a-f.:/]+ overlaps", re.I),
re.compile(br"Maximum number of pending sessions has been reached"),
]
def on_open_shell(self):
pass
def on_become(self, passwd=None):
if self._get_prompt().endswith(b'#'):
return
cmd = {u'command': u'enable'}
if passwd:
cmd[u'prompt'] = to_text(r"[\r\n]?password: $", errors='surrogate_or_strict')
cmd[u'answer'] = passwd
cmd[u'prompt_retry_check'] = True
try:
self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
prompt = self._get_prompt()
if prompt is None or not prompt.endswith(b'#'):
raise AnsibleConnectionFailure('failed to elevate privilege to enable mode still at prompt [%s]' % prompt)
except AnsibleConnectionFailure as e:
prompt = self._get_prompt()
raise AnsibleConnectionFailure('unable to elevate privilege to enable mode, at prompt [%s] with error: %s' % (prompt, e.message))
def on_unbecome(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
return
if b'(config' in prompt:
self._exec_cli_command(b'end')
self._exec_cli_command(b'disable')
elif prompt.endswith(b'#'):
self._exec_cli_command(b'disable')
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from utils import padding_idx
from ask_agent import AskAgent
from verbal_ask_agent import VerbalAskAgent
class EncoderLSTM(nn.Module):
def __init__(self, vocab_size, embedding_size, hidden_size, padding_idx,
dropout_ratio, device, bidirectional=False, num_layers=1):
super(EncoderLSTM, self).__init__()
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.drop = nn.Dropout(p=dropout_ratio)
self.num_directions = 2 if bidirectional else 1
self.num_layers = num_layers // (2 if bidirectional else 1)
self.embedding = nn.Embedding(vocab_size, embedding_size, padding_idx)
self.lstm = nn.LSTM(embedding_size, hidden_size, self.num_layers,
batch_first=True, dropout=dropout_ratio if num_layers > 1 else 0,
bidirectional=bidirectional)
self.encoder2decoder = nn.Linear(hidden_size * self.num_directions,
hidden_size * self.num_directions)
self.device = device
def init_state(self, inputs):
batch_size = inputs.size(0)
h0 = torch.zeros(
(self.num_layers * self.num_directions, batch_size, self.hidden_size),
dtype=torch.float,
device=self.device)
c0 = torch.zeros(
(self.num_layers * self.num_directions, batch_size, self.hidden_size),
dtype=torch.float,
device=self.device)
return h0, c0
def forward(self, inputs, lengths):
# Sort inputs by length
sorted_lengths, forward_index_map = lengths.sort(0, True)
inputs = inputs[forward_index_map]
embeds = self.embedding(inputs) # (batch, seq_len, embedding_size)
embeds = self.drop(embeds)
state = self.init_state(inputs)
packed_embeds = pack_padded_sequence(embeds, sorted_lengths.to('cpu'), batch_first=True)
enc_h, state = self.lstm(packed_embeds, state)
state = (self.encoder2decoder(state[0]), self.encoder2decoder(state[1]))
ctx, lengths = pad_packed_sequence(enc_h, batch_first=True)
ctx = self.drop(ctx)
# Unsort outputs
_, backward_index_map = forward_index_map.sort(0, False)
ctx = ctx[backward_index_map]
return ctx, state
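# Minimal usage sketch (dummy sizes, not part of the original training code): it
# exercises the sort -> pack -> LSTM -> unpack -> unsort flow in forward() above.
def _encoder_lstm_example():
    device = torch.device("cpu")
    enc = EncoderLSTM(vocab_size=100, embedding_size=16, hidden_size=32,
                      padding_idx=0, dropout_ratio=0.0, device=device)
    inputs = torch.randint(1, 100, (4, 7))   # batch of 4 token-id sequences, padded length 7
    lengths = torch.tensor([7, 5, 3, 2])     # true length of each sequence
    ctx, (h, c) = enc(inputs, lengths)
    return ctx.shape, h.shape                # torch.Size([4, 7, 32]), torch.Size([1, 4, 32])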
class Attention(nn.Module):
def __init__(self, dim, coverage_dim=None):
super(Attention, self).__init__()
self.linear_in = nn.Linear(dim, dim, bias=False)
self.sm = nn.Softmax(dim=1)
self.linear_out = nn.Linear(dim * 2, dim, bias=False)
self.tanh = nn.Tanh()
if coverage_dim is not None:
self.cov_rnn = nn.GRU(dim * 2 + 1, coverage_dim, 1)
self.cov_linear = nn.Linear(coverage_dim, dim)
def forward(self, h, context, mask=None, cov=None):
target = self.linear_in(h).unsqueeze(2) # batch x dim x 1
if cov is not None:
context = context + self.cov_linear(cov)
# Get attention
attn = torch.bmm(context, target).squeeze(2) # batch x seq_len
if mask is not None:
# -Inf masking prior to the softmax
attn.data.masked_fill_(mask, -float('inf'))
attn = self.sm(attn)
attn3 = attn.view(attn.size(0), 1, attn.size(1)) # batch x 1 x seq_len
weighted_context = torch.bmm(attn3, context).squeeze(1) # batch x dim
h_tilde = torch.cat((weighted_context, h), 1)
h_tilde = self.tanh(self.linear_out(h_tilde))
# Update coverage vector
if hasattr(self, 'cov_rnn') and hasattr(self, 'cov_linear'):
cov_expand = cov.view(-1, cov.size(2))
context_expand = context.view(-1, context.size(2))
h_expand = h.unsqueeze(1).expand(-1, cov.size(1), -1).contiguous().view(-1, h.size(1))
attn_expand = attn.unsqueeze(2).view(-1, 1)
concat_input = torch.cat((context_expand, h_expand, attn_expand), 1)
new_cov, _ = self.cov_rnn(concat_input.unsqueeze(0), cov_expand.unsqueeze(0))
new_cov = new_cov.squeeze(0).view_as(cov)
else:
new_cov = None
return h_tilde, attn, new_cov
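# Minimal shape sketch (assumed dimensions, not part of the original training code):
# attends a batch of decoder hidden states over a batch of encoder contexts.
def _attention_example():
    attn_layer = Attention(8)            # no coverage vector
    h = torch.randn(2, 8)                # batch x dim
    context = torch.randn(2, 5, 8)       # batch x seq_len x dim
    h_tilde, alpha, _ = attn_layer(h, context)
    return h_tilde.shape, alpha.shape    # torch.Size([2, 8]), torch.Size([2, 5])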
class AskAttnDecoderLSTM(nn.Module):
def __init__(self, hparams, agent_class, device):
super(AskAttnDecoderLSTM, self).__init__()
self.device = device
self.nav_embedding = nn.Embedding(agent_class.n_input_nav_actions(),
hparams.nav_embed_size, padding_idx=padding_idx)
self.ask_embedding = nn.Embedding(agent_class.n_input_ask_actions(hparams),
hparams.ask_embed_size)
lstm_input_size = hparams.nav_embed_size + hparams.ask_embed_size + hparams.img_feature_size
self.budget_embedding = nn.Embedding(hparams.max_ask_budget, hparams.budget_embed_size)
self.drop = nn.Dropout(p=hparams.dropout_ratio)
self.lstm = nn.LSTM(
lstm_input_size, hparams.hidden_size, hparams.num_lstm_layers,
dropout=hparams.dropout_ratio if hparams.num_lstm_layers > 1 else 0,
bidirectional=False)
self.attention_layer = Attention(hparams.hidden_size,
coverage_dim=hparams.coverage_size
if hasattr(hparams, 'coverage_size') else None)
self.nav_predictor = nn.Linear(hparams.hidden_size, agent_class.n_output_nav_actions())
ask_predictor_input_size = hparams.hidden_size * 2 + \
agent_class.n_output_nav_actions() + hparams.img_feature_size + \
hparams.budget_embed_size
ask_predictor_layers = []
current_layer_size = ask_predictor_input_size
next_layer_size = hparams.hidden_size
if not hasattr(hparams, 'num_ask_layers'):
hparams.num_ask_layers = 1
for i in range(hparams.num_ask_layers):
ask_predictor_layers.append(nn.Linear(current_layer_size, next_layer_size))
ask_predictor_layers.append(nn.ReLU())
ask_predictor_layers.append(nn.Dropout(p=hparams.dropout_ratio))
current_layer_size = next_layer_size
next_layer_size //= 2
ask_predictor_layers.append(nn.Linear(current_layer_size, agent_class.n_output_ask_actions(hparams)))
self.ask_predictor = nn.Sequential(*tuple(ask_predictor_layers))
self.backprop_softmax = hparams.backprop_softmax
self.backprop_ask_features = hparams.backprop_ask_features
def _lstm_and_attend(self, nav_action, ask_action, feature, h, ctx, ctx_mask,
budget=None, cov=None):
nav_embeds = self.nav_embedding(nav_action)
ask_embeds = self.ask_embedding(ask_action)
lstm_inputs = [nav_embeds, ask_embeds, feature]
concat_lstm_input = torch.cat(lstm_inputs, dim=1)
drop = self.drop(concat_lstm_input)
output, new_h = self.lstm(drop.unsqueeze(0), h)
output = output.squeeze(0)
output_drop = self.drop(output)
# Attention
h_tilde, alpha, new_cov = self.attention_layer(output_drop, ctx, ctx_mask, cov=cov)
return h_tilde, alpha, output_drop, new_h, new_cov
def forward(self, nav_action, ask_action, feature, h, ctx, ctx_mask,
nav_logit_mask, ask_logit_mask,
budget=None, cov=None):
h_tilde, alpha, output_drop, new_h, new_cov = self._lstm_and_attend(
nav_action, ask_action, feature, h, ctx, ctx_mask, budget=budget, cov=cov)
# Predict nav action.
nav_logit = self.nav_predictor(h_tilde)
nav_logit.data.masked_fill_(nav_logit_mask, -float('inf'))
nav_softmax = F.softmax(nav_logit, dim=1)
if not self.backprop_softmax:
nav_softmax = nav_softmax.detach()
assert budget is not None
budget_embeds = self.budget_embedding(budget)
ask_predictor_inputs = [h_tilde, output_drop, feature, nav_softmax, budget_embeds]
# Predict ask action.
concat_ask_predictor_input = torch.cat(ask_predictor_inputs, dim=1)
if not self.backprop_ask_features:
concat_ask_predictor_input = concat_ask_predictor_input.detach()
ask_logit = self.ask_predictor(concat_ask_predictor_input)
ask_logit.data.masked_fill_(ask_logit_mask, -float('inf'))
return new_h, alpha, nav_logit, nav_softmax, ask_logit, new_cov
def forward_nav(self, nav_action, ask_action, feature, h, ctx, ctx_mask,
nav_logit_mask, budget=None, cov=None):
h_tilde, alpha, output_drop, new_h, new_cov = self._lstm_and_attend(
nav_action, ask_action, feature, h, ctx, ctx_mask, budget=budget, cov=cov)
# Predict nav action.
nav_logit = self.nav_predictor(h_tilde)
nav_logit.data.masked_fill_(nav_logit_mask, -float('inf'))
nav_softmax = F.softmax(nav_logit, dim=1)
return new_h, alpha, nav_logit, nav_softmax, new_cov
class AttentionSeq2SeqModel(nn.Module):
def __init__(self, vocab_size, hparams, device):
super(AttentionSeq2SeqModel, self).__init__()
enc_hidden_size = hparams.hidden_size // 2 \
if hparams.bidirectional else hparams.hidden_size
self.encoder = EncoderLSTM(vocab_size,
hparams.word_embed_size,
enc_hidden_size,
padding_idx,
hparams.dropout_ratio,
device,
bidirectional=hparams.bidirectional,
num_layers=hparams.num_lstm_layers)
if 'verbal' in hparams.advisor:
agent_class = VerbalAskAgent
elif hparams.advisor == 'direct':
agent_class = AskAgent
else:
sys.exit('%s advisor not supported' % hparams.advisor)
self.decoder = AskAttnDecoderLSTM(hparams, agent_class, device)
def encode(self, *args, **kwargs):
return self.encoder(*args, **kwargs)
def decode(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
def decode_nav(self, *args, **kwargs):
return self.decoder.forward_nav(*args, **kwargs)
|
def check_fibonacci(n):
"""Returns the nth Fibonacci number for n up to 40"""
    fib = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987,
           1597, 2584, 4181, 6765, 10946, 17711, 28657, 46368, 75025, 121393,
           196418, 317811, 514229, 832040, 1346269, 2178309, 3524578, 5702887,
           9227465, 14930352, 24157817, 39088169, 63245986, 102334155]
if n > 40 or n < 1:
print('Sorry, n should be 40 or less and greater than 0')
return 0
else:
return fib[n-1]
def fibonacci_wasteful(n):
"""Recursively calculate the nth Fibonacci number, in obvious but wasteful fashion"""
if n == 1 or n == 2:
return 1
else:
return fibonacci_wasteful(n-1) + fibonacci_wasteful(n-2)
#Used by 'counted' functions for ease of counting
fib_val = 0
fib_counts = []
def fibonacci_wasteful_counted(n, start_count=True):
"""Recursively calculate the nth Fibonacci number, in obvious but wasteful fashion, counting number of calls to each n"""
global fib_counts, fib_val
if start_count:
fib_val = n
fib_counts = [0]*n
fib_counts[n-1] = 1
else:
fib_counts[n-1] += 1
if n == 1 or n == 2:
return 1
else:
return fibonacci_wasteful_counted(n-1, start_count=False) + fibonacci_wasteful_counted(n-2, start_count=False)
def fibonacci(n):
"""Wrapper around the clean recursive Fibonacci routine"""
return fib_clean(n)[1]
def fib_clean(n):
    """Calculate the nth Fibonacci number, without unnecessary work"""
    if n == 1 or n == 2:
        # Base cases: returns (fib(n-1), fib(n)), with fib(0) = 0 and fib(1) = fib(2) = 1
        return n - 1, 1
    else:
        tmp, b = fib_clean(n-1)
        a = b
        b = b + tmp
        return a, b
def fibonacci_counted(n):
"""Wrapper around the clean recursive Fibonacci routine with counts"""
return fib_clean_counted(n)[1]
def fib_clean_counted(n, start_count=True):
    """Calculate the nth Fibonacci number, without unnecessary work, counting number of calls to each n"""
    global fib_counts, fib_val
    if start_count:
        fib_val = n
        fib_counts = [0]*n
        fib_counts[n-1] = 1
    else:
        fib_counts[n-1] += 1
    if n == 1 or n == 2:
        # Base cases: returns (fib(n-1), fib(n)), with fib(0) = 0 and fib(1) = fib(2) = 1
        return n - 1, 1
    else:
        tmp, b = fib_clean_counted(n-1, start_count=False)
        a = b
        b = b + tmp
        return a, b
def fibonacci_loop(n):
    """Calculate the nth Fibonacci number using a loop"""
    nn = n - 2  # Already done 1st and 2nd vals
    fib = [1, 1]
    while nn > 0:
        tmp = fib[0]
        fib[0] = fib[1]
        fib[1] += tmp
        nn -= 1
    # After the loop fib holds [fib(n-1), fib(n)], so the answer is always the second entry
    return fib[1]
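# Quick self-check (not part of the original routines): every implementation above
# should agree that fib(10) = 55, and the counted variant records how often each
# sub-problem is visited.
if __name__ == "__main__":
    assert check_fibonacci(10) == fibonacci(10) == fibonacci_loop(10) == 55
    assert fibonacci_wasteful(10) == 55
    assert fibonacci_wasteful_counted(10) == 55
    print("calls per n (wasteful recursion):", fib_counts)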
|
from Commands.Base import LitsQuestionsCommand
from WareHouse import wareHouse
import webbrowser
import PythonSheep.IOSheep.PrintFormat
class HelpDocument(LitsQuestionsCommand):
def run(self, userNowUsingLanguage:str, mainWareHouse:wareHouse):
mainPrintControler = PythonSheep.IOSheep.PrintFormat.PrintFormat()
print(mainWareHouse.languagesContents[userNowUsingLanguage]["commandsMessage"]["helpDocument"]["helpDocumentTitle1"])
print(mainWareHouse.languagesContents[userNowUsingLanguage]["commandsMessage"]["helpDocument"]["helpDocumentNo1"])
webbrowser.open(mainWareHouse.globalSittings["helpDocumentWebUrl"])
input(mainWareHouse.languagesContents[userNowUsingLanguage]["globalMessageTips"][
"anyKeyContinue_TipsMessage"])
mainPrintControler.UniversalClearScreen()
|
import bs4
import jinja2
import codecs
from collections import defaultdict
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--all", action="store_true", dest="convert_all")
parser.add_argument("--target", action="store", default="chosakukenhou.xml")
args = parser.parse_args()
templateLoader = jinja2.FileSystemLoader(searchpath="./templates/")
templateEnv = jinja2.Environment(loader=templateLoader)
def visitPart(root):
parts = root.findAll("Part")
if parts:
for x in parts:
refined_tree["root"].append(x)
x.html_id = x["Num"]
x.title = x.find("PartTitle").text
x.parent_id = "root"
visitChapter(x)
else:
root.html_id = "root"
visitChapter(root)
def visitChapter(tree):
for x in tree.findAll("Chapter"):
refined_tree[tree.html_id].append(x)
chapter_id = x["Num"]
if tree.html_id == "root":
            # it's a dummy container for laws without parts
x.html_id = chapter_id
else:
x.html_id = "{}.{}".format(tree.html_id, chapter_id)
x.title = x.find("ChapterTitle").text
x.parent_id = tree.html_id
x.parent_group = tree
visitSection(x)
def visitSection(tree):
for x in tree.findAll("Section"):
refined_tree[tree.html_id].append(x)
section_id = x["Num"]
x.html_id = "{}.{}".format(tree.html_id, section_id)
x.title = x.find("SectionTitle").text
x.parent_id = tree.html_id
x.parent_group = tree
visitSubsection(x)
def visitSubsection(tree):
for x in tree.findAll("Subsection"):
refined_tree[tree.html_id].append(x)
subsection_id = x["Num"]
x.html_id = "{}.{}".format(tree.html_id, subsection_id)
x.title = x.find("SubsectionTitle").text
x.parent_id = tree.html_id
x.parent_group = tree
visitDivision(x)
def visitDivision(tree):
for x in tree.findAll("Division"):
refined_tree[tree.html_id].append(x)
division_id = x["Num"]
x.html_id = "{}.{}".format(tree.html_id, division_id)
x.title = x.find("DivisionTitle").text
x.parent_id = tree.html_id
x.parent_group = tree
header_template = templateEnv.get_template("m_header.html")
def render_header(container):
return header_template.render(
container=container,
siblings=refined_tree[container.parent_id],
children=refined_tree[container.html_id])
article_template = templateEnv.get_template("m_article.html")
def render_articles(group):
articles = group.findAll("Article")
address = [group]
x = group
while x.parent_group:
x = x.parent_group
if x.html_id == "root":
            # it's a dummy container for laws without parts
break
address = [x] + address
for a in articles:
try:
a.caption = a.find("ArticleCaption").text
        except AttributeError:
            # some articles have no ArticleCaption
            pass
a.title = a.find("ArticleTitle").text
a.html_id = a["Num"]
paragraphs = a.findAll("Paragraph")
for p in paragraphs:
p.paragraph_id = p["Num"]
p.html_id = "{}.{}".format(a.html_id, p.paragraph_id)
p.sentences = p.find("ParagraphSentence").findAll("Sentence")
p.items = p.findAll("Item")
for x in p.items:
x.html_id = "{}.{}".format(p.html_id, x["Num"])
buf.append(article_template.render(article=a, paragraphs=paragraphs, address=address))
def render(item):
buf.append(render_header(item))
children = refined_tree[item.html_id]
if children:
for x in children:
render(x)
else:
render_articles(item)
def convert_one(target_file):
global refined_tree, buf
tree = bs4.BeautifulSoup(open(target_file), "xml")
refined_tree = defaultdict(list)
visitPart(tree.MainProvision) # updates refined_tree
buf = []
for x in refined_tree["root"]:
render(x) # updates buf
law_title = tree.find("LawTitle").text
output_file = target_file.replace("xml", "html")
base_template = templateEnv.get_template("base.html")
fo = codecs.open(output_file, "w", encoding="utf-8")
fo.write(base_template.render(
law_title=law_title,
layer1=refined_tree["root"],
main="\n".join(buf)))
fo.close()
def convert_all():
import glob
for x in glob.glob("*.xml"):
print(x)
convert_one(x)
if __name__ == "__main__":
if args.convert_all:
convert_all()
else:
convert_one(args.target)
|
from recon.core.module import BaseModule
from urlparse import urlparse
class Module(BaseModule):
meta = {
'name': 'Google CSE Hostname Enumerator',
'author': 'Tim Tomes (@LaNMaSteR53)',
'description': 'Leverages the Google Custom Search Engine API to harvest hosts using the \'site\' search operator. Updates the \'hosts\' table with the results.',
'required_keys': ['google_api', 'google_cse'],
'query': 'SELECT DISTINCT domain FROM domains WHERE domain IS NOT NULL',
}
def module_run(self, domains):
for domain in domains:
self.heading(domain, level=0)
base_query = 'site:' + domain
hosts = []
while True:
query = ''
# build query based on results of previous results
for host in hosts:
query += ' -site:%s' % (host)
query = base_query + query
results = self.search_google_api(query, limit=1)
if not results: break
for result in results:
host = urlparse(result['link']).netloc
if not host in hosts:
hosts.append(host)
# add each host to the database
self.add_hosts(host)
|
from floodsystem.stationdata import build_station_list
from floodsystem.geo import stations_by_distance, stations_within_radius, rivers_with_station, stations_by_river
from floodsystem.geo import rivers_by_station_number
def test_stationsbydistance():
stations = build_station_list()
p = (52.2053, 0.1218)
assert stations_by_distance(stations, p)[0] == ('Cambridge Jesus Lock', 0.840237595667494)
def test_stationwithinradius():
stations = build_station_list()
p = (52.2053, 0.1218)
assert stations_within_radius(stations,p,0) == []
def test_call_riverswithstation():
stations = build_station_list()
rivers_with_station(stations)
def test_call_stationsbyriver():
stations = build_station_list()
stations_by_river(stations)
def test_rivers_by_station_number():
stations = build_station_list()
list_of_rivers = rivers_by_station_number(stations, 9)
assert type(list_of_rivers) == list
|
from os import system, name
system('cls' if name == 'nt' else 'clear')
dsc = ('''CHALLENGE 028:
Write a program that makes the computer "think" of an integer
between 0 and 5 and asks the user to try to guess which number
the computer chose. The program must print on screen whether
the user won or lost.
''')
from random import randint
from time import sleep
n1 = randint(0, 5)
n2 = int(input('Guess a number between 0 and 5: '))
sleep(3)
if n1 == n2:
    print('You win!')
else:
    print('You lose, the correct number was {}'.format(n1))
|
def main():
chk_lst = range(1, 21)
skip = max(chk_lst)
current = skip
while not chk_divisible(current, chk_lst):
current += skip
print(current)
def chk_divisible(n, lst):
chk = all(map(lambda x: n % x == 0, lst))
return chk
if __name__=="__main__":
main()
# Answer: 232792560
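# Cross-check sketch (not part of the original brute-force search): the answer is
# simply lcm(1..20), which reduce + gcd computes directly.
def lcm_check():
    from functools import reduce
    from math import gcd
    return reduce(lambda a, b: a * b // gcd(a, b), range(1, 21))  # 232792560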
|
#!/usr/bin/env python
#
# getopt_tests.py: testing the svn command line processing
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import sys, re, os.path, logging
logger = logging.getLogger()
# Our testing module
import svntest
######################################################################
# Tests
#----------------------------------------------------------------------
# Naming convention for golden files: take the svn command line as a
# single string and apply the following sed transformations:
# echo svn option1 option2 ... | sed -e 's/ /_/g' -e 's/_--/--/g'
# Then append either _stdout or _stderr for the file descriptor to
# compare against.
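# A small sketch of that convention (hypothetical helper, not used by the tests
# below): join the command line with '_' and drop the '_' before long options.
def golden_basename(args):
  "e.g. ['svn', '--version', '--quiet'] -> 'svn--version--quiet'"
  return '_'.join(args).replace('_--', '--')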
def load_expected_output(basename):
"load the expected standard output and standard error"
# This directory contains all the expected output from svn.
getopt_output_dir = os.path.join(os.path.dirname(sys.argv[0]),
'getopt_tests_data')
stdout_filename = os.path.join(getopt_output_dir, basename + '_stdout')
stderr_filename = os.path.join(getopt_output_dir, basename + '_stderr')
exp_stdout = open(stdout_filename, 'r').readlines()
exp_stderr = open(stderr_filename, 'r').readlines()
return exp_stdout, exp_stderr
# With plaintext password storage enabled, `svn --version' emits a warning:
warn_line_re = re.compile("WARNING: Plaintext password storage")
# This is a list of lines to delete.
del_lines_res = [
# In 'svn --version', the date line is variable, for example:
# "compiled Apr 5 2002, 10:08:45"
re.compile(r'\s+compiled\s+'),
# Also for 'svn --version':
re.compile(r"\* ra_(neon|local|svn|serf) :"),
re.compile(r" - handles '(https?|file|svn)' scheme"),
re.compile(r" - with Cyrus SASL authentication"),
re.compile(r" - using serf \d+\.\d+\.\d+"),
re.compile(r"\* fs_(base|fs) :"),
# Remove 'svn --version' list of platform-specific
# auth cache providers.
re.compile(r"\* Wincrypt cache.*"),
re.compile(r"\* Plaintext cache.*"),
re.compile(r"\* Gnome Keyring"),
re.compile(r"\* GPG-Agent"),
re.compile(r"\* Mac OS X Keychain"),
re.compile(r"\* KWallet \(KDE\)"),
]
# This is a list of lines to search and replace text on.
rep_lines_res = [
# In 'svn --version', this line varies, for example:
# "Subversion Client, version 0.10.2-dev (under development)"
# "Subversion Client, version 0.10.2 (r1729)"
(re.compile(r'version \d+\.\d+\.\d+(-[^ ]*)? \(.*\)'),
'version X.Y.Z '),
# The copyright end date keeps changing; fix forever.
(re.compile(r'Copyright \(C\) 20\d\d The Apache '
              r'Software Foundation\.'),
'Copyright (C) YYYY The Apache Software Foundation'),
# In 'svn --version --quiet', we print only the version
# number in a single line.
(re.compile(r'^\d+\.\d+\.\d+(-[a-zA-Z0-9]+)?$'), 'X.Y.Z\n'),
]
# This is a trigger pattern that selects the secondary set of
# delete/replace patterns
switch_res_line = 'System information:'
# This is a list of lines to delete after having seen switch_res_line.
switched_warn_line_re = None
switched_del_lines_res = [
# In svn --version --verbose, dependent libs loaded
# shared libs are optional.
re.compile(r'^\* (loaded|linked)'),
# In svn --version --verbose, remove everything from
# the extended lists
re.compile(r'^ - '),
]
# This is a list of lines to search and replace text on after having
# seen switch_res_line.
switched_rep_lines_res = [
# We don't care about the actual canonical host
  (re.compile(r'^\* running on.*$'), '* running on'),
]
def process_lines(lines):
"delete lines that should not be compared and search and replace the rest"
output = [ ]
warn_re = warn_line_re
del_res = del_lines_res
rep_res = rep_lines_res
skip_next_line = 0
for line in lines:
if skip_next_line:
skip_next_line = 0
continue
if line.startswith(switch_res_line):
warn_re = switched_warn_line_re
del_res = switched_del_lines_res
rep_res = switched_rep_lines_res
# Skip these lines from the output list.
delete_line = 0
if warn_re and warn_re.match(line):
delete_line = 1
skip_next_line = 1 # Ignore the empty line after the warning
else:
for delete_re in del_res:
if delete_re.match(line):
delete_line = 1
break
if delete_line:
continue
# Search and replace text on the rest.
for replace_re, replace_str in rep_res:
line = replace_re.sub(replace_str, line)
output.append(line)
return output
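# Illustration only (not called by the tests): process_lines() normalizes the
# variable version string so golden files stay stable across releases, e.g.
# ['Subversion Client, version 1.2.3 (r100)\n'] -> ['Subversion Client, version X.Y.Z \n'].
def _process_lines_example():
  return process_lines(['Subversion Client, version 1.2.3 (r100)\n'])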
def run_one_test(sbox, basename, *varargs):
"run svn with args and compare against the specified output files"
### no need to use sbox.build() -- we don't need a repos or working copy
### for these tests.
exp_stdout, exp_stderr = load_expected_output(basename)
# special case the 'svn' test so that no extra arguments are added
if basename != 'svn':
exit_code, actual_stdout, actual_stderr = svntest.main.run_svn(1, *varargs)
else:
exit_code, actual_stdout, actual_stderr = svntest.main.run_command(svntest.main.svn_binary,
1, False, *varargs)
# Delete and perform search and replaces on the lines from the
# actual and expected output that may differ between build
# environments.
exp_stdout = process_lines(exp_stdout)
exp_stderr = process_lines(exp_stderr)
actual_stdout = process_lines(actual_stdout)
actual_stderr = process_lines(actual_stderr)
svntest.verify.compare_and_display_lines("Standard output does not match.",
"STDOUT", exp_stdout, actual_stdout)
svntest.verify.compare_and_display_lines("Standard error does not match.",
"STDERR", exp_stderr, actual_stderr)
def getopt_no_args(sbox):
"run svn with no arguments"
run_one_test(sbox, 'svn')
def getopt__version(sbox):
"run svn --version"
run_one_test(sbox, 'svn--version', '--version')
def getopt__version__quiet(sbox):
"run svn --version --quiet"
run_one_test(sbox, 'svn--version--quiet', '--version', '--quiet')
def getopt__version__verbose(sbox):
"run svn --version --verbose"
run_one_test(sbox, 'svn--version--verbose', '--version', '--verbose')
def getopt__help(sbox):
"run svn --help"
run_one_test(sbox, 'svn--help', '--help')
def getopt_help(sbox):
"run svn help"
run_one_test(sbox, 'svn_help', 'help')
def getopt_help_log_switch(sbox):
"run svn help log switch"
run_one_test(sbox, 'svn_help_log_switch', 'help', 'log', 'switch')
def getopt_help_bogus_cmd(sbox):
"run svn help bogus-cmd"
run_one_test(sbox, 'svn_help_bogus-cmd', 'help', 'bogus-cmd')
def getopt_config_option(sbox):
"--config-option's spell checking"
sbox.build(create_wc=False, read_only=True)
expected_stderr = '.*W205000.*did you mean.*'
expected_stdout = svntest.verify.AnyOutput
svntest.actions.run_and_verify_svn2(expected_stdout, expected_stderr, 0,
'info',
'--config-option',
'config:miscellanous:diff-extensions=' +
'-u -p',
sbox.repo_url)
########################################################################
# Run the tests
# list all tests here, starting with None:
test_list = [ None,
getopt_no_args,
getopt__version,
getopt__version__quiet,
getopt__version__verbose,
getopt__help,
getopt_help,
getopt_help_bogus_cmd,
getopt_help_log_switch,
getopt_config_option,
]
if __name__ == '__main__':
svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
|
import aos_sw_api
from .user_pass_ip import username, password, switch_ip, api_version
def test_auth_client():
with aos_sw_api.Client(switch_ip, api_version, username, password) as client:
print(client._session.headers)
def main():
test_auth_client()
if __name__ == "__main__":
main()
|
from tornado.gen import Task
from . import cached
from . validate import validate
from . import internal
from . import singleton
import logging
import ujson
class AppNotFound(Exception):
pass
class ApplicationInfoAdapter(object):
def __init__(self, data):
self.id = data.get("id")
self.name = data.get("name")
self.title = data.get("title")
self.versions = data.get("versions")
def dump(self):
return {
"id": self.id,
"name": self.name,
"title": self.title,
"versions": self.versions
}
class EnvironmentClient(object, metaclass=singleton.Singleton):
def __init__(self, cache):
self.internal = internal.Internal()
self.cache = cache
async def list_apps(self):
@cached(kv=self.cache,
h="environment_apps",
json=True)
async def get():
try:
response = await self.internal.request(
"environment",
"get_apps")
except internal.InternalError:
logging.exception("Failed to list apps")
return []
else:
return response
all_apps = await get()
return {
app_data["app_name"]: app_data["app_title"]
for app_data in all_apps
}
@validate(app_name="str_name", app_info=ApplicationInfoAdapter)
async def set_app_info(self, app_name, app_info):
"""
        Do not use this method for any purpose except testing,
        as it affects the cache permanently
"""
async with self.cache.acquire() as db:
await db.set("environment_app:" + app_name, ujson.dumps(app_info.dump()))
async def get_app_info(self, app_name):
@cached(kv=self.cache,
h=lambda: "environment_app:" + app_name,
json=True)
async def get():
response = await self.internal.request(
"environment",
"get_app_info",
app_name=app_name)
return response
try:
app_info = await get()
return ApplicationInfoAdapter(app_info)
except internal.InternalError as e:
if e.code == 404:
raise AppNotFound()
else:
raise e
async def get_app_title(self, app_name):
app_info = await self.get_app_info(app_name)
return app_info.title
async def get_app_versions(self, app_name):
app_info = await self.get_app_info(app_name)
return app_info.versions
|
from fireo import models as mdl
class User(mdl.Model):
id = mdl.IDField()
name = mdl.TextField()
class Student(mdl.Model):
id = mdl.IDField()
address = mdl.TextField()
def test_parent_key_with_id_field():
u = User()
u.id = 'test_parent_key_with_id_field'
u.name = 'testing parent key with id field'
u.save()
s1 = Student(parent=u.key)
s1.id = 'student_id_in_test_parent_key'
s1.address = 'testing parent student key'
s1.save()
s2 = Student.collection.get(s1.key)
assert s1.id == s2.id
assert s1.key == s2.key
class User1(mdl.Model):
user_id = mdl.IDField()
name = mdl.TextField()
class Student1(mdl.Model):
student_id = mdl.IDField()
address = mdl.TextField()
def test_parent_key_with_custom_field():
u = User1()
u.user_id = 'test_parent_key_with_custom_field'
u.name = 'testing parent key with_custom_field'
u.save()
s1 = Student1(parent=u.key)
s1.student_id = 'student_id_test_parent_key_with_custom_field'
s1.address = 'testing parent student keywith_custom_field'
s1.save()
s2 = Student1.collection.get(s1.key)
assert s1.student_id == s2.student_id
assert s1.key == s2.key
assert s1.id is None
assert s2.id is None
def test_parent_key_custom_field_without_value():
u = User1()
u.name = 'testing parent key with_custom_field'
u.save()
s1 = Student1(parent=u.key)
s1.address = 'testing parent student keywith_custom_field'
s1.save()
s2 = Student1.collection.get(s1.key)
assert s1.student_id == s2.student_id
assert s1.key == s2.key
assert s1.id is None
assert s2.id is None
def test_parent_key_without_value():
u = User()
u.id = 'test_parent_key_without_value'
u.name = 'testing parent key without value'
u.save()
s1 = Student(parent=u.key)
s1.id = 'student_id_in_test_parent_key_without_value'
s1.address = 'testing parent student key without value'
s1.save()
s2 = Student.collection.get(s1.key)
assert s1.id == s2.id
assert s1.key == s2.key
class Company(mdl.Model):
name = mdl.TextField()
class Employee(mdl.Model):
address = mdl.TextField()
def test_parent_key_without_id_field():
c = Company()
c.name = 'testing parent key without id field'
c.save()
e1 = Employee(parent=c.key)
e1.address = 'testing parent student key without field'
e1.save()
e2 = Employee.collection.get(e1.key)
assert e1.id == e2.id
assert e1.key == e2.key
def test_parent_key_with_value():
c = Company()
c.id = 'test_parent_key_with_value'
c.name = 'testing parent key with value'
c.save()
e1 = Employee(parent=c.key)
e1.id = 'student_test_parent_key_with_values'
e1.address = 'testing parent student key with value'
e1.save()
e2 = Employee.collection.get(e1.key)
assert e1.id == e2.id
assert e1.key == e2.key
def test_parent_key_with_id_name():
c = Company()
c.id = 'test_parent_key_with_id_name'
c.name = 'testing parent key with value'
c.save()
e1 = Employee(parent=c.key)
e1.id = 'student_test_parent_key_with_id_name'
e1.address = 'testing parent student key with value'
e1.save()
e2 = Employee.collection.get(e1.key)
assert e1.id == e2.id
assert e1.key == e2.key
assert e1.id != 'test_parent_key_with_id_name'
assert e2.id != 'test_parent_key_with_id_name'
|
from PyQt5.QtWidgets import QApplication, QMainWindow, QToolButton, QLineEdit, QMessageBox
from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtCore import QUrl, QSize
from PyQt5.QtGui import QIcon, QPixmap
# Variables
home_url = "https://www.google.com"
facebook_url = "https://www.facebook.com"
twitter_url = "https://www.twitter.com"
youtube_url = "https://www.youtube.com"
application = QApplication([])
#INTERFACE
mainWindow = QMainWindow()
mainWindow.setGeometry(0,0,1350,690)
mainWindow.setMinimumHeight(690)
mainWindow.setMaximumHeight(690)
mainWindow.setMinimumWidth(1350)
mainWindow.setMaximumWidth(1350)
mainWindow.setWindowTitle("NAVEGADOR")
mainWindow.setStyleSheet("background-color: rgb(0,0,0);")
# RENDER BROWSER PAGES
web = QWebEngineView(mainWindow)
web.setGeometry(0,30,1350,690)
web.setStyleSheet("background-color: rgb(255,255,255);")
web.load(QUrl(home_url))
# URL input line
go_line = QLineEdit(mainWindow)
go_line.setGeometry(350,5,500,20)
go_line.setStyleSheet("background-color: rgb(255,255,255);")
# Home button
home_button = QToolButton(mainWindow)
home_button.setGeometry(10,3,20,20)
home_button_icon = QIcon()
home_button_icon.addPixmap(QPixmap("img/home.png"))
home_button.setIcon(home_button_icon)
home_button.setStyleSheet("background-color: transparent")
# Reload button
reload_button = QToolButton(mainWindow)
reload_button.setGeometry(60,3,20,20)
reload_button_icon = QIcon()
reload_button_icon.addPixmap(QPixmap("img/reciclar.png"))
reload_button.setIcon(reload_button_icon)
reload_button.setStyleSheet("background-color: transparent")
# Back button
back_button = QToolButton(mainWindow)
back_button.setGeometry(140,3,20,20)
back_button_icon = QIcon()
back_button_icon.addPixmap(QPixmap("img/retorna.png"))
back_button.setIcon(back_button_icon)
back_button.setStyleSheet("background-color: transparent")
# Forward button
forward_button = QToolButton(mainWindow)
forward_button.setGeometry(180,5,20,20)
forward_button_icon = QIcon()
forward_button_icon.addPixmap(QPixmap("img/frente.png"))
forward_button.setIcon(forward_button_icon)
forward_button.setStyleSheet("background-color: transparent")
forward_button.setIconSize(QSize(50,50))
# Search button
go_button = QToolButton(mainWindow)
go_button.setGeometry(880,3,30,30)
go_button_icon = QIcon()
go_button_icon.addPixmap(QPixmap("img/procurar.png"))
go_button.setIcon(go_button_icon)
go_button.setStyleSheet("background-color: transparent")
# Facebook button
facebook_button = QToolButton(mainWindow)
facebook_button.setGeometry(1200,1,30,30)
facebook_button_icon = QIcon()
facebook_button_icon.addPixmap(QPixmap("img/facebook.png"))
facebook_button.setStyleSheet("background-color: transparent")
facebook_button.setIcon(facebook_button_icon)
# YouTube button
youtube_button = QToolButton(mainWindow)
youtube_button.setGeometry(1250,1,30,30)
youtube_button_icon = QIcon()
youtube_button_icon.addPixmap(QPixmap("img/youtube.png"))
youtube_button.setStyleSheet("background-color: transparent")
youtube_button.setIcon(youtube_button_icon)
youtube_button.setIconSize(QSize(20,20))
# Twitter button
twiter_button = QToolButton(mainWindow)
twiter_button.setGeometry(1300,1,30,30)
twiter_button_icon = QIcon()
twiter_button_icon.addPixmap(QPixmap("img/twitter.png"))
twiter_button.setStyleSheet("background-color: transparent")
twiter_button.setIcon(twiter_button_icon)
def home(mainWindow):
web.load(QUrl(home_url))
def reload(mainWindow):
web.reload()
def back(mainWindow):
web.back()
def forward(mainWindow):
web.forward()
def go(mainWindow):
go_url = go_line.text()
web.load(QUrl(go_url))
def facebook(mainWindow):
web.load(QUrl(facebook_url))
def twitter(mainWindow):
web.load(QUrl(twitter_url))
def youtube(mainWindow):
web.load(QUrl(youtube_url))
def Downloads(item):
item.accept()
msg = QMessageBox()
msg.setWindowTitle("Dowloads")
msg.setText("O seu download foi iniciado")
msg.setIcon(QMessageBox.Warning)
msg.exec_()
# Connect callbacks to the buttons
home_button.clicked.connect(home)
reload_button.clicked.connect(reload)
back_button.clicked.connect(back)
forward_button.clicked.connect(forward)
facebook_button.clicked.connect(facebook)
twiter_button.clicked.connect(twitter)
youtube_button.clicked.connect(youtube)
go_button.clicked.connect(go)
web.page().profile().downloadRequested.connect(Downloads)
mainWindow.show()
application.exec_()
|
# Read the character names
f = open('name.txt')
data = f.read()
data0 = data.split('|')
# Read the weapon names
f2 = open('weapon.txt')
# data2 = f2.read()
i = 1
for line in f2.readlines():
if i % 2 == 1:
print(line.strip('\n'))
i += 1
f3 = open('sanguo.txt',encoding='GB18030')
print(f3.read().replace('\n',''))
#
#
# def func(filename):
# print(open(filename).read())
# print('test func')
#
#
#
# func('name.txt')
|
thistuple = ("apple", "banana", "cherry", "apple", "cherry")
print(thistuple)
fruits = ("apple", "banana", "cherry")
(green, yellow, red) = fruits
print(green)
print(yellow)
print(red)
|
import grpc
import time
import drivers.xarm.wrapper.xarm_api as xai
from concurrent import futures
import robot_con.xarm_shuidi_grpc.xarm.xarm_pb2 as xarm_msg
import robot_con.xarm_shuidi_grpc.xarm.xarm_pb2_grpc as xarm_rpc
import numpy as np
class XArmServer(xarm_rpc.XArmServicer):
def __init__(self, arm_ip):
"""
        :param arm_ip: IP address of the arm, used to create an instance of xai.XArmAPI
:return:
"""
super().__init__()
self._xai_x = xai.XArmAPI(port=arm_ip)
if self._xai_x.has_err_warn:
if self._xai_x.get_err_warn_code()[1][0] == 1:
print("The Emergency Button is pushed in to stop!")
input("Release the emergency button and press any key to continue. Press Enter to continue...")
self._xai_x.clean_error()
self._xai_x.clean_error()
self._xai_x.motion_enable()
self._xai_x.set_mode(1) # servo motion mode
self._xai_x.set_state(state=0)
self._xai_x.reset(wait=True)
self._xai_x.clean_gripper_error()
self._xai_x.set_gripper_enable(1)
self._xai_x.set_gripper_mode(0)
self.__speed = 5000
self._xai_x.set_gripper_speed(self.__speed) # 1000-5000
        self._xai_x.set_gripper_position(850)  # open the gripper (the 1000-5000 range above refers to speed)
def get_jnt_values(self, request, context):
code, jnt_values = self._xai_x.get_servo_angle(is_radian=True)
if code != 0:
raise Exception(f"The returned code of get_servo_angle is wrong! Code: {code}")
return xarm_msg.JntValues(data=np.array(jnt_values).tobytes())
def move_jspace_path(self, request, context):
nrow = request.length
ncol = request.njnts
flat_path_data = np.frombuffer(request.data, dtype=np.float64)
path = flat_path_data.reshape((nrow, ncol))
for jnt_values in path.tolist():
self._xai_x.set_servo_angle_j(jnt_values, is_radian=True)
time.sleep(.01)
return xarm_msg.Status(value=xarm_msg.Status.DONE)
def jaw_to(self, request, context):
self.__speed = request.speed
self._xai_x.set_gripper_speed(self.__speed)
self._xai_x.set_gripper_position(request.position, wait=True)
return xarm_msg.Status(value=xarm_msg.Status.DONE)
def get_gripper_status(self, request, context):
speed = self.__speed
code, position = self._xai_x.get_gripper_position()
if code != 0:
raise Exception(f"The returned code of get_gripper_position is wrong! Code: {code}")
return xarm_msg.GripperStatus(speed=speed,
position=position)
def serve(arm_ip = "192.168.50.99", host = "localhost:18300"):
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
options = [('grpc.max_message_length', 100 * 1024 * 1024)]
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10), options = options)
xai_server = XArmServer(arm_ip)
xarm_rpc.add_XArmServicer_to_server(xai_server, server)
server.add_insecure_port(host)
server.start()
print("The XArm server is started!")
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == "__main__":
serve(arm_ip = "192.168.1.185", host = "192.168.50.99:18300")
|
from django.db import models
from django.forms import Textarea
from django.contrib import admin
from django.utils.encoding import python_2_unicode_compatible
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFit
# Create your models here.
@python_2_unicode_compatible
class Icon(models.Model):
title = models.CharField(max_length=30)
icon= models.ImageField("Menu Icon", upload_to="images/", blank=True, null=True)
height = models.IntegerField()
width = models.IntegerField()
url = models.CharField(max_length=500, blank=True, null=True)
icon_thumbnail = ImageSpecField(source='icon',
processors=[ResizeToFit(50,50)],
format='PNG',
options={'quality': 80})
class Meta:
verbose_name = 'icon'
verbose_name_plural = 'icons'
def __str__(self):
return self.title
def __unicode__(self):
return unicode(self.title)
@python_2_unicode_compatible
class SocialAuthIcon(models.Model):
title = models.CharField(max_length=30)
icon= models.ImageField("Social Authentication Icon", upload_to="images/", blank=True, null=True)
height = models.IntegerField()
width = models.IntegerField()
url = models.CharField(max_length=500, blank=True, null=True)
icon_thumbnail = ImageSpecField(source='icon',
processors=[ResizeToFit(40,40)],
format='PNG',
options={'quality': 40})
class Meta:
verbose_name = 'Social Authentication Icon'
verbose_name_plural = 'Social Authentication Icons'
def __str__(self):
return self.title
def __unicode__(self):
return unicode(self.title)
@python_2_unicode_compatible
class SocialIcon(models.Model):
title = models.CharField(max_length=30)
icon= models.ImageField("Social Icon", upload_to="images/", blank=True, null=True)
height = models.IntegerField()
width = models.IntegerField()
url = models.CharField(max_length=500, blank=True, null=True)
icon_thumbnail = ImageSpecField(source='icon',
processors=[ResizeToFit(50,50)],
format='PNG',
options={'quality': 80})
class Meta:
verbose_name = 'social icon'
verbose_name_plural = 'social icons'
def __str__(self):
return self.title
def __unicode__(self):
return unicode(self.title)
@python_2_unicode_compatible
class ActionIcon(models.Model):
action_id = models.CharField(max_length=30, blank=True, null=True)
title = models.CharField(max_length=30)
icon= models.ImageField("Action Icon", upload_to="images/", blank=True, null=True)
height = models.IntegerField()
width = models.IntegerField()
url = models.CharField(max_length=500, blank=True, null=True)
icon_thumbnail = ImageSpecField(source='icon',
processors=[ResizeToFit(50,50)],
format='PNG',
options={'quality': 80})
class Meta:
verbose_name = 'action icon'
verbose_name_plural = 'action icons'
def __str__(self):
return self.title
def __unicode__(self):
return unicode(self.title)
"""
Register with admin
"""
class ActionIconAdmin(admin.ModelAdmin):
fields = ('action_id','title','icon','url','height','width')
class SocialIconAdmin(admin.ModelAdmin):
fields = ('title','icon','url','height','width')
class IconAdmin(admin.ModelAdmin):
fields = ('title','icon','url','height','width')
class SocialAuthIconAdmin(admin.ModelAdmin):
fields = ('title','icon','url','height','width')
admin.site.register(Icon)
admin.site.register(SocialIcon)
admin.site.register(ActionIcon)
admin.site.register(SocialAuthIcon)
|
# coding: utf-8
import os
import sys
import logging
from importlib import import_module
def pytest_sessionstart(session):
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format="%(asctime)s - %(levelname)s - %(message)s",
)
proj_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
sys.path.insert(0, proj_dir)
jqdatasdk = import_module("jqdatasdk")
client = jqdatasdk.JQDataClient.instance()
client.ensure_auth()
assert jqdatasdk.is_auth()
def pytest_sessionfinish(session, exitstatus):
import_module("jqdatasdk").logout()
logging.info("test session finish, exit status: %s", exitstatus)
|
# Generated by Django 3.1.4 on 2020-12-02 13:33
import datetime
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Lexer",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=225)),
],
),
migrations.CreateModel(
name="Paste",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("content", models.TextField(default="")),
(
"inspiration_date",
models.DateField(
default=datetime.datetime(2020, 12, 9, 13, 33, 47, 290810)
),
),
(
"lex",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="paste.lexer"
),
),
],
),
]
|
#!/usr/bin/python3
"""
Script language: Python3
Talks to:
- Vega node (REST)
- Vega wallet (REST)
Apps/Libraries:
- REST: requests (https://pypi.org/project/requests/)
"""
# Note: this file uses smart-tags in comments to section parts of the code to
# show them as snippets in our documentation. They are not necessary to be
# included when creating your own custom code.
#
# Example of smart-tags:
# __something:
# some code here
# :something__
import json
import requests
import time
import os
import helpers
node_url_rest = os.getenv("NODE_URL_REST")
if not helpers.check_url(node_url_rest):
print("Error: Invalid or missing NODE_URL_REST environment variable.")
exit(1)
wallet_server_url = os.getenv("WALLETSERVER_URL")
if not helpers.check_url(wallet_server_url):
print("Error: Invalid or missing WALLETSERVER_URL environment variable.")
exit(1)
wallet_name = os.getenv("WALLET_NAME")
if not helpers.check_var(wallet_name):
print("Error: Invalid or missing WALLET_NAME environment variable.")
exit(1)
wallet_passphrase = os.getenv("WALLET_PASSPHRASE")
if not helpers.check_var(wallet_passphrase):
print("Error: Invalid or missing WALLET_PASSPHRASE environment variable.")
exit(1)
# Help guide users against including api version suffix on url
wallet_server_url = helpers.check_wallet_url(wallet_server_url)
#####################################################################################
# W A L L E T S E R V I C E #
#####################################################################################
print(f"Logging into wallet: {wallet_name}")
# __login_wallet:
# Log in to an existing wallet
req = {"wallet": wallet_name, "passphrase": wallet_passphrase}
response = requests.post(f"{wallet_server_url}/api/v1/auth/token", json=req)
helpers.check_response(response)
token = response.json()["token"]
# :login_wallet__
assert token != ""
print("Logged in to wallet successfully")
# __get_pubkey:
# List key pairs and select public key to use
headers = {"Authorization": f"Bearer {token}"}
response = requests.get(f"{wallet_server_url}/api/v1/keys", headers=headers)
helpers.check_response(response)
keys = response.json()["keys"]
pubkey = keys[0]["pub"]
# :get_pubkey__
assert pubkey != ""
print("Selected pubkey for signing")
#####################################################################################
# L I S T P R O P O S A L S #
#####################################################################################
# There are two types of REST request for proposals on Vega:
# 1 - MARKET proposals (/governance/market/proposals)
# 2 - ASSET proposals (/governance/asset/proposals)
# Note: In the future users will be able to call an endpoint to retrieve ALL proposals.
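# Sketch only (helper not called below): ASSET proposals live on the second
# endpoint listed above and can be fetched the same way as MARKET proposals.
def list_asset_proposals():
    response = requests.get(node_url_rest + "/governance/asset/proposals")
    helpers.check_response(response)
    return response.json()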
# __get_proposals:
# Request a list of proposals on a Vega network
response = requests.get(node_url_rest + "/governance/market/proposals")
helpers.check_response(response)
proposals = response.json()
print("Proposals:\n{}".format(json.dumps(proposals, indent=2, sort_keys=True)))
# :get_proposals__
proposalID = proposals["data"][0]["proposal"]["id"]
assert proposalID != ""
print(f"Proposal found: {proposalID}")
#####################################################################################
# P R O P O S A L D E T A I L S #
#####################################################################################
# __get_proposal_detail:
# Request results of a specific proposal on a Vega network
response = requests.get(node_url_rest + "/governance/proposal/" + proposalID)
helpers.check_response(response)
response_json = response.json()
print("Proposal:\n{}".format(json.dumps(response_json, indent=2, sort_keys=True)))
# :get_proposal_detail__
#####################################################################################
# P A R T Y P R O P O S A L S #
#####################################################################################
# __get_proposals_by_party:
# Request results of a specific proposal on a Vega network
response = requests.get(node_url_rest + "/parties/" + pubkey + "/proposals")
helpers.check_response(response)
response_json = response.json()
print("Party proposals:\n{}".format(json.dumps(response_json, indent=2, sort_keys=True)))
# :get_proposals_by_party__
|
# type: ignore[attr-defined]
'''
:created: 2019-07-29
:author: Leandro (Cerberus1746) Benedet Garcia'''
from dataclasses import _FIELDS, _get_field
from typing import Optional, Any, Type
from keyword import iskeyword
def add_field(dataclass_instance: object, field_name: str, field_type: Type[Any],
field_value: Optional[Any] = None):
'''Create a new dataclass field
:param dataclass_instance: The input dataclass
:param field_name: The name of the field
:param field_type: The field type
:param field_value: The value of the field'''
setattr(dataclass_instance, field_name, field_value)
getattr(dataclass_instance, _FIELDS)[field_name] = _get_field(dataclass_instance, field_name,
field_type)
dataclass_instance.__annotations__[field_name] = field_type
def check_field(dataclass_instance: object, field_name: str) -> bool:
'''Return true if the field exist inside the input dataclass
:param dataclass_instance: The dataclass to check
:param field_name: The name of the field to check'''
return field_name in getattr(dataclass_instance, _FIELDS)
def delete_field(dataclass_instance: object, field_name: str, default: Optional[Any] = None) -> Any:
'''Remove the field from the dataclass
:param dataclass_instance: The dataclass to delete the field from
:param field_name: The field name to delete
:param default: the value to be returned if the field doesn't exist
:raises KeyError: If default is `None` and the field doesn't exist'''
getattr(dataclass_instance, _FIELDS).pop(field_name, None)
dataclass_instance.__annotations__.pop(field_name, None)
if hasattr(dataclass_instance, field_name):
cur_value = getattr(dataclass_instance, field_name)
delattr(dataclass_instance, field_name)
return cur_value
if default is not None:
return default
raise KeyError(field_name)
def item_zip(*dicts_input):
'''Function to iterate across multiple dictionaries
An example in how to use this function is:
.. code:: python
first_dict = {"first": 1}
second_dict = {"second": 2}
for first_key, first_var, second_key, second_var in item_zip(first_dict , second_dict):
#prints first, 1
print(first_key, first_var)
#prints second, 2
print(second_key, second_var)'''
zipped = zip(*[cur_dict.items() for cur_dict in dicts_input])
return map(lambda x: (x[0][0], x[0][1], x[1][0], x[1][1]), zipped)
def valid_variable(name):
    '''Check if a string is a valid variable name (an identifier that is not a Python keyword)
:param name: the string to be checked'''
return name.isidentifier() and not iskeyword(name)
__all__ = ("add_field", "check_field", "delete_field", "item_zip", "valid_variable")
|
import pickle
import json
import os
OUTPUT_DIR = 'json/'
os.makedirs(OUTPUT_DIR, exist_ok=True)
# Convert each pickled dict into a pretty-printed JSON file of the same name
for name in ["SheetProperties", "ShieldsLag", "OutOfShieldStartup", "CharacterList"]:
    with open("dict_{}.pickle".format(name), mode='rb') as f:
        data = pickle.load(f)
    with open(OUTPUT_DIR + "{}.json".format(name), mode='w') as f:
        json.dump(data, f, sort_keys=True, ensure_ascii=False, indent=2)
|
import re
#replace charset notation in text
def replace_charset_notation(content, new_notation):
    #try:
    charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
    pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
    xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
    charset_all = charset_re.findall(content)
    pragma_all = pragma_re.findall(content)
    xml_all = xml_re.findall(content)
    def match_fun(orig):
        def match_func(match, origin=orig):
            newtext = re.sub(origin, new_notation, match.group(0))
            return newtext
        return match_func
    # Reuse the compiled patterns (with their flags) for the substitutions
    if charset_all:
        content = charset_re.sub(match_fun(charset_all[0]), content)
    if pragma_all:
        content = pragma_re.sub(match_fun(pragma_all[0]), content)
    if xml_all:
        content = xml_re.sub(match_fun(xml_all[0]), content)
    #except:
    #print("Exception in: replace_charset_notation")
    return content
text = '<meta http-equiv="Content-Type" content="text/html; charset=gb2312">'
newtext = replace_charset_notation(text, "utf-8")
print(text)
print(newtext)
|