content stringlengths 5 1.05M |
|---|
import unittest
from pyaligner import *


class TestMatrixMatchSeq(unittest.TestCase):
    """Checks DPMatrix.calc_match_seq on a small fixed ACTG/ACAAA example."""

    def _match_seq(self, *extra_matrix_args):
        # Build the DP matrix for "ACTG" vs "ACAAA" with scorer
        # parameters (5, -1, -3, 7) and return the computed alignment.
        scorer = Scorer(5, -1, -3, 7)
        matrix = DPMatrix(Sequence("ACTG"), Sequence("ACAAA"), scorer, *extra_matrix_args)
        return matrix.calc_match_seq()

    def test_matrix_match_seq_global(self):
        aligned = self._match_seq()
        self.assertEqual(aligned[0], "ACTG-")
        self.assertEqual(aligned[1], "ACAAA")
        self.assertEqual(aligned[2], 2)

    def test_matrix_match_seq_semiglobal(self):
        # The extra True argument requests semiglobal alignment.
        aligned = self._match_seq(True)
        self.assertEqual(aligned[0], "ACTG")
        self.assertEqual(aligned[1], "ACAA")
        self.assertEqual(aligned[2], 2)


if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <andre.anjos@idiap.ch>
# Mon 16 Apr 08:18:08 2012 CEST

# Build/installation script for bob.learn.em: one pure C++ Library plus two
# Python binding Extensions, compiled through bob.extension/bob.blitz.

# Bob packages whose headers and libraries the C++ code below depends on.
bob_packages = ['bob.core', 'bob.io.base', 'bob.sp', 'bob.math', 'bob.learn.activation', 'bob.learn.linear']

from setuptools import setup, find_packages, dist

# NOTE: bob.extension and bob.blitz must be installed *before* the imports
# below can succeed, so they are fetched eagerly via setup_requires here.
dist.Distribution(dict(setup_requires=['bob.extension>=2.0.7', 'bob.blitz'] + bob_packages))

from bob.blitz.extension import Extension, Library, build_ext
from bob.extension.utils import load_requirements

build_requires = load_requirements()

# Define package version
version = open("version.txt").read().rstrip()

# Third-party (non-bob) native dependencies of the compiled code.
packages = ['boost']
boost_modules = ['system']

setup(
    name='bob.learn.em',
    version=version,
    description='Bindings for EM machines and trainers of Bob',
    url='http://gitlab.idiap.ch/bob/bob.learn.em',
    license='BSD',
    author='Andre Anjos',
    author_email='andre.anjos@idiap.ch',
    long_description=open('README.rst').read(),
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    setup_requires = build_requires,
    install_requires = build_requires,
    ext_modules = [
      # Small extension exposing the compiled version information.
      Extension("bob.learn.em.version",
        [
          "bob/learn/em/version.cpp",
        ],
        bob_packages = bob_packages,
        packages = packages,
        boost_modules = boost_modules,
        version = version,
      ),
      # Pure C++ machine/trainer implementations shared by the bindings.
      Library("bob.learn.em.bob_learn_em",
        [
          "bob/learn/em/cpp/Gaussian.cpp",
          "bob/learn/em/cpp/GMMMachine.cpp",
          "bob/learn/em/cpp/GMMStats.cpp",
          "bob/learn/em/cpp/IVectorMachine.cpp",
          "bob/learn/em/cpp/KMeansMachine.cpp",
          "bob/learn/em/cpp/LinearScoring.cpp",
          "bob/learn/em/cpp/PLDAMachine.cpp",
          "bob/learn/em/cpp/FABase.cpp",
          "bob/learn/em/cpp/JFABase.cpp",
          "bob/learn/em/cpp/ISVBase.cpp",
          "bob/learn/em/cpp/JFAMachine.cpp",
          "bob/learn/em/cpp/ISVMachine.cpp",
          "bob/learn/em/cpp/FABaseTrainer.cpp",
          "bob/learn/em/cpp/JFATrainer.cpp",
          "bob/learn/em/cpp/ISVTrainer.cpp",
          "bob/learn/em/cpp/EMPCATrainer.cpp",
          "bob/learn/em/cpp/GMMBaseTrainer.cpp",
          "bob/learn/em/cpp/IVectorTrainer.cpp",
          "bob/learn/em/cpp/KMeansTrainer.cpp",
          "bob/learn/em/cpp/MAP_GMMTrainer.cpp",
          "bob/learn/em/cpp/ML_GMMTrainer.cpp",
          "bob/learn/em/cpp/PLDATrainer.cpp",
        ],
        bob_packages = bob_packages,
        packages = packages,
        boost_modules = boost_modules,
        version = version,
      ),
      # Python bindings wrapping the Library above.
      Extension("bob.learn.em._library",
        [
          "bob/learn/em/gaussian.cpp",
          "bob/learn/em/gmm_stats.cpp",
          "bob/learn/em/gmm_machine.cpp",
          "bob/learn/em/kmeans_machine.cpp",
          "bob/learn/em/kmeans_trainer.cpp",
          "bob/learn/em/ml_gmm_trainer.cpp",
          "bob/learn/em/map_gmm_trainer.cpp",
          "bob/learn/em/jfa_base.cpp",
          "bob/learn/em/jfa_machine.cpp",
          "bob/learn/em/jfa_trainer.cpp",
          "bob/learn/em/isv_base.cpp",
          "bob/learn/em/isv_machine.cpp",
          "bob/learn/em/isv_trainer.cpp",
          "bob/learn/em/ivector_machine.cpp",
          "bob/learn/em/ivector_trainer.cpp",
          "bob/learn/em/plda_base.cpp",
          "bob/learn/em/plda_machine.cpp",
          "bob/learn/em/empca_trainer.cpp",
          "bob/learn/em/plda_trainer.cpp",
          "bob/learn/em/linear_scoring.cpp",
          "bob/learn/em/main.cpp",
        ],
        bob_packages = bob_packages,
        packages = packages,
        boost_modules = boost_modules,
        version = version,
      ),
    ],
    cmdclass = {
      'build_ext': build_ext
    },
    classifiers = [
      'Framework :: Bob',
      'Development Status :: 3 - Alpha',
      'Intended Audience :: Developers',
      'License :: OSI Approved :: BSD License',
      'Natural Language :: English',
      'Programming Language :: Python',
      'Programming Language :: Python :: 3',
      'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
|
import requests

# Smoke-test client for the local model-serving API: posts one candidate's
# features to /predict_api and prints the JSON response.
url = 'http://localhost:5000/predict_api'
# BUGFIX: requests waits indefinitely by default; a finite timeout keeps
# the script from hanging forever if the server is unreachable.
r = requests.post(
    url,
    json={'experience': 2, 'test_score': 9, 'interview_score': 6},
    timeout=10,
)
print(r.json())
|
import time
from ..modules import VideoCapture
from .observable import observable
class CaptureVideoPipe:
    """Iterable pipeline stage that pulls frames from a VideoCapture source.

    Iterating yields one dict per frame with keys "idx", "fps", "name" and
    "image"; iteration ends when the source is exhausted or a "stop" event
    is broadcast through the observable registry.
    """

    def __init__(self, conf, **kwargs):
        super().__init__()
        self.video_capture = VideoCapture(conf, **kwargs).open()
        self.stop = False
        # Let an external "stop" broadcast terminate the generator loop.
        observable.register("stop", self, self.on_stop)

    def __iter__(self):
        return self.generator()

    def __len__(self):
        return len(self.video_capture)

    def on_stop(self):
        # Observable callback: request the generator to finish.
        self.stop = True

    def generator(self):
        frame_index = 0
        started = time.perf_counter()
        while not self.stop:
            frame = self.video_capture.read()
            # A None read means the source ran dry — stop iterating.
            if frame is None:
                break
            yield {
                "idx": frame_index,
                # Average frames-per-second since iteration began.
                "fps": (frame_index + 1) / (time.perf_counter() - started),
                "name": f"{frame_index:06d}",
                "image": frame,
            }
            frame_index += 1

    def close(self):
        self.video_capture.close()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 19 10:52:15 2018
@author: Vladislav Sláma
"""
import numpy as np
def get_distance_matrixes(coor_i, coor_j):
    """Return pairwise distance vectors and distances between two coordinate sets.

    Parameters
    ----------
    coor_i : numpy array (Nat_i x 3)
    coor_j : numpy array (Nat_j x 3)

    Returns
    -------
    R : numpy array (Nat_i x Nat_j x 3)
        R[ii, jj] = coor_j[jj] - coor_i[ii]
    RR : numpy array (Nat_i x Nat_j)
        Euclidean norms of the vectors in ``R``.
    """
    coor_i = np.asarray(coor_i)
    coor_j = np.asarray(coor_j)
    # Broadcasting is equivalent to (and cheaper than) tiling both sets
    # against each other with np.tile/np.swapaxes.
    R = coor_j[np.newaxis, :, :] - coor_i[:, np.newaxis, :]
    RR = np.sqrt(np.sum(R * R, axis=2))
    return R, RR


class MMAtoms:
    """
    Class which manages numerical propagation in time (simple molecular dynamics)

    Parameters
    ----------
    R_coor : numpy array of reals (dimension Nat x 3)
        Actual coordinates of atoms (electrons)
    R0_coor : numpy array of reals (dimension Nat x 3)
        Equilibrium coordinates of atoms (electrons)
    vel : numpy array (dimension Nat x 3) or None
        Actual atomic (electronic) velocities
    mass : numpy array (dimension Nat)
        Atomic masses
    k_force : numpy array (dimension Nat)
        Force constants of harmonic oscillators
    direct : normalized numpy array (dimension Nat x 3), optional
        Vectors pointing in directions in which atom is allowed to move
        (vectors normalized to 1)
    bonds : list (dimension Nbonds x 2), optional
        List of bonded atoms (nearest neighbors)
    """

    def __init__(self, R_coor, R0_coor, mass, vel, k_force, direct=None, bonds=None):
        self.Nat = len(R_coor)
        self.R_coor = R_coor.copy()
        self.R0_coor = R0_coor.copy()
        # Mass and force constants are stored as (Nat x 3) so they
        # broadcast against per-axis vector quantities.
        self.mass = np.tile(mass, (3, 1)).T
        self.vel = vel.copy() if vel is not None else None
        self.accel = np.zeros((self.Nat, 3), dtype='f8')
        self.k_force = np.tile(k_force, (3, 1)).T
        # BUGFIX: `direct` defaults to None but the original called
        # direct.copy() unconditionally, raising AttributeError.
        self.direct = direct.copy() if direct is not None else None
        self.bonds = bonds.copy() if bonds is not None else None
        self.dt = None
        self.time = 0.0
        self.MASK = None
        # Distance tensors are computed once, from the equilibrium geometry.
        R, RR = get_distance_matrixes(R0_coor, R0_coor)
        self.RR = RR
        self.R = R
        # TODO: Normalize direct

    def get_force(self, RR3=None, RR5=None, MASK=None):
        """Calculate the total force acting on each particle - in AU.

        Combines a harmonic restoring force towards ``R0_coor`` with a
        dipole-dipole style pair interaction between displaced particles.

        Parameters
        ----------
        RR3, RR5 : numpy arrays (Nat x Nat), optional
            Precomputed third/fifth powers of the equilibrium distance
            matrix; computed from ``self.RR`` when omitted.
        MASK : boolean numpy array (Nat x Nat), optional
            Pair entries set to True are excluded from the interaction
            (e.g. to keep only nearest-neighbor contributions).

        Returns
        -------
        Force : numpy array (Nat x 3)
        """
        if RR3 is None:
            RR3 = self.RR * self.RR * self.RR
        if RR5 is None:
            RR5 = RR3 * self.RR * self.RR
        disp = self.R_coor - self.R0_coor
        # Contribution from harmonic oscillator
        Force = -disp * self.k_force
        # Contribution from dipole-dipole interaction:
        # XI[ii, jj] = disp[ii] (displacement of particle ii, tiled).
        XI = np.swapaxes(np.tile(disp, (self.Nat, 1, 1)), 0, 1)
        F = np.zeros(XI.shape, dtype="f8")
        for jj in range(3):
            # Guarded divisions zero out the (self-interaction) diagonal,
            # where RR3/RR5 vanish.
            F[:, :, jj] = -np.divide(XI[:, :, jj], RR3,
                                     out=np.zeros_like(XI[:, :, jj]), where=RR3 != 0)
            temp = np.sum(XI * self.R, axis=2)
            F[:, :, jj] += 3 * self.R[:, :, jj] * np.divide(temp, RR5,
                                                            out=np.zeros_like(temp), where=RR5 != 0)
            # Keep only nearest neighbor contribution
            if MASK is not None:
                F[MASK, jj] = 0.0
        Force += np.sum(F, axis=0)
        # If movement is restricted to some direction, project the force
        # onto the allowed direction vectors.
        if self.direct is not None:
            Force_projected = np.sum(self.direct * Force, axis=1)
            Force = self.direct * np.tile(Force_projected, (3, 1)).T
        return Force

    def propagate(self, dt, Nsteps, Nstep_write, MASK=None, init_vel=False):
        """Propagate the system with a leapfrog-style integrator.

        Parameters
        ----------
        dt : float
            Timestep.
        Nsteps : int
            Number of propagation steps (the loop runs Nsteps + 1 times).
        Nstep_write : int
            Store coordinates every Nstep_write steps.
        MASK : boolean numpy array (Nat x Nat), optional
            Forwarded to :meth:`get_force`.
        init_vel : bool, optional
            Force (re-)initialization of half-step velocities; implied
            when no velocities are set yet.

        Returns
        -------
        coor_write : list of numpy arrays (Nat x 3)
            Snapshots of the coordinates.
        """
        # Hoist distance powers out of the loop — they are invariant.
        RR3 = self.RR * self.RR * self.RR
        RR5 = RR3 * self.RR * self.RR
        coor_write = []
        if self.vel is None:
            init_vel = True
        # Init velocities at half-step (leapfrog start-up).
        if init_vel:
            Force = self.get_force(RR3=RR3, RR5=RR5, MASK=MASK)
            self.accel = Force / self.mass
            self.vel = self.accel * dt / 2
        # Propagate particles
        for step in range(Nsteps + 1):
            if step % Nstep_write == 0:
                coor_write.append(self.R_coor)
            self.R_coor = self.R_coor + self.vel * dt
            Force = self.get_force(RR3=RR3, RR5=RR5, MASK=MASK)
            self.accel = Force / self.mass
            self.vel = self.vel + self.accel * dt
            # (removed leftover debug print at step == 1000)
        return coor_write
|
from .conftest import GoProCameraTest
from goprocam import GoProCamera
class SetZoomTest(GoProCameraTest):
    """Verify the gpControl command string emitted by GoPro.setZoom."""

    def _expect_zoom_command(self, m, expected):
        # Patch gpControlCommand so the test fails unless the camera
        # receives exactly `expected`.
        def verify_cmd(_camera, cmd):
            assert cmd == expected

        m.setattr(GoProCamera.GoPro, 'gpControlCommand', verify_cmd)

    def test_set_zoom_min(self):
        with self.monkeypatch.context() as m:
            self._expect_zoom_command(m, 'digital_zoom?range_pcnt=0')
            self.goprocam.setZoom(zoomLevel=0)

    def test_set_zoom_max(self):
        with self.monkeypatch.context() as m:
            self._expect_zoom_command(m, 'digital_zoom?range_pcnt=100')
            self.goprocam.setZoom(zoomLevel=100)

    def test_set_zoom_over_max(self):
        with self.monkeypatch.context() as m:
            # An out-of-range zoom level must never reach the camera.
            def verify_cmd(_camera, cmd):
                raise ValueError("shouldn't get here")

            m.setattr(GoProCamera.GoPro, 'gpControlCommand', verify_cmd)
            self.goprocam.setZoom(zoomLevel=101)
|
import re
from typing import List, Optional
from structures import FoundSegment
class UnitFilter:
    """Pass-through filter: every line becomes a single whole-line segment."""

    def filter(self, line_number: int, line: Optional[str]) -> List[FoundSegment]:
        whole_line = FoundSegment(line_number, 0, line)
        return [whole_line]
def CodeBlockFilterFactory(give_in_block: bool):
    """Build a filter class that keeps lines inside (or outside) ``` fences.

    give_in_block: True keeps lines inside fenced code blocks, False keeps
    lines outside them. Fence lines themselves are never returned.
    """
    class CodeBlockFilter:
        def __init__(self):
            # Toggled every time a ``` fence line is seen.
            self.in_code_block = False

        def filter(self, line_number: int, line: Optional[str]) -> List[FoundSegment]:
            if line is None:
                return []
            # str.startswith is the idiomatic equivalent of line[0:3] == "```".
            if line.startswith("```"):
                self.in_code_block = not self.in_code_block
                return []
            if self.in_code_block == give_in_block:
                # If the line is (or is out of) a code block, return an
                # array with a single whole-line segment.
                return [FoundSegment(line_number, 0, line)]
            return []
    return CodeBlockFilter
def HeadingFilterFactory(heading_level: int):
    """Build a filter class matching Markdown ATX headings.

    heading_level == 0 matches a heading of any level; otherwise only
    headings with exactly that many leading '#' characters match. Lines
    inside fenced code blocks are ignored.
    """
    class Heading:
        def __init__(self):
            self.codeblockfilter = CodeBlockFilterFactory(False)()

        def filter(self, line_number: int, line: Optional[str]) -> List[FoundSegment]:
            codeblock_segments = self.codeblockfilter.filter(line_number, line)
            if len(codeblock_segments) != 1:  # There should only be one segment (a whole line) if there is a match
                return []
            text = codeblock_segments[0].text
            # Count leading '#' characters. BUGFIX: the scan is bounded by
            # len(text) so a line made up entirely of '#' no longer raises
            # IndexError.
            count = 0
            while count < len(text) and text[count] == "#":
                count += 1
            valid = (heading_level != 0 and count == heading_level) or (heading_level == 0 and count != 0)
            # Headers take up the full line no matter what.
            return [FoundSegment(line_number, 0, line)] if valid else []
    return Heading
# UnderHeadingFilter
# Keeps track of which heading level it is under to see whether to keep a line
# link_printer is a function which will format the link for printing
# TODO: There is a bug in this function, where it will claim the position as the position of the LINK, not the match.
def LinkFilterFactory(link_printer):
    """Build a filter class extracting Markdown links ``[text](target)``.

    link_printer receives the regex groups (text, target) and formats the
    segment text that is reported for each link found.
    """
    # BUGFIX: the target class excludes ')' so each match stops at the
    # closing parenthesis; the original '[^\]]*' allowed ')' and could
    # greedily swallow text past the link on pathological lines.
    link_regex = re.compile(r'\[([^\]]*)\]\(([^)]*)\)')

    class LinkFilter:
        def __init__(self):
            self.codeblockfilter = CodeBlockFilterFactory(False)()

        def filter(self, line_number: int, line: Optional[str]) -> List[FoundSegment]:
            codeblock_segments = self.codeblockfilter.filter(line_number, line)
            if len(codeblock_segments) != 1:  # There should only be one segment (a whole line) if there is a match
                return []
            segment = codeblock_segments[0]
            result = []
            # finditer never yields None, so no extra guard is needed.
            for match in link_regex.finditer(segment.text):
                link_text = link_printer(match.groups())  # TODO: Reimplement link printer
                result.append(FoundSegment(line_number, match.start(), link_text))
            return result
    return LinkFilter
# BoldFilter
# ItalicFilter
# Filters related to tables
# ul>li
# ol>li
# todo item
# Checked todo item
# Unchecked todo item
# Registry mapping filter names to filter classes/factories.
# NOTE: the module-level name `all` shadows the builtin all(); it is kept
# because external code imports this registry by that name.
all = {
    'all': UnitFilter,
    'headings': HeadingFilterFactory(0),
    'heading1': HeadingFilterFactory(1),
    'heading2': HeadingFilterFactory(2),
    'heading3': HeadingFilterFactory(3),
    'heading4': HeadingFilterFactory(4),
    'heading5': HeadingFilterFactory(5),
    'heading6': HeadingFilterFactory(6),
    'links': LinkFilterFactory(lambda link: "[" + link[0] + "]" + "(" + link[1] + ")"),
    # BUGFIX: regex group 0 is the link text and group 1 the target (see
    # the 'links' printer above); the two printers below were swapped.
    'linktarget': LinkFilterFactory(lambda link: link[1]),
    'linktext': LinkFilterFactory(lambda link: link[0]),
    'insidecodeblock': CodeBlockFilterFactory(True),
    'outsidecodeblock': CodeBlockFilterFactory(False),
}
|
#
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from ctypes import c_int, c_char, c_void_p, c_byte, c_longlong, c_double, Structure, POINTER
class PicamEnumeratedType:
    """ctypes `c_int` codes for the PicamEnumeratedType enumeration of the
    Princeton Instruments PICam SDK.

    Values mirror the PICam C header (which groups them by the section
    dividers below); do not renumber.
    """
    #-------------------------------------------------------------------------*/
    #- Function Return Error Codes -------------------------------------------*/
    #-------------------------------------------------------------------------*/
    Error = c_int(1)
    #-------------------------------------------------------------------------*/
    #- General String Handling -----------------------------------------------*/
    #-------------------------------------------------------------------------*/
    EnumeratedType = c_int(29)
    #-------------------------------------------------------------------------*/
    #- Camera Identification -------------------------------------------------*/
    #-------------------------------------------------------------------------*/
    Model = c_int(2)
    ComputerInterface = c_int(3)
    #-------------------------------------------------------------------------*/
    #- Camera Plug 'n Play Discovery -----------------------------------------*/
    #-------------------------------------------------------------------------*/
    DiscoveryAction = c_int(26)
    #-------------------------------------------------------------------------*/
    #- Camera Access ---------------------------------------------------------*/
    #-------------------------------------------------------------------------*/
    HandleType = c_int(27)
    #-------------------------------------------------------------------------*/
    #- Camera Parameters -----------------------------------------------------*/
    #-------------------------------------------------------------------------*/
    ValueType = c_int(4)
    ConstraintType = c_int(5)
    Parameter = c_int(6)
    #-------------------------------------------------------------------------*/
    #- Camera Parameter Values - Enumerated Types ----------------------------*/
    #-------------------------------------------------------------------------*/
    AdcAnalogGain = c_int(7)
    AdcQuality = c_int(8)
    CcdCharacteristicsMask = c_int(9)
    GateTrackingMask = c_int(36)
    GatingMode = c_int(34)
    GatingSpeed = c_int(38)
    EMIccdGainControlMode = c_int(42)
    IntensifierOptionsMask = c_int(35)
    IntensifierStatus = c_int(33)
    ModulationTrackingMask = c_int(41)
    OrientationMask = c_int(10)
    OutputSignal = c_int(11)
    PhosphorType = c_int(39)
    PhotocathodeSensitivity = c_int(40)
    PhotonDetectionMode = c_int(43)
    PixelFormat = c_int(12)
    ReadoutControlMode = c_int(13)
    SensorTemperatureStatus = c_int(14)
    SensorType = c_int(15)
    ShutterTimingMode = c_int(16)
    TimeStampsMask = c_int(17)
    TriggerCoupling = c_int(30)
    TriggerDetermination = c_int(18)
    TriggerResponse = c_int(19)
    TriggerSource = c_int(31)
    TriggerTermination = c_int(32)
    #-------------------------------------------------------------------------*/
    #- Camera Parameter Information - Value Access ---------------------------*/
    #-------------------------------------------------------------------------*/
    ValueAccess = c_int(20)
    #-------------------------------------------------------------------------*/
    #- Camera Parameter Information - Dynamics -------------------------------*/
    #-------------------------------------------------------------------------*/
    DynamicsMask = c_int(28)
    #-------------------------------------------------------------------------*/
    #- Camera Parameter Constraints - Enumerated Types -----------------------*/
    #-------------------------------------------------------------------------*/
    ConstraintScope = c_int(21)
    ConstraintSeverity = c_int(22)
    ConstraintCategory = c_int(23)
    #-------------------------------------------------------------------------*/
    #- Camera Parameter Constraints - Regions Of Interest --------------------*/
    #-------------------------------------------------------------------------*/
    RoisConstraintRulesMask = c_int(24)
    #-------------------------------------------------------------------------*/
    #- Acquisition Control ---------------------------------------------------*/
    #-------------------------------------------------------------------------*/
    AcquisitionErrorsMask = c_int(25)
    #-------------------------------------------------------------------------*/
    #- Acquisition Notification ----------------------------------------------*/
    #-------------------------------------------------------------------------*/
    AcquisitionState = c_int(37)
    AcquisitionStateErrorsMask = c_int(44)
#-------------------------------------------------------------------------*/
#- PicamEnumeratedType; #- (45) */
#-----------------------------------------------------------------------------*/
#- Camera Identification -----------------------------------------------------*/
#-----------------------------------------------------------------------------*/
class PicamModel:
    """ctypes `c_int` codes identifying Princeton Instruments camera models
    (the PicamModel enumeration of the PICam SDK).

    Values mirror the PICam C header; each "Series" entry names a camera
    family and the surrounding entries its concrete models. Do not renumber.
    """
    #-------------------------------------------------------------------------*/
    #- PI-MTE Series (1419) --------------------------------------------------*/
    #-------------------------------------------------------------------------*/
    PIMteSeries = c_int(1400)
    #- PI-MTE 1024 Series ----------------------------------------------------*/
    PIMte1024Series = c_int(1401)
    PIMte1024F = c_int(1402)
    PIMte1024B = c_int(1403)
    PIMte1024BR = c_int(1405)
    PIMte1024BUV = c_int(1404)
    #- PI-MTE 1024FT Series --------------------------------------------------*/
    PIMte1024FTSeries = c_int(1406)
    PIMte1024FT = c_int(1407)
    PIMte1024BFT = c_int(1408)
    #- PI-MTE 1300 Series ----------------------------------------------------*/
    PIMte1300Series = c_int(1412)
    PIMte1300B = c_int(1413)
    PIMte1300R = c_int(1414)
    PIMte1300BR = c_int(1415)
    #- PI-MTE 2048 Series ----------------------------------------------------*/
    PIMte2048Series = c_int(1416)
    PIMte2048B = c_int(1417)
    PIMte2048BR = c_int(1418)
    #- PI-MTE 2K Series ------------------------------------------------------*/
    PIMte2KSeries = c_int(1409)
    PIMte2KB = c_int(1410)
    PIMte2KBUV = c_int(1411)
    #-------------------------------------------------------------------------*/
    #- PIXIS Series (76) -----------------------------------------------------*/
    #-------------------------------------------------------------------------*/
    PixisSeries = c_int(0)
    #- PIXIS 100 Series ------------------------------------------------------*/
    Pixis100Series = c_int(1)
    Pixis100F = c_int(2)
    Pixis100B = c_int(6)
    Pixis100R = c_int(3)
    Pixis100C = c_int(4)
    Pixis100BR = c_int(5)
    Pixis100BExcelon = c_int(54)
    Pixis100BRExcelon = c_int(55)
    PixisXO100B = c_int(7)
    PixisXO100BR = c_int(8)
    PixisXB100B = c_int(68)
    PixisXB100BR = c_int(69)
    #- PIXIS 256 Series ------------------------------------------------------*/
    Pixis256Series = c_int(26)
    Pixis256F = c_int(27)
    Pixis256B = c_int(29)
    Pixis256E = c_int(28)
    Pixis256BR = c_int(30)
    PixisXB256BR = c_int(31)
    #- PIXIS 400 Series ------------------------------------------------------*/
    Pixis400Series = c_int(37)
    Pixis400F = c_int(38)
    Pixis400B = c_int(40)
    Pixis400R = c_int(39)
    Pixis400BR = c_int(41)
    Pixis400BExcelon = c_int(56)
    Pixis400BRExcelon = c_int(57)
    PixisXO400B = c_int(42)
    PixisXB400BR = c_int(70)
    #- PIXIS 512 Series ------------------------------------------------------*/
    Pixis512Series = c_int(43)
    Pixis512F = c_int(44)
    Pixis512B = c_int(45)
    Pixis512BUV = c_int(46)
    Pixis512BExcelon = c_int(58)
    PixisXO512F = c_int(49)
    PixisXO512B = c_int(50)
    PixisXF512F = c_int(48)
    PixisXF512B = c_int(47)
    #- PIXIS 1024 Series -----------------------------------------------------*/
    Pixis1024Series = c_int(9)
    Pixis1024F = c_int(10)
    Pixis1024B = c_int(11)
    Pixis1024BR = c_int(13)
    Pixis1024BUV = c_int(12)
    Pixis1024BExcelon = c_int(59)
    Pixis1024BRExcelon = c_int(60)
    PixisXO1024F = c_int(16)
    PixisXO1024B = c_int(14)
    PixisXO1024BR = c_int(15)
    PixisXF1024F = c_int(17)
    PixisXF1024B = c_int(18)
    PixisXB1024BR = c_int(71)
    #- PIXIS 1300 Series -----------------------------------------------------*/
    Pixis1300Series = c_int(51)
    Pixis1300F = c_int(52)
    Pixis1300F_2 = c_int(75)
    Pixis1300B = c_int(53)
    Pixis1300BR = c_int(73)
    Pixis1300BExcelon = c_int(61)
    Pixis1300BRExcelon = c_int(62)
    PixisXO1300B = c_int(65)
    PixisXF1300B = c_int(66)
    PixisXB1300R = c_int(72)
    #- PIXIS 2048 Series -----------------------------------------------------*/
    Pixis2048Series = c_int(20)
    Pixis2048F = c_int(21)
    Pixis2048B = c_int(22)
    Pixis2048BR = c_int(67)
    Pixis2048BExcelon = c_int(63)
    Pixis2048BRExcelon = c_int(74)
    PixisXO2048B = c_int(23)
    PixisXF2048F = c_int(25)
    PixisXF2048B = c_int(24)
    #- PIXIS 2K Series -------------------------------------------------------*/
    Pixis2KSeries = c_int(32)
    Pixis2KF = c_int(33)
    Pixis2KB = c_int(34)
    Pixis2KBUV = c_int(36)
    Pixis2KBExcelon = c_int(64)
    PixisXO2KB = c_int(35)
    #-------------------------------------------------------------------------*/
    #- Quad-RO Series (104) --------------------------------------------------*/
    #-------------------------------------------------------------------------*/
    QuadroSeries = c_int(100)
    Quadro4096 = c_int(101)
    Quadro4096_2 = c_int(103)
    Quadro4320 = c_int(102)
    #-------------------------------------------------------------------------*/
    #- ProEM Series (214) ----------------------------------------------------*/
    #-------------------------------------------------------------------------*/
    ProEMSeries = c_int(200)
    #- ProEM 512 Series ------------------------------------------------------*/
    ProEM512Series = c_int(203)
    ProEM512B = c_int(201)
    ProEM512BK = c_int(205)
    ProEM512BExcelon = c_int(204)
    ProEM512BKExcelon = c_int(206)
    #- ProEM 1024 Series -----------------------------------------------------*/
    ProEM1024Series = c_int(207)
    ProEM1024B = c_int(202)
    ProEM1024BExcelon = c_int(208)
    #- ProEM 1600 Series -----------------------------------------------------*/
    ProEM1600Series = c_int(209)
    ProEM1600xx2B = c_int(212)
    ProEM1600xx2BExcelon = c_int(210)
    ProEM1600xx4B = c_int(213)
    ProEM1600xx4BExcelon = c_int(211)
    #-------------------------------------------------------------------------*/
    #- ProEM+ Series (614) ---------------------------------------------------*/
    #-------------------------------------------------------------------------*/
    ProEMPlusSeries = c_int(600)
    #- ProEM+ 512 Series -----------------------------------------------------*/
    ProEMPlus512Series = c_int(603)
    ProEMPlus512B = c_int(601)
    ProEMPlus512BK = c_int(605)
    ProEMPlus512BExcelon = c_int(604)
    ProEMPlus512BKExcelon = c_int(606)
    #- ProEM+ 1024 Series ----------------------------------------------------*/
    ProEMPlus1024Series = c_int(607)
    ProEMPlus1024B = c_int(602)
    ProEMPlus1024BExcelon = c_int(608)
    #- ProEM+ 1600 Series ----------------------------------------------------*/
    ProEMPlus1600Series = c_int(609)
    ProEMPlus1600xx2B = c_int(612)
    ProEMPlus1600xx2BExcelon = c_int(610)
    ProEMPlus1600xx4B = c_int(613)
    ProEMPlus1600xx4BExcelon = c_int(611)
    #-------------------------------------------------------------------------*/
    #- ProEM-HS Series (1209) ------------------------------------------------*/
    #-------------------------------------------------------------------------*/
    ProEMHSSeries = c_int(1200)
    #- ProEM-HS 512 Series ---------------------------------------------------*/
    ProEMHS512Series = c_int(1201)
    ProEMHS512B = c_int(1202)
    ProEMHS512BK = c_int(1207)
    ProEMHS512BExcelon = c_int(1203)
    ProEMHS512BKExcelon = c_int(1208)
    #- ProEM-HS 1024 Series --------------------------------------------------*/
    ProEMHS1024Series = c_int(1204)
    ProEMHS1024B = c_int(1205)
    ProEMHS1024BExcelon = c_int(1206)
    #-------------------------------------------------------------------------*/
    #- PI-MAX3 Series (303) --------------------------------------------------*/
    #-------------------------------------------------------------------------*/
    PIMax3Series = c_int(300)
    PIMax31024I = c_int(301)
    PIMax31024x256 = c_int(302)
    #-------------------------------------------------------------------------*/
    #- PI-MAX4 Series (721) --------------------------------------------------*/
    #-------------------------------------------------------------------------*/
    PIMax4Series = c_int(700)
    #- PI-MAX4 1024i Series --------------------------------------------------*/
    PIMax41024ISeries = c_int(703)
    PIMax41024I = c_int(701)
    PIMax41024IRF = c_int(704)
    #- PI-MAX4 1024f Series --------------------------------------------------*/
    PIMax41024FSeries = c_int(710)
    PIMax41024F = c_int(711)
    PIMax41024FRF = c_int(712)
    #- PI-MAX4 1024x256 Series -----------------------------------------------*/
    PIMax41024x256Series = c_int(705)
    PIMax41024x256 = c_int(702)
    PIMax41024x256RF = c_int(706)
    #- PI-MAX4 2048 Series ---------------------------------------------------*/
    PIMax42048Series = c_int(716)
    PIMax42048F = c_int(717)
    PIMax42048B = c_int(718)
    PIMax42048FRF = c_int(719)
    PIMax42048BRF = c_int(720)
    #- PI-MAX4 512EM Series --------------------------------------------------*/
    PIMax4512EMSeries = c_int(708)
    PIMax4512EM = c_int(707)
    PIMax4512BEM = c_int(709)
    #- PI-MAX4 1024EM Series -------------------------------------------------*/
    PIMax41024EMSeries = c_int(713)
    PIMax41024EM = c_int(715)
    PIMax41024BEM = c_int(714)
    #-------------------------------------------------------------------------*/
    #- PyLoN Series (439) ----------------------------------------------------*/
    #-------------------------------------------------------------------------*/
    PylonSeries = c_int(400)
    #- PyLoN 100 Series ------------------------------------------------------*/
    Pylon100Series = c_int(418)
    Pylon100F = c_int(404)
    Pylon100B = c_int(401)
    Pylon100BR = c_int(407)
    Pylon100BExcelon = c_int(425)
    Pylon100BRExcelon = c_int(426)
    #- PyLoN 256 Series ------------------------------------------------------*/
    Pylon256Series = c_int(419)
    Pylon256F = c_int(409)
    Pylon256B = c_int(410)
    Pylon256E = c_int(411)
    Pylon256BR = c_int(412)
    #- PyLoN 400 Series ------------------------------------------------------*/
    Pylon400Series = c_int(420)
    Pylon400F = c_int(405)
    Pylon400B = c_int(402)
    Pylon400BR = c_int(408)
    Pylon400BExcelon = c_int(427)
    Pylon400BRExcelon = c_int(428)
    #- PyLoN 1024 Series -----------------------------------------------------*/
    Pylon1024Series = c_int(421)
    Pylon1024B = c_int(417)
    Pylon1024BExcelon = c_int(429)
    #- PyLoN 1300 Series -----------------------------------------------------*/
    Pylon1300Series = c_int(422)
    Pylon1300F = c_int(406)
    Pylon1300B = c_int(403)
    Pylon1300R = c_int(438)
    Pylon1300BR = c_int(432)
    Pylon1300BExcelon = c_int(430)
    Pylon1300BRExcelon = c_int(433)
    #- PyLoN 2048 Series -----------------------------------------------------*/
    Pylon2048Series = c_int(423)
    Pylon2048F = c_int(415)
    Pylon2048B = c_int(434)
    Pylon2048BR = c_int(416)
    Pylon2048BExcelon = c_int(435)
    Pylon2048BRExcelon = c_int(436)
    #- PyLoN 2K Series -------------------------------------------------------*/
    Pylon2KSeries = c_int(424)
    Pylon2KF = c_int(413)
    Pylon2KB = c_int(414)
    Pylon2KBUV = c_int(437)
    Pylon2KBExcelon = c_int(431)
    #-------------------------------------------------------------------------*/
    #- PyLoN-IR Series (904) -------------------------------------------------*/
    #-------------------------------------------------------------------------*/
    PylonirSeries = c_int(900)
    #- PyLoN-IR 1024 Series --------------------------------------------------*/
    Pylonir1024Series = c_int(901)
    Pylonir102422 = c_int(902)
    Pylonir102417 = c_int(903)
    #-------------------------------------------------------------------------*/
    #- PIoNIR Series (502) ---------------------------------------------------*/
    #-------------------------------------------------------------------------*/
    PionirSeries = c_int(500)
    Pionir640 = c_int(501)
    #-------------------------------------------------------------------------*/
    #- NIRvana Series (802) --------------------------------------------------*/
    #-------------------------------------------------------------------------*/
    NirvanaSeries = c_int(800)
    Nirvana640 = c_int(801)
    #-------------------------------------------------------------------------*/
    #- NIRvana ST Series (1302) ----------------------------------------------*/
    #-------------------------------------------------------------------------*/
    NirvanaSTSeries = c_int(1300)
    NirvanaST640 = c_int(1301)
    #-------------------------------------------------------------------------*/
    #- NIRvana-LN Series (1102) ----------------------------------------------*/
    #-------------------------------------------------------------------------*/
    NirvanaLNSeries = c_int(1100)
    NirvanaLN640 = c_int(1101)
#-------------------------------------------------------------------------*/
#- PicamModel;
class PicamError:
    """Error codes returned by Picam library calls.

    NOTE: values are plain Python ints (unlike the c_int-wrapped enum
    classes elsewhere in this module), so they compare directly with ==
    against the integer status a library call returns.
    """
    # Success
    NoError = 0
    # General errors
    UnexpectedError = 4
    UnexpectedNullPointer = 3
    InvalidPointer = 35
    InvalidCount = 39
    InvalidOperation = 42
    OperationCanceled = 43
    # Library initialization errors
    LibraryNotInitialized = 1
    LibraryAlreadyInitialized = 5
    # General string handling errors
    InvalidEnumeratedType = 16
    EnumerationValueNotDefined = 17
    # Plug 'n play discovery errors
    NotDiscoveringCameras = 18
    AlreadyDiscoveringCameras = 19
    # Camera access errors
    NoCamerasAvailable = 34
    CameraAlreadyOpened = 7
    InvalidCameraID = 8
    InvalidHandle = 9
    DeviceCommunicationFailed = 15
    DeviceDisconnected = 23
    DeviceOpenElsewhere = 24
    # Demo errors
    InvalidDemoModel = 6
    InvalidDemoSerialNumber = 21
    DemoAlreadyConnected = 22
    DemoNotSupported = 40
    # Camera parameter access errors
    ParameterHasInvalidValueType = 11
    ParameterHasInvalidConstraintType = 13
    ParameterDoesNotExist = 12
    ParameterValueIsReadOnly = 10
    InvalidParameterValue = 2
    InvalidConstraintCategory = 38
    ParameterValueIsIrrelevant = 14
    ParameterIsNotOnlineable = 25
    ParameterIsNotReadable = 26
    # Camera data acquisition errors
    InvalidParameterValues = 28
    ParametersNotCommitted = 29
    InvalidAcquisitionBuffer = 30
    InvalidReadoutCount = 36
    InvalidReadoutTimeOut = 37
    InsufficientMemory = 31
    AcquisitionInProgress = 20
    AcquisitionNotInProgress = 27
    TimeOutOccurred = 32
    AcquisitionUpdatedHandlerRegistered = 33
    InvalidAcquisitionState = 44
    NondestructiveReadoutEnabled = 41
#- End class PicamError; (45) */
class PicamConstraintType:
    """How a parameter's legal values are constrained.

    Plain ints (not c_int) because PicamParameter.PI_V shifts them into
    the packed parameter id.
    """
    _None = 1  # underscore prefix: `None` is a reserved word in Python
    Range = 2
    Collection = 3
    Rois = 4
    Pulse = 5
    Modulations = 6
#- PicamConstraintType; #- (7) */
class PicamValueType:
    """Data type of a parameter's value.

    Plain ints (not c_int) because PicamParameter.PI_V shifts them into
    the packed parameter id.
    """
    # Integral types
    Integer = 1
    Boolean = 3
    Enumeration = 4
    # Large integral type
    LargeInteger = 6
    # Floating point type
    FloatingPoint = 2
    # Regions-of-interest type
    Rois = 5
    # Pulse type
    Pulse = 7
    # Custom intensifier modulation sequence type
    Modulations = 8
#-------------------------------------------------------------------------*/
#- PicamValueType; (9) */
# define PI_V('v,c,n) (((PicamConstraintType_##c)<<24)+((PicamValueType_##v)<<16)+(n))
#ExposureTime = v.FloatingPoint<<24 + c.Range<<16 + 23
#ExposureTime = self.PIcamValueType.FloatingPoint<<24 + PicamConstraintType.Range<<16 + 23
class PicamParameter:
    """Parameter ids accepted by the Picam Get/SetParameter* calls.

    Each instance attribute is a c_int packing the parameter's constraint
    type, value type and ordinal exactly like the C header's PI_V macro:
    (constraint << 24) + (value << 16) + ordinal.
    """

    def PI_V(self, v, c, n):
        """Pack a (value-type name, constraint-type name, ordinal) triple
        into a c_int parameter id, mirroring the C PI_V macro."""
        constraint = getattr(PicamConstraintType, c)
        value = getattr(PicamValueType, v)
        return c_int((constraint << 24) + (value << 16) + n)

    # (attribute, value type, constraint type, ordinal), grouped exactly as
    # in the Picam C header.
    _PARAMETER_TABLE = (
        # Shutter timing
        ('ExposureTime', 'FloatingPoint', 'Range', 23),
        ('ShutterTimingMode', 'Enumeration', 'Collection', 24),
        ('ShutterOpeningDelay', 'FloatingPoint', 'Range', 46),
        ('ShutterClosingDelay', 'FloatingPoint', 'Range', 25),
        ('ShutterDelayResolution', 'FloatingPoint', 'Collection', 47),
        # Intensifier
        ('EnableIntensifier', 'Boolean', 'Collection', 86),
        ('IntensifierStatus', 'Enumeration', '_None', 87),
        ('IntensifierGain', 'Integer', 'Range', 88),
        ('EMIccdGainControlMode', 'Enumeration', 'Collection', 123),
        ('EMIccdGain', 'Integer', 'Range', 124),
        ('PhosphorDecayDelay', 'FloatingPoint', 'Range', 89),
        ('PhosphorDecayDelayResolution', 'FloatingPoint', 'Collection', 90),
        ('GatingMode', 'Enumeration', 'Collection', 93),
        ('RepetitiveGate', 'Pulse', 'Pulse', 94),
        ('SequentialStartingGate', 'Pulse', 'Pulse', 95),
        ('SequentialEndingGate', 'Pulse', 'Pulse', 96),
        ('SequentialGateStepCount', 'LargeInteger', 'Range', 97),
        ('SequentialGateStepIterations', 'LargeInteger', 'Range', 98),
        ('DifStartingGate', 'Pulse', 'Pulse', 102),
        ('DifEndingGate', 'Pulse', 'Pulse', 103),
        ('BracketGating', 'Boolean', 'Collection', 100),
        ('IntensifierOptions', 'Enumeration', '_None', 101),
        ('EnableModulation', 'Boolean', 'Collection', 111),
        ('ModulationDuration', 'FloatingPoint', 'Range', 118),
        ('ModulationFrequency', 'FloatingPoint', 'Range', 112),
        ('RepetitiveModulationPhase', 'FloatingPoint', 'Range', 113),
        ('SequentialStartingModulationPhase', 'FloatingPoint', 'Range', 114),
        ('SequentialEndingModulationPhase', 'FloatingPoint', 'Range', 115),
        ('CustomModulationSequence', 'Modulations', 'Modulations', 119),
        ('PhotocathodeSensitivity', 'Enumeration', '_None', 107),
        ('GatingSpeed', 'Enumeration', '_None', 108),
        ('PhosphorType', 'Enumeration', '_None', 109),
        ('IntensifierDiameter', 'FloatingPoint', '_None', 110),
        # Analog to digital conversion
        ('AdcSpeed', 'FloatingPoint', 'Collection', 33),
        ('AdcBitDepth', 'Integer', 'Collection', 34),
        ('AdcAnalogGain', 'Enumeration', 'Collection', 35),
        ('AdcQuality', 'Enumeration', 'Collection', 36),
        ('AdcEMGain', 'Integer', 'Range', 53),
        ('CorrectPixelBias', 'Boolean', 'Collection', 106),
        # Hardware I/O
        ('TriggerSource', 'Enumeration', 'Collection', 79),
        ('TriggerResponse', 'Enumeration', 'Collection', 30),
        ('TriggerDetermination', 'Enumeration', 'Collection', 31),
        ('TriggerFrequency', 'FloatingPoint', 'Range', 80),
        ('TriggerTermination', 'Enumeration', 'Collection', 81),
        ('TriggerCoupling', 'Enumeration', 'Collection', 82),
        ('TriggerThreshold', 'FloatingPoint', 'Range', 83),
        ('OutputSignal', 'Enumeration', 'Collection', 32),
        ('InvertOutputSignal', 'Boolean', 'Collection', 52),
        ('AuxOutput', 'Pulse', 'Pulse', 91),
        ('EnableSyncMaster', 'Boolean', 'Collection', 84),
        ('SyncMaster2Delay', 'FloatingPoint', 'Range', 85),
        ('EnableModulationOutputSignal', 'Boolean', 'Collection', 116),
        ('ModulationOutputSignalFrequency', 'FloatingPoint', 'Range', 117),
        ('ModulationOutputSignalAmplitude', 'FloatingPoint', 'Range', 120),
        ('AnticipateTrigger', 'Boolean', 'Collection', 131),
        ('DelayFromPreTrigger', 'FloatingPoint', 'Range', 132),
        # Readout control
        ('ReadoutControlMode', 'Enumeration', 'Collection', 26),
        ('ReadoutTimeCalculation', 'FloatingPoint', '_None', 27),
        ('ReadoutPortCount', 'Integer', 'Collection', 28),
        ('ReadoutOrientation', 'Enumeration', '_None', 54),
        ('KineticsWindowHeight', 'Integer', 'Range', 56),
        ('VerticalShiftRate', 'FloatingPoint', 'Collection', 13),
        ('Accumulations', 'LargeInteger', 'Range', 92),
        ('EnableNondestructiveReadout', 'Boolean', 'Collection', 128),
        ('NondestructiveReadoutPeriod', 'FloatingPoint', 'Range', 129),
        # Data acquisition
        ('Rois', 'Rois', 'Rois', 37),
        ('NormalizeOrientation', 'Boolean', 'Collection', 39),
        ('DisableDataFormatting', 'Boolean', 'Collection', 55),
        ('ReadoutCount', 'LargeInteger', 'Range', 40),
        ('ExactReadoutCountMaximum', 'LargeInteger', '_None', 77),
        ('PhotonDetectionMode', 'Enumeration', 'Collection', 125),
        ('PhotonDetectionThreshold', 'FloatingPoint', 'Range', 126),
        ('PixelFormat', 'Enumeration', 'Collection', 41),
        ('FrameSize', 'Integer', '_None', 42),
        ('FrameStride', 'Integer', '_None', 43),
        ('FramesPerReadout', 'Integer', '_None', 44),
        ('ReadoutStride', 'Integer', '_None', 45),
        ('PixelBitDepth', 'Integer', '_None', 48),
        ('ReadoutRateCalculation', 'FloatingPoint', '_None', 50),
        ('OnlineReadoutRateCalculation', 'FloatingPoint', '_None', 99),
        ('FrameRateCalculation', 'FloatingPoint', '_None', 51),
        ('Orientation', 'Enumeration', '_None', 38),
        ('TimeStamps', 'Enumeration', 'Collection', 68),
        ('TimeStampResolution', 'LargeInteger', 'Collection', 69),
        ('TimeStampBitDepth', 'Integer', 'Collection', 70),
        ('TrackFrames', 'Boolean', 'Collection', 71),
        ('FrameTrackingBitDepth', 'Integer', 'Collection', 72),
        ('GateTracking', 'Enumeration', 'Collection', 104),
        ('GateTrackingBitDepth', 'Integer', 'Collection', 105),
        ('ModulationTracking', 'Enumeration', 'Collection', 121),
        ('ModulationTrackingBitDepth', 'Integer', 'Collection', 122),
        # Sensor information
        ('SensorType', 'Enumeration', '_None', 57),
        ('CcdCharacteristics', 'Enumeration', '_None', 58),
        ('SensorActiveWidth', 'Integer', '_None', 59),
        ('SensorActiveHeight', 'Integer', '_None', 60),
        ('SensorActiveLeftMargin', 'Integer', '_None', 61),
        ('SensorActiveTopMargin', 'Integer', '_None', 62),
        ('SensorActiveRightMargin', 'Integer', '_None', 63),
        ('SensorActiveBottomMargin', 'Integer', '_None', 64),
        ('SensorMaskedHeight', 'Integer', '_None', 65),
        ('SensorMaskedTopMargin', 'Integer', '_None', 66),
        ('SensorMaskedBottomMargin', 'Integer', '_None', 67),
        ('SensorSecondaryMaskedHeight', 'Integer', '_None', 49),
        ('SensorSecondaryActiveHeight', 'Integer', '_None', 74),
        ('PixelWidth', 'FloatingPoint', '_None', 9),
        ('PixelHeight', 'FloatingPoint', '_None', 10),
        ('PixelGapWidth', 'FloatingPoint', '_None', 11),
        ('PixelGapHeight', 'FloatingPoint', '_None', 12),
        # Sensor layout
        ('ActiveWidth', 'Integer', 'Range', 1),
        ('ActiveHeight', 'Integer', 'Range', 2),
        ('ActiveLeftMargin', 'Integer', 'Range', 3),
        ('ActiveTopMargin', 'Integer', 'Range', 4),
        ('ActiveRightMargin', 'Integer', 'Range', 5),
        ('ActiveBottomMargin', 'Integer', 'Range', 6),
        ('MaskedHeight', 'Integer', 'Range', 7),
        ('MaskedTopMargin', 'Integer', 'Range', 8),
        ('MaskedBottomMargin', 'Integer', 'Range', 73),
        ('SecondaryMaskedHeight', 'Integer', 'Range', 75),
        ('SecondaryActiveHeight', 'Integer', 'Range', 76),
        # Sensor cleaning
        ('CleanSectionFinalHeight', 'Integer', 'Range', 17),
        ('CleanSectionFinalHeightCount', 'Integer', 'Range', 18),
        ('CleanSerialRegister', 'Boolean', 'Collection', 19),
        ('CleanCycleCount', 'Integer', 'Range', 20),
        ('CleanCycleHeight', 'Integer', 'Range', 21),
        ('CleanBeforeExposure', 'Boolean', 'Collection', 78),
        ('CleanUntilTrigger', 'Boolean', 'Collection', 22),
        ('StopCleaningOnPreTrigger', 'Boolean', 'Collection', 130),
        # Sensor temperature
        ('SensorTemperatureSetPoint', 'FloatingPoint', 'Range', 14),
        ('SensorTemperatureReading', 'FloatingPoint', '_None', 15),
        ('SensorTemperatureStatus', 'Enumeration', '_None', 16),
        ('DisableCoolingFan', 'Boolean', 'Collection', 29),
        ('EnableSensorWindowHeater', 'Boolean', 'Collection', 127),
    )

    def __init__(self):
        # Materialize one c_int attribute per table row, in table order —
        # equivalent to the original's long list of explicit assignments.
        for attribute, value_type, constraint_type, ordinal in self._PARAMETER_TABLE:
            setattr(self, attribute, self.PI_V(value_type, constraint_type, ordinal))
#--------------------------------------------------------------------------------------*/
#- PicamParameter; (133) */
#-----------------------------------------------------------------------------*/
#- Acquisition Control -------------------------------------------------------*/
#-----------------------------------------------------------------------------*/
class PicamAvailableData(Structure):
    """Mirror of the C PicamAvailableData struct: pointer to the first
    available readout plus the number of readouts available."""
    _fields_ = [('initial_readout', c_void_p),
                ('readout_count', c_longlong)]
#-----------------------------------------------------------------------------*/
class PicamAcquisitionErrorsMask:
    """Bit flags reporting errors that occurred during an acquisition.

    Bug fix: the original had trailing commas after `_None` and `DataLost`,
    silently turning those attributes into 1-element tuples instead of ints
    (while `ConnectionLost` was already a plain int), so comparisons and
    bitwise tests against library status values could never succeed.
    """
    _None = 0x0
    DataLost = 0x1
    ConnectionLost = 0x2
#-PicamAcquisitionErrorsMask; # (0x4) */
#-----------------------------------------------------------------------------*/
class PicamCameraID(Structure):
    """Mirror of the C PicamCameraID struct identifying one camera.

    `model` and `computer_interface` carry Picam enum values; the two
    fixed 64-byte char arrays carry the sensor name and serial number.
    """
    _fields_ = [('model', c_int),
                ('computer_interface', c_int),
                ('sensor_name', (c_char * 64)),
                ('serial_number', (c_char * 64))]
#-----------------------------------------------------------------------------*/
#- Camera Parameter Values - Regions of Interest -----------------------------*/
#-----------------------------------------------------------------------------*/
class PicamRoi(Structure):
    """Mirror of the C PicamRoi struct: one region of interest given by
    origin (x, y), size (width, height) and per-axis binning."""
    _fields_ = [('x', c_int),
                ('width', c_int),
                ('x_binning', c_int),
                ('y', c_int),
                ('height', c_int),
                ('y_binning', c_int)]
#- PicamRoi;
#-----------------------------------------------------------------------------*/
class PicamRois(Structure):
    """Mirror of the C PicamRois struct: a counted array of PicamRoi."""
    _fields_ = [('roi_array', POINTER(PicamRoi)),
                ('roi_count', c_int)]
#- PicamRois;
#-----------------------------------------------------------------------------*/
#-----------------------------------------------------------------------------*/
#- Camera Parameter Constraints - Enumerated Types ---------------------------*/
#-----------------------------------------------------------------------------*/
class PicamConstraintScope:
    """Whether a constraint stands alone or depends on other parameters."""
    Independent = c_int(1)
    Dependent = c_int(2)
#- PicamConstraintScope; # (3) */
#-----------------------------------------------------------------------------*/
class PicamConstraintSeverity:
    """Consequence of violating a constraint."""
    Error = c_int(1)
    Warning = c_int(2)
#- PicamConstraintSeverity; # (3) */
#-----------------------------------------------------------------------------*/
class PicamConstraintCategory:
    """Which constraint set a query refers to."""
    Capable = c_int(1)
    Required = c_int(2)
    Recommended = c_int(3)
#-} PicamConstraintCategory; # (4) */
#-----------------------------------------------------------------------------*/
#- Camera Parameter Constraints - Range --------------------------------------*/
#-----------------------------------------------------------------------------*/
class PicamRangeConstraint(Structure):
    """Mirror of the C PicamRangeConstraint struct: a numeric range
    (minimum/maximum/increment) plus counted arrays of excluded and
    outlying values."""
    _fields_ = [('scope', c_int),
                ('severity', c_int),
                ('empty_set', c_int),
                ('minimum', c_double),
                ('maximum', c_double),
                ('increment', c_double),
                ('excluded_values_array', POINTER(c_double)),
                ('excluded_values_count', c_int),
                ('outlying_values_array', POINTER(c_double)),
                ('outlying_values_count', c_int)]
#-PicamRangeConstraint;
#-----------------------------------------------------------------------------*/
#- Camera Parameter Constraints - Regions Of Interest ------------------------*/
#-----------------------------------------------------------------------------*/
class PicamRoisConstraintRulesMask:
    """Bit-flag values (wrapped in c_byte) describing ROI constraint rules."""
    _None = c_byte(0x00)
    XBinningAlignment = c_byte(0x01)
    YBinningAlignment = c_byte(0x02)
    HorizontalSymmetry = c_byte(0x04)
    VerticalSymmetry = c_byte(0x08)
    SymmetryBoundsBinning = c_byte(0x10)
#- PicamRoisConstraintRulesMask; # (0x20) */
#-----------------------------------------------------------------------------*/
class PicamRoisConstraint(Structure):
    """Mirror of the C PicamRoisConstraint struct: per-axis position/size
    range constraints plus counted arrays of allowed binning factors."""
    _fields_ = [('scope', c_int),
                ('severity', c_int),
                ('empty_set', c_int),
                ('rules', c_byte),
                ('maximum_roi_count', c_int),
                ('x_constraint', PicamRangeConstraint),
                ('width_constraint', PicamRangeConstraint),
                ('x_binning_limits_array', POINTER(c_int)),
                ('x_binning_limits_count', c_int),
                ('y_constraint', PicamRangeConstraint),
                ('height_constraint', PicamRangeConstraint),
                ('y_binning_limits_array', POINTER(c_int)),
                ('y_binning_limits_count', c_int)]
#-PicamRoisConstraint;
#----------------------------------------------------------------------------*/
# Camera Parameter Constraints - Collection ---------------------------------*/
#----------------------------------------------------------------------------*/
class PicamCollectionConstraint(Structure):
    """Mirror of the C PicamCollectionConstraint struct: a counted array of
    discrete allowed values."""
    _fields_ = [('scope', c_int),
                ('severity', c_int),
                ('values_array', POINTER(c_double)),
                ('values_count', c_int)]
#- PicamCollectionConstraint;
#-----------------------------------------------------------------------------*/
class PicamAcquisitionStatus(Structure):
    """Mirror of the C PicamAcquisitionStatus struct: whether acquisition
    is running, accumulated error flags, and the current readout rate."""
    _fields_ = [('running', c_int),
                ('errors', c_int),
                ('readout_rate', c_double)]
#- PicamAcquisitionStatus;
#*----------------------------------------------------------------------------*/
#* Acquisition Setup - Buffer ------------------------------------------------*/
#*----------------------------------------------------------------------------*/
class PicamAcquisitionBuffer(Structure):
    """Mirror of the C PicamAcquisitionBuffer struct: a caller-supplied
    circular acquisition buffer (pointer + size in bytes)."""
    _fields_ = [('memory', c_void_p),
                ('memory_size', c_longlong)]
#- PicamAcquisitionBuffer;
#*----------------------------------------------------------------------------*/
#* Camera Parameter Values - Custom Intensifier Modulation Sequence ----------*/
#*----------------------------------------------------------------------------*/
class PicamModulation(Structure):
    """Mirror of the C PicamModulation struct: one entry of a custom
    intensifier modulation sequence."""
    _fields_ = [('duration', c_double),
                ('frequency', c_double),
                ('phase', c_double),
                ('output_signal_frequency', c_double)]
#- PicamModulation;
#*----------------------------------------------------------------------------*/
class PicamModulations(Structure):
    """Mirror of the C PicamModulations struct: a counted array of
    PicamModulation entries."""
    _fields_ = [('modulation_array', POINTER(PicamModulation)),
                ('modulation_count', c_int)]
#- PicamModulations;
#*----------------------------------------------------------------------------*/
#* Camera Parameter Values - Pulse -------------------------------------------*/
#*----------------------------------------------------------------------------*/
class PicamPulse(Structure):
    """Mirror of the C PicamPulse struct (a gate pulse).

    Bug fix: the original declared the field name 'delay' twice, so the
    second c_double was unreachable by name. The PICam header defines the
    struct as { piflt delay; piflt width; }; the second field is 'width'.
    """
    _fields_ = [('delay', c_double),
                ('width', c_double)]
#- PicamPulse;
#-----------------------------------------------------------------------------*/
#- Camera Parameter Values - Enumerated Types --------------------------------*/
#-----------------------------------------------------------------------------*/
class PicamAdcAnalogGain:
    """Analog gain setting applied before ADC conversion."""
    Low = c_int(1)
    Medium = c_int(2)
    High = c_int(3)
#-PicamAdcAnalogGain; # (4) */
#-----------------------------------------------------------------------------*/
class PicamAdcQuality:
    """ADC conversion quality modes."""
    LowNoise = c_int(1)
    HighCapacity = c_int(2)
    HighSpeed = c_int(4)
    ElectronMultiplied = c_int(3)
#-PicamAdcQuality; # (5) */
#-----------------------------------------------------------------------------*/
class PicamCcdCharacteristicsMask:
    """Bit-flag values (wrapped in c_int) describing CCD sensor traits."""
    _None = c_int(0x000)
    BackIlluminated = c_int(0x001)
    DeepDepleted = c_int(0x002)
    OpenElectrode = c_int(0x004)
    UVEnhanced = c_int(0x008)
    ExcelonEnabled = c_int(0x010)
    SecondaryMask = c_int(0x020)
    Multiport = c_int(0x040)
    AdvancedInvertedMode = c_int(0x080)
    HighResistivity = c_int(0x100)
#-PicamCcdCharacteristicsMask; # (0x200) */
#-----------------------------------------------------------------------------*/
class PicamEMIccdGainControlMode:
    """How EM-ICCD gain is controlled."""
    Optimal = c_int(1)
    Manual = c_int(2)
#-PicamEMIccdGainControlMode; # (3) */
#-----------------------------------------------------------------------------*/
class PicamGateTrackingMask:
    """Bit-flag values selecting which gate quantities are tracked."""
    _None = c_int(0x0)
    Delay = c_int(0x1)
    Width = c_int(0x2)
#-PicamGateTrackingMask; /* (0x4) */
#-----------------------------------------------------------------------------*/
class PicamGatingMode:
    """Intensifier gating modes."""
    Repetitive = c_int(1)
    Sequential = c_int(2)
    Custom = c_int(3)
#-PicamGatingMode; /* (4) */
#-----------------------------------------------------------------------------*/
class PicamGatingSpeed:
    """Gating speed classes."""
    Fast = c_int(1)
    Slow = c_int(2)
#-PicamGatingSpeed; /* (3) */
#-----------------------------------------------------------------------------*/
class PicamIntensifierOptionsMask:
    """Bit-flag values describing intensifier capabilities."""
    _None = c_int(0x0)
    McpGating = c_int(0x1)
    SubNanosecondGating = c_int(0x2)
    Modulation = c_int(0x4)
#-PicamIntensifierOptionsMask; /* (0x8) */
#-----------------------------------------------------------------------------*/
class PicamIntensifierStatus:
    """Power state of the image intensifier.

    Consistency fix: members now use the short-name convention followed by
    every other enum class in this module; the original redundantly
    prefixed names are kept as backward-compatible aliases.
    """
    PoweredOff = c_int(1)
    PoweredOn = c_int(2)
    # Deprecated aliases preserving the original member names.
    PicamIntensifierStatus_PoweredOff = PoweredOff
    PicamIntensifierStatus_PoweredOn = PoweredOn
#-PicamIntensifierStatus; /* (3) */
#-----------------------------------------------------------------------------*/
class PicamModulationTrackingMask:
    """Bit-flag values selecting which modulation quantities are tracked."""
    _None = c_int(0x0)
    Duration = c_int(0x1)
    Frequency = c_int(0x2)
    Phase = c_int(0x4)
    OutputSignalFrequency = c_int(0x8)
#-PicamModulationTrackingMask; /* (0x10) */
#-----------------------------------------------------------------------------*/
class PicamOrientationMask:
    """Bit-flag values describing image orientation transforms."""
    Normal = c_int(0x0)
    FlippedHorizontally = c_int(0x1)
    FlippedVertically = c_int(0x2)
#-PicamOrientationMask; /* (0x4) */
#-----------------------------------------------------------------------------*/
class PicamOutputSignal:
    """Signal driven on the camera's hardware output connector."""
    NotReadingOut = c_int(1)
    ShutterOpen = c_int(2)
    Busy = c_int(3)
    AlwaysLow = c_int(4)
    AlwaysHigh = c_int(5)
    Acquiring = c_int(6)
    ShiftingUnderMask = c_int(7)
    Exposing = c_int(8)
    EffectivelyExposing = c_int(9)
    ReadingOut = c_int(10)
    WaitingForTrigger = c_int(11)
#-PicamOutputSignal; /* (12) */
#-----------------------------------------------------------------------------*/
class PicamPhosphorType:
    """Intensifier phosphor screen types."""
    P43 = c_int(1)
    P46 = c_int(2)
#-PicamPhosphorType; /* (3) */
#-----------------------------------------------------------------------------*/
class PicamPhotocathodeSensitivity:
    """Intensifier photocathode sensitivity variants."""
    RedBlue = c_int(1)
    SuperRed = c_int(7)
    SuperBlue = c_int(2)
    UV = c_int(3)
    SolarBlind = c_int(10)
    Unigen2Filmless = c_int(4)
    InGaAsFilmless = c_int(9)
    HighQEFilmless = c_int(5)
    HighRedFilmless = c_int(8)
    HighBlueFilmless = c_int(6)
#-PicamPhotocathodeSensitivity; /* (11) */
#-----------------------------------------------------------------------------*/
class PicamPhotonDetectionMode:
    """Single-photon detection post-processing modes."""
    Disabled = c_int(1)
    Thresholding = c_int(2)
    Clipping = c_int(3)
#-PicamPhotonDetectionMode; /* (4) */
#-----------------------------------------------------------------------------*/
class PicamPixelFormat:
    """Pixel data formats produced in readouts."""
    Monochrome16Bit = c_int(1)
#-PicamPixelFormat; /* (2) */
#-----------------------------------------------------------------------------*/
class PicamReadoutControlMode:
    """Sensor readout control modes."""
    FullFrame = c_int(1)
    FrameTransfer = c_int(2)
    Interline = c_int(5)
    Kinetics = c_int(3)
    SpectraKinetics = c_int(4)
    Dif = c_int(6)
#-PicamReadoutControlMode; /* (7) */
#-----------------------------------------------------------------------------*/
class PicamSensorTemperatureStatus:
    """Whether the sensor temperature has settled at the set point."""
    Unlocked = c_int(1)
    Locked = c_int(2)
#-PicamSensorTemperatureStatus; /* (3) */
#-----------------------------------------------------------------------------*/
class PicamSensorType:
    """Sensor technology families."""
    Ccd = c_int(1)
    InGaAs = c_int(2)
#-PicamSensorType; /* (3) */
#-----------------------------------------------------------------------------*/
class PicamShutterTimingMode:
    """Mechanical shutter behavior during acquisition."""
    Normal = c_int(1)
    AlwaysClosed = c_int(2)
    AlwaysOpen = c_int(3)
    OpenBeforeTrigger = c_int(4)
#-PicamShutterTimingMode; /* (5) */
#-----------------------------------------------------------------------------*/
class PicamTimeStampsMask:
    """Bit-flag values selecting which time stamps are recorded."""
    _None = c_int(0x0)
    ExposureStarted = c_int(0x1)
    ExposureEnded = c_int(0x2)
#-PicamTimeStampsMask; /* (0x4) */
#-----------------------------------------------------------------------------*/
class PicamTriggerCoupling:
    """Electrical coupling of the trigger input."""
    AC = c_int(1)
    DC = c_int(2)
#-PicamTriggerCoupling; /* (3) */
#-----------------------------------------------------------------------------*/
class PicamTriggerDetermination:
    """Which feature of the trigger signal is detected."""
    PositivePolarity = c_int(1)
    NegativePolarity = c_int(2)
    RisingEdge = c_int(3)
    FallingEdge = c_int(4)
#-PicamTriggerDetermination; /* (5) */
#-----------------------------------------------------------------------------*/
class PicamTriggerResponse:
    """How the camera responds to a trigger event."""
    NoResponse = c_int(1)
    ReadoutPerTrigger = c_int(2)
    ShiftPerTrigger = c_int(3)
    ExposeDuringTriggerPulse = c_int(4)
    StartOnSingleTrigger = c_int(5)
#-PicamTriggerResponse; /* (6) */
#-----------------------------------------------------------------------------*/
class PicamTriggerSource:
    """Origin of the trigger signal."""
    External = c_int(1)
    Internal = c_int(2)
#-PicamTriggerSource; /* (3) */
#-----------------------------------------------------------------------------*/
class PicamTriggerTermination:
    """Electrical termination of the trigger input."""
    FiftyOhms = c_int(1)
    HighImpedance = c_int(2)
#-PicamTriggerTermination; /* (3) */
|
import argparse
from args import init_parser, post_processing
import numpy as np
from envs import make_env
# find the carla module
import os
import math
import random
import time
import torch
import shutil
# Command-line configuration is parsed at import time; `args` is a
# module-level global consumed by every function below.
parser = argparse.ArgumentParser(description='SPC')
init_parser(parser) # See `args.py` for default arguments
args = parser.parse_args()
args = post_processing(args)
# Client timeouts for the two CARLA generations. NOTE(review): the units
# appear to differ (100000 presumably milliseconds for CARLA 0.8.x, 20.0
# seconds for 0.9.x) — confirm against the respective client APIs.
CARLA8_TIMEOUT = 100000
CARLA9_TIMEOUT = 20.0
def init_dirs(dir_list):
    """Create every directory in `dir_list`, including missing parents.

    Existing directories are left untouched. The original's `isdir`
    pre-check was redundant (and race-prone) given `exist_ok=True`, so it
    is removed.
    """
    for path in dir_list:
        os.makedirs(path, exist_ok=True)
def setup_dirs(args):
    """Ensure the `model` and `optimizer` checkpoint directories exist
    under ``args.save_path``."""
    base = args.save_path
    init_dirs([os.path.join(base, sub) for sub in ('model', 'optimizer')])
def create_carla9_env(args):
    """Connect to a local CARLA 0.9.x simulator on ``args.port``, switch it
    to synchronous mode, and wrap it in the project's World environment.

    Bug fix: the egg-path `sys.path.append` hack was originally executed
    AFTER `import carla`, so it could never influence module resolution.
    It now runs first, letting a distribution .egg satisfy the import when
    carla is not installed via pip/conda.
    """
    # Make the distribution .egg (if any) importable BEFORE importing carla.
    try:
        import glob
        import sys
        sys.path.append(glob.glob('**/*%d.%d-%s.egg' % (
            sys.version_info.major,
            sys.version_info.minor,
            'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
    except IndexError:
        # No egg found; rely on an installed carla package.
        pass
    from envs.CARLA.carla9 import World
    import carla  # installed by pip/conda, or resolved via the egg above
    client = carla.Client("localhost", args.port)
    client.set_timeout(CARLA9_TIMEOUT)
    carla_world = client.get_world()
    # Run the simulator in lock-step with the agent.
    settings = carla_world.get_settings()
    settings.synchronous_mode = True
    client.get_world().apply_settings(settings)
    env = World(args, carla_world)
    return env
def main():
    """Entry point: prepare output directories, seed all RNGs, build the
    simulator environment selected by ``args.env``, then train or evaluate
    the policy depending on ``args.eval``."""
    # Refuse to clobber a previous run's output unless resuming.
    if not args.resume and os.path.isdir(args.save_path):
        print("the save path has already existed!")
        exit(0)
    setup_dirs(args)
    # Snapshot the `scripts` directory next to the results for reproducibility.
    script_path = os.path.join(args.save_path, 'scripts')
    if not os.path.isdir(script_path):
        shutil.copytree('scripts', script_path)
    # Seed torch, numpy and the stdlib RNG from the same seed.
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
    env = None # placeholder
    if 'carla9' in args.env:
        # select CARLA v0.9.x as the platform
        env = create_carla9_env(args)
    elif 'carla8' in args.env:
        # select CARLA v0.8.x as the platform
        from envs.CARLA.carla_lib.carla.client import make_carla_client
        from envs.CARLA.carla_env import CarlaEnv
        client = make_carla_client('localhost', args.port, CARLA8_TIMEOUT)
        env = CarlaEnv(client, args)
    else:
        # select PyTorcs or GTAV as the platform
        # which is basically inherited from SPC, not fully supported in IPC
        env = make_env(args)
    # Train/evaluate imports are deferred so only the chosen path's
    # dependencies are loaded.
    if args.eval:
        from evaluate import evaluate_policy
        evaluate_policy(args, env)
    else:
        from train import train_policy
        train_policy(args, env)


if __name__ == '__main__':
    main()
|
# Copyright (c) 2021.
# The copyright lies with Timo Hirsch-Hoffmann, the further use is only permitted with reference to source
from enum import Enum
# Ranked tier names, ordered highest to lowest. Each member's value equals
# its name, so Tier(value) round-trips cleanly from the string form.
Tier = Enum(
    "Tier",
    [(name, name) for name in (
        "CHALLENGER",
        "GRANDMASTER",
        "MASTER",
        "DIAMOND",
        "PLATINUM",
        "GOLD",
        "SILVER",
        "BRONZE",
        "IRON",
    )],
)
|
from django.urls import path, include, re_path
from django.conf import settings
from django.contrib import admin
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView
from graphapi.views import KeyedGraphQLView
from graphapi.middleware import QueryProtectionMiddleware
from bundles.views import bundle_view
# URL routing table; order matters because Django resolves top-to-bottom.
urlpatterns = [
    # Django admin plus the project's custom people-admin interface.
    path("djadmin/", admin.site.urls),
    path("admin/people/", include("people_admin.urls")),
    # Authentication (django-allauth) and user profiles.
    path("accounts/", include("allauth.urls")),
    path("accounts/profile/", include("profiles.urls")),
    path("dashboard/", include("dashboards.urls")),
    path("api/v1/", include("v1.urls")),
    # GraphQL endpoint: CSRF-exempt, API-key protected, query cost capped
    # at 5000 by QueryProtectionMiddleware. Trailing slash is optional.
    re_path(
        "^graphql/?$",
        csrf_exempt(
            KeyedGraphQLView.as_view(
                graphiql=True, middleware=[QueryProtectionMiddleware(5000)]
            )
        ),
    ),
    # Two catch-all includes at the root: public pages first, then redirects.
    path("", include("public.urls")),
    path("", include("web.redirects")),
    path("data/", include("bulk.urls")),
    path("bundles/", include("bundles.urls")),
    # Shortcut URL mapping straight to the covid19 bundle.
    path("covid19/", bundle_view, {"slug": "covid19"}),
    # flatpages
    path("about/", TemplateView.as_view(template_name="flat/about.html")),
    path(
        "about/contributing/",
        TemplateView.as_view(template_name="flat/contributing.html"),
    ),
    path(
        "about/subscriptions/",
        TemplateView.as_view(template_name="flat/subscriptions.html"),
    ),
    path("tos/", TemplateView.as_view(template_name="flat/tos.html")),
    path("api/registered/", TemplateView.as_view(template_name="flat/registered.html")),
]
if settings.DEBUG:
    from django.views.defaults import page_not_found, server_error
    # Expose the error pages directly so they can be previewed in development.
    urlpatterns += [
        path("404/", page_not_found, {"exception": None}),
        path("500/", server_error),
        # url(r'^silk/', include('silk.urls', namespace='silk')),
    ]
|
from .admin import admin
from .frontend import frontend
from .doc import doc, auto
|
"""
Custom integration to integrate Vaillant vSMART with Home Assistant.
For more details about this integration, please refer to
https://github.com/MislavMandaric/home-assistant-vaillant-vsmart
"""
from __future__ import annotations
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET, CONF_TOKEN
from homeassistant.core import Config, HomeAssistant
from homeassistant.helpers.httpx_client import get_async_client
from vaillant_netatmo_api import ThermostatClient, Token, TokenStore
from .const import (
DOMAIN,
PLATFORMS,
)
from .entity import VaillantCoordinator
from .websockets import async_register_websockets
async def async_setup(hass: HomeAssistant, config: Config) -> bool:
    """Set up Vaillant vSMART component.

    Initializes the integration's storage dict in ``hass.data`` and
    registers the integration's websocket handlers. Always returns True so
    Home Assistant considers the component successfully loaded.
    """
    hass.data.setdefault(DOMAIN, {})
    await async_register_websockets(hass)
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up Vaillant vSMART from a config entry.

    Builds an authenticated ThermostatClient from the credentials stored on
    the entry, performs the first data refresh, stores the coordinator and
    forwards setup to the configured platforms. Returns True on success.
    """
    def handle_token_update(token: Token) -> None:
        # Persist refreshed OAuth tokens back into the config entry so they
        # survive Home Assistant restarts.
        data = entry.data.copy()
        data[CONF_TOKEN] = token.serialize()
        hass.config_entries.async_update_entry(entry, data=data)
    client_id = entry.data.get(CONF_CLIENT_ID)
    client_secret = entry.data.get(CONF_CLIENT_SECRET)
    token = Token.deserialize(entry.data.get(CONF_TOKEN))
    client = ThermostatClient(
        get_async_client(hass),
        TokenStore(client_id, client_secret, token, handle_token_update),
    )
    coordinator = VaillantCoordinator(hass, client)
    # Propagates a setup-retry to HA if the first refresh fails.
    await coordinator.async_config_entry_first_refresh()
    hass.data[DOMAIN][entry.entry_id] = coordinator
    # NOTE(review): async_setup_platforms is deprecated in newer Home
    # Assistant releases in favour of async_forward_entry_setups -- confirm
    # the minimum supported HA version before changing.
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry, dropping its coordinator on success."""
    if unloaded := await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
        hass.data[DOMAIN].pop(entry.entry_id)
    return unloaded
|
import json
from dataclasses import dataclass
from typing import Any
from typing import Optional
import requests
from requests import JSONDecodeError
from requests import Response
from requests.adapters import HTTPAdapter
from telliot_core.datasource import DataSource
from telliot_core.dtypes.datapoint import DataPoint
from telliot_core.dtypes.datapoint import datetime_now_utc
from urllib3.util import Retry
from telliot_feed_examples.utils.log import get_logger
logger = get_logger(__name__)
# Retry POST requests up to 3 times with exponential backoff when the server
# answers with a transient error status.
retry_strategy = Retry(
    total=3,
    backoff_factor=1,
    status_forcelist=[429, 500, 502, 503, 504],
    allowed_methods=["POST"],
)
# HTTP adapter carrying the retry policy; mounted onto the requests session
# used for the Morphware API calls below.
adapter = HTTPAdapter(max_retries=retry_strategy)
@dataclass
class MorphwareV1Source(DataSource[str]):
    """DataSource for Morphware query V1 expected response data."""
    async def get_metadata(self) -> Optional[Response]:
        """Fetches EC2 metadata from Morphware API.

        Returns the raw HTTP response, or None on any network failure.
        """
        with requests.Session() as s:
            # s.mount("https://", adapter)
            # Mount the module-level retry adapter for transient HTTP errors.
            s.mount("http://", adapter)
            json_data = {
                "provider": "amazon",
                "service": "compute",
                "region": "us-east-1",
            }
            try:
                return s.post(
                    "http://167.172.239.133:5000/products-2",
                    headers={},
                    json=json_data,
                    timeout=0.5,
                )
            except requests.exceptions.RequestException as e:
                logger.error(f"Morphware V1 API error: {e}")
                return None
    def adjust_data_types(self, data: list[dict[str, Any]]) -> list[str]:
        """Serialize each metadata dict into a JSON string."""
        return [json.dumps(d) for d in data]
    async def fetch_new_datapoint(self) -> Optional[DataPoint[list[str]]]:
        """Retrieves Amazon EC2 instance pricing metadata from API
        hosted by Morphware.
        Returns:
            array of JSON object strings containing EC2 metadata:
            Interface Ec2MetaData {
                "Instance Type": str,
                "CUDA Cores": int,
                "Number of CPUs": int,
                "RAM": float,
                "On-demand Price per Hour": float,
            }
        """
        rsp = await self.get_metadata()
        if rsp is None:
            logger.warning("No response from Morphware V1 API")
            return None, None
        try:
            ec2_metadata = rsp.json()
        except JSONDecodeError as e:
            # Bug fix: JSONDecodeError has no `strerror` attribute, and the
            # original message had no %-placeholder for the extra argument,
            # so nothing useful was ever logged. Log the exception itself.
            logger.error("Morphware V1 source returned invalid JSON: %s", e)
            return None, None
        if ec2_metadata == []:
            logger.warning("Morphware V1 source returned no EC2 metadata")
            return None, None
        ec2_metadata = self.adjust_data_types(ec2_metadata)
        datapoint = (ec2_metadata, datetime_now_utc())
        self.store_datapoint(datapoint)
        logger.info(f"Morphware query V1 data retrieved at time {datapoint[1]}")
        return datapoint
|
def generate_nav_urls(host, query, results):
    """Generate links to previous and next pages for navigation.

    Returns a dict {'prev': url_or_None, 'next': url_or_None}; a link is
    None when there is no page in that direction.
    """
    page = results.current_page()
    base = 'http://' + host + '/search?q=' + query + '&page='
    prev_url = base + str(page - 1) if page > 1 else None
    next_url = base + str(page + 1) if page < results.pages() else None
    return {'prev': prev_url, 'next': next_url}
|
# Pre-hashed login password for the classic Jupyter Notebook server
# (generated with notebook.auth.passwd(); only the salted SHA-1 digest is
# stored here, never the plaintext).
c.NotebookApp.password = u'sha1:2be34ae0f862:56b76d8a81c2db8f2e7038e1a3f6838a4f47131e'
|
import FWCore.ParameterSet.Config as cms
# Monitoring configuration for the NoBPTX (no beam-pickup trigger) jet HLT
# paths. The trailing "_v" in each path name matches any version suffix.
JetNoBptxPSet = cms.PSet(
    hltPathsToCheck = cms.vstring(
        "HLT_UncorrectedJetE30_NoBPTX_v", # 2017 proposal
        "HLT_UncorrectedJetE30_NoBPTX3BX_v",
        "HLT_UncorrectedJetE60_NoBPTX3BX_v",
        "HLT_UncorrectedJetE70_NoBPTX3BX_v",
        ),
    # Reconstructed calo jets used as the offline reference collection.
    recCaloJetLabel  = cms.InputTag("ak4CaloJets"),
    # -- Analysis specific cuts
    minCandidates = cms.uint32(1),
    # -- Analysis specific binnings
    parametersTurnOn = cms.vdouble( 0, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150,
                                    160, 170, 180, 190, 200,
                                    220, 240, 260, 280, 300,
                                    320, 340, 360, 380, 400,
                                    420, 440, 460, 480, 500),
    )
|
#!/usr/bin/env python3
# Copyright 2021 Canonical
# See LICENSE file for licensing details.
#
# Learn more at: https://juju.is/docs/sdk
"""Charm for the Ceph Dashboard."""
import json
import logging
import tempfile
from ops.framework import StoredState
from ops.main import main
from ops.model import ActiveStatus, BlockedStatus, StatusBase
from ops.charm import ActionEvent
from typing import List, Union, Tuple
import base64
import interface_tls_certificates.ca_client as ca_client
import interface_openstack_loadbalancer.loadbalancer as ops_lb_interface
import re
import secrets
import socket
import string
import subprocess
import tenacity
import ops_openstack.plugins.classes
import interface_ceph_iscsi_admin_access.admin_access as admin_access
import interface_dashboard
import interface_grafana_dashboard
import interface_http
import interface_radosgw_user
import cryptography.hazmat.primitives.serialization as serialization
import charms_ceph.utils as ceph_utils
import charmhelpers.core.host as ch_host
from pathlib import Path
logger = logging.getLogger(__name__)
# (key, cert, ca_cert) triple; each element may be None when unavailable.
TLS_Config = Tuple[Union[bytes, None], Union[bytes, None], Union[bytes, None]]
class CephDashboardCharm(ops_openstack.core.OSBaseCharm):
    """Ceph Dashboard charm."""
    _stored = StoredState()
    PACKAGES = ['ceph-mgr-dashboard']
    CEPH_CONFIG_PATH = Path('/etc/ceph')
    TLS_KEY_PATH = CEPH_CONFIG_PATH / 'ceph-dashboard.key'
    TLS_PUB_KEY_PATH = CEPH_CONFIG_PATH / 'ceph-dashboard-pub.key'
    TLS_CERT_PATH = CEPH_CONFIG_PATH / 'ceph-dashboard.crt'
    TLS_KEY_AND_CERT_PATH = CEPH_CONFIG_PATH / 'ceph-dashboard.pem'
    TLS_CA_CERT_DIR = Path('/usr/local/share/ca-certificates')
    TLS_VAULT_CA_CERT_PATH = TLS_CA_CERT_DIR / 'vault_juju_ca_cert.crt'
    TLS_CHARM_CA_CERT_PATH = TLS_CA_CERT_DIR / 'charm_config_juju_ca_cert.crt'
    TLS_PORT = 8443
    DASH_DIR = Path('src/dashboards')
    LB_SERVICE_NAME = "ceph-dashboard"
    class CharmCephOption():
        """Manage a charm option to ceph command to manage that option"""
        def __init__(self, charm_option_name, ceph_option_name,
                     min_version=None):
            self.charm_option_name = charm_option_name
            self.ceph_option_name = ceph_option_name
            self.min_version = min_version
        def is_supported(self) -> bool:
            """Is the option supported on this unit"""
            if self.min_version:
                return self.minimum_supported(self.min_version)
            return True
        def minimum_supported(self, supported_version: str) -> bool:
            """Check if installed Ceph release is >= to supported_version"""
            return ch_host.cmp_pkgrevno('ceph-common', supported_version) >= 0
        def convert_option(self, value: Union[bool, str, int]) -> List[str]:
            """Convert a value to the corresponding value part of the ceph
               dashboard command"""
            return [str(value)]
        def ceph_command(self, value: List[str]) -> List[str]:
            """Shell command to set option to desired value"""
            cmd = ['ceph', 'dashboard', self.ceph_option_name]
            cmd.extend(self.convert_option(value))
            return cmd
    class DebugOption(CharmCephOption):
        def convert_option(self, value):
            """Convert charm True/False to enable/disable"""
            if value:
                return ['enable']
            else:
                return ['disable']
    class MOTDOption(CharmCephOption):
        def convert_option(self, value):
            """Split motd charm option into ['severity', 'time', 'message']"""
            if value:
                return value.split('|')
            else:
                return ['clear']
    # Mapping of charm config options to the `ceph dashboard` subcommand
    # that applies each of them.
    CHARM_TO_CEPH_OPTIONS = [
        DebugOption('debug', 'debug'),
        CharmCephOption(
            'enable-password-policy',
            'set-pwd-policy-enabled'),
        CharmCephOption(
            'password-policy-check-length',
            'set-pwd-policy-check-length-enabled'),
        CharmCephOption(
            'password-policy-check-oldpwd',
            'set-pwd-policy-check-oldpwd-enabled'),
        CharmCephOption(
            'password-policy-check-username',
            'set-pwd-policy-check-username-enabled'),
        CharmCephOption(
            'password-policy-check-exclusion-list',
            'set-pwd-policy-check-exclusion-list-enabled'),
        CharmCephOption(
            'password-policy-check-complexity',
            'set-pwd-policy-check-complexity-enabled'),
        CharmCephOption(
            'password-policy-check-sequential-chars',
            'set-pwd-policy-check-sequential-chars-enabled'),
        CharmCephOption(
            'password-policy-check-repetitive-chars',
            'set-pwd-policy-check-repetitive-chars-enabled'),
        CharmCephOption(
            'password-policy-min-length',
            'set-pwd-policy-min-length'),
        CharmCephOption(
            'password-policy-min-complexity',
            'set-pwd-policy-min-complexity'),
        CharmCephOption(
            'audit-api-enabled',
            'set-audit-api-enabled'),
        CharmCephOption(
            'audit-api-log-payload',
            'set-audit-api-log-payload'),
        MOTDOption(
            'motd',
            'motd',
            min_version='15.2.14')
    ]
    def __init__(self, *args) -> None:
        """Setup adapters and observers."""
        super().__init__(*args)
        super().register_status_check(self.check_dashboard)
        self.framework.observe(
            self.on.config_changed,
            self._configure_dashboard)
        self.mon = interface_dashboard.CephDashboardRequires(
            self,
            'dashboard')
        self.ca_client = ca_client.CAClient(
            self,
            'certificates')
        self.radosgw_user = interface_radosgw_user.RadosGWUserRequires(
            self,
            'radosgw-dashboard',
            request_system_role=True)
        self.iscsi_user = admin_access.CephISCSIAdminAccessRequires(
            self,
            'iscsi-dashboard')
        self.framework.observe(
            self.mon.on.mon_ready,
            self._configure_dashboard)
        self.framework.observe(
            self.ca_client.on.ca_available,
            self._configure_dashboard)
        self.framework.observe(
            self.ca_client.on.tls_server_config_ready,
            self._configure_dashboard)
        self.framework.observe(
            self.radosgw_user.on.gw_user_ready,
            self._configure_dashboard)
        self.framework.observe(
            self.iscsi_user.on.admin_access_ready,
            self._configure_dashboard)
        self.framework.observe(self.on.add_user_action, self._add_user_action)
        self.framework.observe(
            self.on.delete_user_action,
            self._delete_user_action)
        self.ingress = ops_lb_interface.OSLoadbalancerRequires(
            self,
            'loadbalancer')
        self.grafana_dashboard = \
            interface_grafana_dashboard.GrafanaDashboardProvides(
                self,
                'grafana-dashboard')
        self.alertmanager = interface_http.HTTPRequires(
            self,
            'alertmanager-service')
        self.prometheus = interface_http.HTTPRequires(
            self,
            'prometheus')
        self.framework.observe(
            self.grafana_dashboard.on.dash_ready,
            self._configure_dashboard)
        self.framework.observe(
            self.alertmanager.on.http_ready,
            self._configure_dashboard)
        self.framework.observe(
            self.prometheus.on.http_ready,
            self._configure_dashboard)
        self.framework.observe(
            self.ingress.on.lb_relation_ready,
            self._request_loadbalancer)
        self.framework.observe(
            self.ingress.on.lb_configured,
            self._configure_dashboard)
        self._stored.set_default(is_started=False)
    def _request_loadbalancer(self, _) -> None:
        """Send request to create loadbalancer"""
        self.ingress.request_loadbalancer(
            self.LB_SERVICE_NAME,
            self.TLS_PORT,
            self.TLS_PORT,
            self._get_bind_ip(),
            'httpd')
    def _register_dashboards(self) -> None:
        """Register all dashboards with grafana"""
        for dash_file in self.DASH_DIR.glob("*.json"):
            self.grafana_dashboard.register_dashboard(
                dash_file.stem,
                json.loads(dash_file.read_text()))
            logging.info(
                "register_grafana_dashboard: {}".format(dash_file))
    def _update_legacy_radosgw_creds(self, access_key: str,
                                     secret_key: str) -> None:
        """Update dashboard db with access & secret key for rados gateways.
        This method uses the legacy format which only supports one gateway.
        """
        self._apply_file_setting('set-rgw-api-access-key', access_key)
        self._apply_file_setting('set-rgw-api-secret-key', secret_key)
    def _update_multi_radosgw_creds(self, creds: List[dict]) -> None:
        """Update dashboard db with access & secret key for rados gateway."""
        access_keys = {c['daemon_id']: c['access_key'] for c in creds}
        secret_keys = {c['daemon_id']: c['secret_key'] for c in creds}
        self._apply_file_setting(
            'set-rgw-api-access-key',
            json.dumps(access_keys))
        self._apply_file_setting(
            'set-rgw-api-secret-key',
            json.dumps(secret_keys))
    def _support_multiple_gateways(self) -> bool:
        """Check if version of dashboard supports multiple rados gateways"""
        return ch_host.cmp_pkgrevno('ceph-common', '16.0') > 0
    def _manage_radosgw(self) -> None:
        """Register rados gateways in dashboard db"""
        if self.unit.is_leader():
            creds = self.radosgw_user.get_user_creds()
            cred_count = len(set([
                (c['access_key'], c['secret_key'])
                for c in creds]))
            if cred_count < 1:
                logging.info("No object gateway creds found")
                return
            if self._support_multiple_gateways():
                self._update_multi_radosgw_creds(creds)
            else:
                if cred_count > 1:
                    logging.error(
                        "Cannot enable object gateway support. Ceph release "
                        "does not support multiple object gateways in the "
                        "dashboard")
                else:
                    self._update_legacy_radosgw_creds(
                        creds[0]['access_key'],
                        creds[0]['secret_key'])
    def request_certificates(self) -> None:
        """Request TLS certificates."""
        if not self.ca_client.is_joined:
            logging.debug(
                "Cannot request certificates, relation not present.")
            return
        addresses = set()
        if self.ingress.relations:
            lb_response = self.ingress.get_frontend_data()
            if lb_response:
                lb_config = lb_response[self.LB_SERVICE_NAME]
                addresses.update(
                    [i for d in lb_config.values() for i in d['ip']])
            else:
                logging.debug(
                    ("Defering certificate request until loadbalancer has "
                     "responded."))
                return
        for binding_name in ['public']:
            binding = self.model.get_binding(binding_name)
            addresses.add(binding.network.ingress_address)
            addresses.add(binding.network.bind_address)
        sans = [str(s) for s in addresses]
        sans.append(socket.gethostname())
        if self.config.get('public-hostname'):
            sans.append(self.config.get('public-hostname'))
        self.ca_client.request_server_certificate(socket.getfqdn(), sans)
    def _check_for_certs(self) -> bool:
        """Check that charm has TLS data it needs"""
        # Check charm config for TLS data
        key, cert, _ = self._get_tls_from_config()
        if key and cert:
            return True
        # Check relation for TLS data
        try:
            self.ca_client.server_key
            return True
        except ca_client.CAClientError:
            return False
    def _check_dashboard_responding(self) -> bool:
        """Check the dashboard port is open"""
        @tenacity.retry(wait=tenacity.wait_fixed(2),
                        stop=tenacity.stop_after_attempt(30), reraise=True)
        def _check_port(ip, port):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            result = sock.connect_ex((ip, port))
            assert result == 0
        try:
            _check_port(self._get_bind_ip(), self.TLS_PORT)
            return True
        except AssertionError:
            return False
    def _check_grafana_config(self) -> bool:
        """Check that grafana-api-url is set if grafana is in use."""
        if self.grafana_dashboard.dashboard_relation:
            return bool(self.config.get('grafana-api-url'))
        else:
            return True
    def check_dashboard(self) -> StatusBase:
        """Check status of dashboard"""
        checks = [
            (ceph_utils.is_dashboard_enabled, 'Dashboard is not enabled'),
            (self._check_for_certs, ('No certificates found. Please add a '
                                     'certifcates relation or provide via '
                                     'charm config')),
            (self._check_grafana_config, 'Charm config option grafana-api-url '
                                         'not set'),
            (self._check_dashboard_responding, 'Dashboard not responding')]
        for check_f, msg in checks:
            if not check_f():
                return BlockedStatus(msg)
        return ActiveStatus()
    def kick_dashboard(self) -> None:
        """Disable and re-enable dashboard"""
        ceph_utils.mgr_disable_dashboard()
        ceph_utils.mgr_enable_dashboard()
    def _run_cmd(self, cmd: List[str]) -> str:
        """Run command in subprocess

        `cmd` The command to run

        Returns the decoded output, or None if the command failed (the
        failure is logged rather than raised).
        """
        try:
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            return output.decode('UTF-8')
        except subprocess.CalledProcessError as exc:
            logging.exception("Command failed: {}".format(exc.output))
    def _apply_setting(self, ceph_setting: str, value: List[str]) -> str:
        """Apply a dashboard setting"""
        cmd = ['ceph', 'dashboard', ceph_setting]
        cmd.extend(value)
        return self._run_cmd(cmd)
    def _apply_file_setting(self, ceph_setting: str,
                            file_contents: str,
                            extra_args: List[str] = None) -> None:
        """Apply a setting via a file"""
        with tempfile.NamedTemporaryFile(mode='w', delete=True) as _file:
            _file.write(file_contents)
            _file.flush()
            settings = ['-i', _file.name]
            if extra_args:
                settings.extend(extra_args)
            self._apply_setting(ceph_setting, settings)
    def _apply_ceph_config_from_charm_config(self) -> None:
        """Read charm config and apply settings to dashboard config"""
        for option in self.CHARM_TO_CEPH_OPTIONS:
            try:
                value = self.config[option.charm_option_name]
            except KeyError:
                logging.error(
                    "Unknown charm option {}, skipping".format(
                        option.charm_option_name))
                continue
            if option.is_supported():
                self._run_cmd(option.ceph_command(value))
            else:
                logging.warning(
                    "Skipping charm option {}, not supported".format(
                        option.charm_option_name))
    def _configure_dashboard(self, _) -> None:
        """Configure dashboard"""
        self.request_certificates()
        if not self.mon.mons_ready:
            logging.info("Not configuring dashboard, mons not ready")
            return
        if self.unit.is_leader() and not ceph_utils.is_dashboard_enabled():
            ceph_utils.mgr_enable_dashboard()
        self._apply_ceph_config_from_charm_config()
        self._configure_tls()
        ceph_utils.mgr_config_set(
            'mgr/dashboard/{hostname}/server_addr'.format(
                hostname=socket.gethostname()),
            str(self._get_bind_ip()))
        if self.unit.is_leader():
            grafana_ep = self.config.get('grafana-api-url')
            if grafana_ep:
                self._run_cmd([
                    'ceph', 'dashboard', 'set-grafana-api-url', grafana_ep])
            alertmanager_conn = self.alertmanager.get_service_ep_data()
            if alertmanager_conn:
                alertmanager_ep = 'http://{}:{}'.format(
                    alertmanager_conn['hostname'],
                    alertmanager_conn['port'])
                self._run_cmd([
                    'ceph', 'dashboard', 'set-alertmanager-api-host',
                    alertmanager_ep])
            prometheus_conn = self.prometheus.get_service_ep_data()
            if prometheus_conn:
                prometheus_ep = 'http://{}:{}'.format(
                    prometheus_conn['hostname'],
                    prometheus_conn['port'])
                self._run_cmd([
                    'ceph', 'dashboard', 'set-prometheus-api-host',
                    prometheus_ep])
        self._register_dashboards()
        self._manage_radosgw()
        self._manage_iscsigw()
        self._stored.is_started = True
        self.update_status()
    def _get_bind_ip(self) -> str:
        """Return the IP to bind the dashboard to"""
        binding = self.model.get_binding('public')
        return str(binding.network.ingress_address)
    def _get_tls_from_config(self) -> TLS_Config:
        """Extract TLS config from charm config."""
        raw_key = self.config.get("ssl_key")
        raw_cert = self.config.get("ssl_cert")
        raw_ca_cert = self.config.get("ssl_ca")
        # Bug fix: the original checked `raw_key` twice and never checked
        # `raw_cert`, so a key without a cert crashed on b64decode(None).
        if not (raw_key and raw_cert):
            return None, None, None
        key = base64.b64decode(raw_key)
        cert = base64.b64decode(raw_cert)
        if raw_ca_cert:
            ca_cert = base64.b64decode(raw_ca_cert)
        else:
            ca_cert = None
        return key, cert, ca_cert
    def _get_tls_from_relation(self) -> TLS_Config:
        """Extract TLS config from certificates relation."""
        if not self.ca_client.is_server_cert_ready:
            return None, None, None
        key = self.ca_client.server_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption())
        cert = self.ca_client.server_certificate.public_bytes(
            encoding=serialization.Encoding.PEM)
        try:
            root_ca_chain = self.ca_client.root_ca_chain.public_bytes(
                encoding=serialization.Encoding.PEM
            )
        except ca_client.CAClientError:
            # A root ca chain is not always available. If configured to just
            # use vault with self-signed certificates, you will not get a ca
            # chain. Instead, you will get a CAClientError being raised. For
            # now, use a bytes() object for the root_ca_chain as it shouldn't
            # cause problems and if a ca_cert_chain comes later, then it will
            # get updated.
            root_ca_chain = bytes()
        ca_cert = (
            self.ca_client.ca_certificate.public_bytes(
                encoding=serialization.Encoding.PEM) +
            root_ca_chain)
        return key, cert, ca_cert
    def _update_iscsigw_creds(self, creds):
        """Register one iscsi gateway endpoint with the dashboard."""
        self._apply_file_setting(
            'iscsi-gateway-add',
            '{}://{}:{}@{}:{}'.format(
                creds['scheme'],
                creds['username'],
                creds['password'],
                creds['host'],
                creds['port']),
            [creds['name']])
    def _manage_iscsigw(self) -> None:
        """Register iscsi gateways in dashboard db"""
        if self.unit.is_leader():
            creds = self.iscsi_user.get_user_creds()
            if len(creds) < 1:
                logging.info("No iscsi gateway creds found")
                return
            for c in creds:
                self._update_iscsigw_creds(c)
    def _configure_tls(self) -> None:
        """Configure TLS."""
        logging.debug("Attempting to collect TLS config from relation")
        key, cert, ca_cert = self._get_tls_from_relation()
        ca_cert_path = self.TLS_VAULT_CA_CERT_PATH
        if not (key and cert):
            logging.debug("Attempting to collect TLS config from charm "
                          "config")
            key, cert, ca_cert = self._get_tls_from_config()
            ca_cert_path = self.TLS_CHARM_CA_CERT_PATH
        if not (key and cert):
            # logging.warn is a deprecated alias; use warning().
            logging.warning(
                "Not configuring TLS, not all data present")
            return
        self.TLS_KEY_PATH.write_bytes(key)
        self.TLS_CERT_PATH.write_bytes(cert)
        if ca_cert:
            ca_cert_path.write_bytes(ca_cert)
            subprocess.check_call(['update-ca-certificates'])
        hostname = socket.gethostname()
        ceph_utils.dashboard_set_ssl_certificate(
            self.TLS_CERT_PATH,
            hostname=hostname)
        ceph_utils.dashboard_set_ssl_certificate_key(
            self.TLS_KEY_PATH,
            hostname=hostname)
        if self.unit.is_leader():
            ceph_utils.mgr_config_set(
                'mgr/dashboard/standby_behaviour',
                'redirect')
            ceph_utils.mgr_config_set(
                'mgr/dashboard/ssl',
                'true')
            # Set the ssl artifacte without the hostname which appears to
            # be required even though they aren't used.
            ceph_utils.dashboard_set_ssl_certificate(
                self.TLS_CERT_PATH)
            ceph_utils.dashboard_set_ssl_certificate_key(
                self.TLS_KEY_PATH)
        self.kick_dashboard()
    def _gen_user_password(self, length: int = 12) -> str:
        """Generate a password"""
        alphabet = (
            string.ascii_lowercase + string.ascii_uppercase + string.digits)
        return ''.join(secrets.choice(alphabet) for i in range(length))
    def _add_user_action(self, event: ActionEvent) -> None:
        """Create a user"""
        username = event.params["username"]
        role = event.params["role"]
        if not all([username, role]):
            event.fail("Config missing")
        else:
            password = self._gen_user_password()
            with tempfile.NamedTemporaryFile(mode='w', delete=True) as fp:
                fp.write(password)
                fp.flush()
                cmd_out = subprocess.check_output([
                    'ceph', 'dashboard', 'ac-user-create', '--enabled',
                    '-i', fp.name, username, role]).decode('UTF-8')
                if re.match('User.*already exists', cmd_out):
                    event.fail("User already exists")
                else:
                    event.set_results({"password": password})
    def _delete_user_action(self, event: ActionEvent) -> None:
        """Delete a user"""
        username = event.params["username"]
        try:
            self._run_cmd(['ceph', 'dashboard', 'ac-user-delete', username])
            event.set_results({"message": "User {} deleted".format(username)})
        except subprocess.CalledProcessError as exc:
            event.fail(exc.output)
# Entry point used by the Juju dispatch/hook scripts.
if __name__ == "__main__":
    main(CephDashboardCharm)
|
#!/usr/bin/env python
import sys
sys.path.append("/opt/DIRT")
import os
import scipy
import argparse
from dirtIO import IO
from Preprocessing import Preprocessing
from fixImageOrientation import fix_orientation
def options():
    """Parse and return the command-line arguments for the threshold test."""
    parser = argparse.ArgumentParser(
        description='Test a range of thresholds on an image using DIRT.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-i", "--image", help="Input image.", required=True)
    parser.add_argument(
        "-t", "--thresholds",
        help="Comma-separated list of threshold multiplier values.",
        default="1,5,10,15,20")
    return parser.parse_args()
def main():
    """Run DIRT preprocessing on one image for each requested threshold.

    Creates the output directories DIRT expects, loads (and re-orients) the
    input image, then runs Preprocessing once per threshold value.
    """
    args = options()
    # IO processing
    io = IO()
    # Set working paths to job directory
    io.setHomePath(os.getcwd())
    io.setServerPath(os.getcwd())
    # We have to make some directories to store DIRT outputs
    if not os.path.exists('Mask'):
        os.mkdir('Mask')
    if not os.path.exists(os.path.join('Crown', 'Plots')):
        os.makedirs(os.path.join('Crown', 'Plots'))
    if not os.path.exists(os.path.join('Crown', 'Result')):
        os.makedirs(os.path.join('Crown', 'Result'))
    # Set filename
    imgName = os.path.basename(args.image)
    # Parse thresholds
    thrTestValues = args.thresholds.split(",")
    # Open the image
    try:
        fix_orientation(args.image, save_over=True)
        # NOTE(review): scipy.misc.imread was removed in modern SciPy; this
        # assumes the pinned environment still provides it.
        img = scipy.misc.imread(args.image, flatten=True)
    except Exception:
        # Bug fix: the original bare `except:` printed a message but fell
        # through to use the undefined `img`, crashing with a NameError.
        print('Image not readable')
        return
    # Apply each threshold to the image
    for value in thrTestValues:
        io.setFileName(imgName[:-4] + "_threshold_" + str(value))
        prep = Preprocessing(io)
        _ = prep.prepocess(img, 1, scale=int(value), nrExRoot=1, marker=39.0, stemCorrection=1)
if __name__ == "__main__":
    main()
|
from django.http import JsonResponse
from .models import Restaurant, Chat
from django.shortcuts import render
from django.http import HttpResponse
from django.utils.safestring import mark_safe
from about.models import User
def Post(request):
    """Handle an incoming chat message.

    Stores the user's message, and when the message is exactly 'Restaurant'
    replies with one bot message per known restaurant. Returns a
    JsonResponse echoing the message, or a plain HttpResponse for non-POST
    requests.
    """
    if request.method == 'POST':
        msg = request.POST.get('message', None)
        # Most recently created user is treated as the sender.
        username = User.objects.all().order_by('-id')[0].name
        c = Chat(username=username, message=msg, isuser=True)
        if msg != '':
            c.save()
        if msg == 'Restaurant':
            for restaurant in Restaurant.objects.all():
                name = restaurant.name
                shop_no = restaurant.address.shop_no
                sector = restaurant.address.sector
                city = restaurant.address.city
                rating = restaurant.rating
                response = mark_safe("Name : " + name + "<br>Rating : " + str(rating) + "<br>Address : " + str(shop_no) + "<br>Sector: " + str(sector) + " ," + city)
                c = Chat(username='Gladys', message=response, isuser=False)
                c.save()
        return JsonResponse({'msg': msg, 'username': username})
    else:
        # Bug fix: the original constructed this response but never returned
        # it, so non-POST requests made the view return None (a 500 in
        # Django).
        return HttpResponse('Request must be POST')
def chat(request):
    """Render the chat page with the full conversation history."""
    return render(request, 'Chatbot/chat.html', {'chat': Chat.objects.all()})
def Messages(request):
    """Render the messages partial with the full conversation history."""
    return render(request, 'Chatbot/messages.html', {'chat': Chat.objects.all()})
|
#!/usr/bin/env python3
import numpy as np
class KMeans(object):
    """
    Performs the K-Means Clustering algorithm i.e. Lloyd's algorithm a.k.a. Voronoi iteration or relaxation
    Parameters
    --------------------------------------------------
    k         : int the number of clusters to form i.e. the number of centroids to generate
    seed      : int seed used to initialize the pseudo-random number generator
    iterations: int maximum number of iterations to be performed
    """
    def __init__(self, k = 2, iterations = 100, seed = None):
        self.k = k
        self.seed = seed
        self.iters = iterations
        self.cost = list()       # per-iteration cost history of the last fit
        self.classes = None      # cluster index per training sample (best solution)
        self.centroids = None    # centroids of the best (lowest-cost) solution
    def fit(self, x):
        """
        Parameters
        --------------------------------------------------
        x: ndarray of shape (n_samples, n_features)
        Returns
        --------------------------------------------------
        None; populates self.cost, self.centroids and self.classes
        Raises
        --------------------------------------------------
        TypeError: if x is not an ndarray
        """
        if not isinstance(x, np.ndarray):
            # TypeError subclasses Exception, so existing handlers still work.
            raise TypeError("x should be an ndarray of shape (n_samples, n_features).")
        np.random.seed(seed = self.seed)
        # Reset the history so repeated fit() calls do not mix costs (bug fix).
        self.cost = list()
        m = x.shape[0]
        distances = np.zeros([m, self.k])
        min_cost = np.inf
        # Initialize centroids from k randomly chosen samples.
        centroids = x[np.random.randint(m, size = self.k)]
        for _ in range(self.iters):
            # Assignment step: nearest centroid for every sample.
            for i in range(self.k):
                distances[:, i] = self.euclidean_distance(centroids[i], x)
            classes = np.argmin(distances, axis = 1)
            # Update step: move each centroid to the mean of its members.
            for i in range(self.k):
                members = x[classes == i]
                # Guard: np.mean of an empty cluster returns NaN and poisons
                # all later distances; keep the old centroid instead (bug fix).
                if members.size:
                    centroids[i] = np.mean(members, axis = 0)
            curr_cost = self.compute_cost(x, classes, centroids)
            self.cost.append(curr_cost)
            if curr_cost < min_cost:
                min_cost = curr_cost
                self.classes = classes
                # Copy: `centroids` is mutated in place in later iterations,
                # so storing the bare reference would lose the best solution
                # (bug fix).
                self.centroids = centroids.copy()
    def predict(self, x):
        """
        Parameters
        --------------------------------------------------
        x: ndarray of shape (n_samples, n_features)
        Returns
        --------------------------------------------------
        y_pred: ndarray of shape (n_samples, 1)
        """
        m = x.shape[0]
        dist = np.zeros([m, self.k])
        for i in range(self.k):
            dist[:, i] = self.euclidean_distance(self.centroids[i], x)
        y_pred = np.argmin(dist, axis = 1)
        return y_pred
    def compute_cost(self, x, classes, centroids):
        """
        Parameters
        --------------------------------------------------
        x        : ndarray of shape (n_samples, n_features)
        classes  : ndarray of shape (n_samples, 1)
        centroids: ndarray of shape (n_features, 1)
        Returns
        --------------------------------------------------
        cost: float, mean distance of every sample to its assigned centroid
        """
        dists = [self.euclidean_distance(centroids[i], x[classes == i])
                 for i in range(self.k)]
        return np.mean(np.concatenate(dists))
    def euclidean_distance(self, centroid, x):
        """
        Parameters
        --------------------------------------------------
        x        : ndarray of shape (n_samples, n_features)
        centroid : ndarray of shape (n_features,)
        Returns
        --------------------------------------------------
        dist: ndarray of shape (n_samples,)
        """
        dist = np.linalg.norm(x - centroid, axis = 1)
        return dist
class KMedians(KMeans):
    """
    Performs the K-Medians Clustering algorithm.
    Parameters
    --------------------------------------------------
    k         : int the number of clusters to form i.e. the number of centroids to generate
    seed      : int seed used to initialize the pseudo-random number generator
    iterations: int maximum number of iterations to be performed
    """
    def __init__(self, k = 2, iterations = 100, seed = None):
        KMeans.__init__(
            self,
            k = k,
            iterations = iterations,
            seed = seed
        )
    def fit(self, x):
        """
        Parameters
        --------------------------------------------------
        x: ndarray of shape (n_samples, n_features)
        Returns
        --------------------------------------------------
        None; populates self.cost, self.centroids and self.classes
        Raises
        --------------------------------------------------
        TypeError: if x is not an ndarray
        """
        # Consistency fix: validate input the same way KMeans.fit does.
        if not isinstance(x, np.ndarray):
            raise TypeError("x should be an ndarray of shape (n_samples, n_features).")
        np.random.seed(seed = self.seed)
        # Reset the history so repeated fit() calls do not mix costs (bug fix).
        self.cost = list()
        m = x.shape[0]
        distances = np.zeros([m, self.k])
        min_cost = np.inf
        # Initialize centroids from k randomly chosen samples.
        centroids = x[np.random.randint(m, size = self.k)]
        for _ in range(self.iters):
            # Assignment step: nearest centroid for every sample.
            for i in range(self.k):
                distances[:, i] = self.euclidean_distance(centroids[i], x)
            classes = np.argmin(distances, axis = 1)
            # Update step: move each centroid to the median of its members.
            for i in range(self.k):
                members = x[classes == i]
                # Guard: np.median of an empty cluster returns NaN; keep the
                # previous centroid instead (bug fix).
                if members.size:
                    centroids[i] = np.median(members, axis = 0)
            curr_cost = self.compute_cost(x, classes, centroids)
            self.cost.append(curr_cost)
            if curr_cost < min_cost:
                min_cost = curr_cost
                self.classes = classes
                # Copy so later in-place mutation can't overwrite the best
                # solution (bug fix).
                self.centroids = centroids.copy()
|
#!/usr/bin/env python3
"""Ad-hoc manual test driver for the BoxBack/BoxLog backup tooling.

Exercises the Google Drive folder-id regex, BoxLog setup, and one
incremental backup run. The commented-out sections are alternative manual
tests meant to be re-enabled one at a time.
"""
import os
import re
from datetime import date
from src import BoxBack, BoxLog
# Target Google Drive folder id used by the (commented) gdrive tests below.
destination_folder = "0B7nAS5KVLBl4VmlhRDRQRzlrMmM"
string = "https://drive.google.com/drive/u/2/folders/0B7nAS5KVLBl4VmlhRDRQRzlrMmM"
# string = "0B7nAS5KVLBl4VmlhRDRQRzlrMmM"
# string = "fofasdf/0B7nAS5KVLBl4VmlhRDRQRzlrMmM"
# Accept either a full Drive URL or a bare folder id; group(2) is the id.
result = re.search(r"^(https://.*/folders/)?([^/]*?)$", string)
if result is not None:
    print(result.group(2))
# Test the setup of BoxLog
BoxLog.setup()
# # test 1
# BoxBack.setup()
# # Test 2
backup = BoxBack()
# backup.shuffle_backups()
# # Test 3
today = date.today()
config = "ikonquest.com.yaml"
# Paths mirror the archive layout BoxBack expects for a single site.
site_object = dict(
    tar_file="archive/tar/" + os.path.splitext(config)[0] + "-" + today.isoformat() + ".tar",
    zip_file="archive/0/" + os.path.splitext(config)[0] + "-" + today.isoformat() + ".tar.gz",
    site_name=os.path.splitext(config)[0],
    config="/etc/boxable/boxables/" + config
)
# backup.create_archive(site_object)
# Test 3.5
#print(backup.file_list)
# # Test 4
# backup.upload("google", site_object)
# # Test 5
backup.backup(os.path.splitext(config)[0], "inc")
# # Test 6
# backup.backup(os.path.splitext(config)[0], "full")
# # Test 7
# backup.backup("*", "full")
# tests = call(["bin/gdrive-windows-x64.exe", "info", destination_folder])
# if tests == 0:
#     google_folder = check_output(["bin/gdrive-windows-x64.exe", "info", destination_folder])
#     google_yaml = yaml.safe_load(google_folder)
#
#     print(google_yaml["Name"]+" ["+google_yaml['Id']+"]")
|
"""Defines utility methods for testing files and workspaces"""
from __future__ import unicode_literals
import datetime
import os
import django.contrib.gis.geos as geos
import django.utils.timezone as timezone
from storage.models import CountryData, ScaleFile, Workspace
# Monotonically increasing counters used to generate unique model
# names/titles across tests within a single process.
COUNTRY_NAME_COUNTER = 1
WORKSPACE_NAME_COUNTER = 1
WORKSPACE_TITLE_COUNTER = 1
def create_country(name=None, fips='TT', gmi='TT', iso2='TT', iso3='TST', iso_num=0, border=None, effective=None):
    """Creates a country data model for unit testing

    Unspecified values fall back to generated defaults: a unique name, a
    10x10 square border polygon, and the current time.

    :returns: The file model
    :rtype: :class:`storage.models.CountryData`
    """
    global COUNTRY_NAME_COUNTER
    if not name:
        name = 'test-country-%i' % COUNTRY_NAME_COUNTER
        COUNTRY_NAME_COUNTER += 1
    border = border or geos.Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)))
    effective = effective or timezone.now()
    return CountryData.objects.create(name=name, fips=fips, gmi=gmi, iso2=iso2, iso3=iso3, iso_num=iso_num,
                                      border=border, effective=effective)
def create_file(file_name='my_test_file.txt', file_type='SOURCE', media_type='text/plain', file_size=100,
                data_type_tags=None, file_path=None, workspace=None, is_deleted=False, uuid='', last_modified=None,
                data_started=None, data_ended=None, source_started=None, source_ended=None,
                source_sensor_class=None, source_sensor=None, source_collection=None, source_task=None,
                geometry=None, center_point=None, meta_data=None, countries=None, job_exe=None, job_output=None,
                recipe=None, recipe_node=None, batch=None, is_superseded=False, superseded=None):
    """Creates a Scale file model for unit testing

    A workspace is created on demand; job/recipe metadata is derived from
    the given job_exe/recipe when present.

    :returns: The file model
    :rtype: :class:`storage.models.ScaleFile`
    """
    # BUG FIX: the original used mutable default arguments ([] and {}),
    # which are shared across calls — a tag/metadata mutation in one test
    # would leak into every later call. Normalize None to fresh containers.
    if data_type_tags is None:
        data_type_tags = []
    if meta_data is None:
        meta_data = {}
    if not workspace:
        workspace = create_workspace()
    job = None
    job_type = None
    if job_exe:
        job = job_exe.job
        job_type = job_exe.job.job_type
    recipe_type = None
    if recipe:
        recipe_type = recipe.recipe_type
    deleted = None
    if is_deleted:
        deleted = timezone.now()
    scale_file = ScaleFile.objects.create(file_name=file_name, file_type=file_type, media_type=media_type, file_size=file_size,
                                          data_type_tags=data_type_tags, file_path=file_path or 'file/path/' + file_name, workspace=workspace,
                                          is_deleted=is_deleted, deleted=deleted, uuid=uuid, last_modified=last_modified,
                                          data_started=data_started, data_ended=data_ended, source_started=source_started,
                                          source_ended=source_ended, source_sensor_class=source_sensor_class,
                                          source_sensor=source_sensor, source_collection=source_collection, source_task=source_task,
                                          geometry=geometry, center_point=center_point, meta_data=meta_data,
                                          job_exe=job_exe, job=job, job_type=job_type, job_output=job_output,
                                          recipe=recipe, recipe_node=recipe_node, recipe_type=recipe_type, batch=batch,
                                          is_superseded=is_superseded, superseded=superseded)
    if countries:
        scale_file.countries = countries
        scale_file.save()
    return scale_file
def create_workspace(name=None, title=None, json_config=None, base_url=None, is_active=True, deprecated=None):
    """Creates a workspace model for unit testing

    Unspecified values fall back to generated defaults: a unique name and
    title, a host-broker config, and (for inactive workspaces) a
    deprecation timestamp of "now".

    :returns: The workspace model
    :rtype: :class:`storage.models.Workspace`
    """
    global WORKSPACE_NAME_COUNTER, WORKSPACE_TITLE_COUNTER
    if not name:
        name = 'test-workspace-%i' % WORKSPACE_NAME_COUNTER
        WORKSPACE_NAME_COUNTER += 1
    if not title:
        title = 'Test Workspace %i' % WORKSPACE_TITLE_COUNTER
        WORKSPACE_TITLE_COUNTER += 1
    json_config = json_config or {
        'version': '1.0',
        'broker': {
            'type': 'host',
            'host_path': '/host/path',
        }
    }
    if not deprecated and is_active is False:
        deprecated = timezone.now()
    return Workspace.objects.create(name=name, title=title, json_config=json_config, base_url=base_url,
                                    is_active=is_active, deprecated=deprecated)
|
#!/usr/bin/python
import urllib2
import re
import os
import sys
import commands
import subprocess
import shlex
import string
import getopt
DELTTE_TEST_CASE = True # True: allCount all count except test cases Flase: allCount including test cases
def search_opt(android_dir, set_file, out_dir):
    """Scan all *.mk files under android_dir and, for each lib listed in
    set_file (one per line), write a per-lib report into out_dir when the
    lib is statically linked more than once.
    """
    if len(android_dir) == 0:
        print("Invalid path null !")
    else:
        child = subprocess.Popen(["find", android_dir, "-iname", "*.mk"], stdout=subprocess.PIPE)
        out = child.communicate()  # (stdout, stderr); out[0] is the file list
        print(out)
        libs_h = open(set_file, 'r')
        done = 0
        while not done:
            aLine = libs_h.readline()
            if aLine != '':
                lib = aLine.strip()
                print("\n\n\n >>>>>>>>>>>>>>>>>>>>>>>> lib = %s" % (lib))
                mdl = parser_module(out, lib)
                (stc_all, stc_test) = parser_staticlink(out, lib)
                print((stc_all, stc_test, mdl))
                if (stc_all > 1):
                    create_file = "%s/%dstatic_%dtest_%dmodule_<%s>" % (out_dir, stc_all, stc_test, mdl, lib)
                    print("Create File: %s" % (create_file))
                    # BUG FIX: 'wa+' is not a valid mode ('w' and 'a'
                    # conflict). The file is only written here, so 'w' is
                    # the correct mode.
                    handle = open(create_file, 'w')
                    parser(out, lib, create_file, handle)
                    handle.close()
            else:
                done = 1
        libs_h.close()
    return
def parser_module(out, lib):
    """Count LOCAL_MODULE assignments that reference `lib`.

    out: (stdout, stderr) tuple from the `find` subprocess; out[0] is a
         newline-separated list of .mk file paths.
    lib: library/module name to look for.
    Returns the number of matching LOCAL_MODULE definitions.
    """
    moduleCount = 0
    for f in out[0].split('\n'):
        if len(f) == 0:
            continue
        content = readContent(f)
        # LOCAL_MODULE := ...lib (or +=); trailing char class avoids
        # matching longer names that merely contain `lib`.
        regexSModule = "(LOCAL_MODULE)\s?(\+\=|\:\=)([^=]*?%s)[^a-zA-Z0-9+]" % (lib)
        patternModule = re.compile(regexSModule, re.S)
        # CLEANUP: the original also re-joined each match into a string
        # (shadowing the builtin `str`) but never used it; that dead code
        # is removed.
        moduleCount += len(patternModule.findall(content))
    return moduleCount
def parser_staticlink(out, lib):
    """Count how many makefiles statically link `lib`.

    Returns (allCount, testCount): total static-link matches (excluding
    test cases when DELTTE_TEST_CASE is set) and the number of matches
    found under test directories.
    """
    allCount, testCount = 0, 0
    regexStatic = "(LOCAL_STATIC_LIBRARIES|static_libraries)\s?(\+\=|\:\=)([^=]*?%s)[^a-zA-Z0-9+]" % (lib)
    patternStatic = re.compile(regexStatic, re.S)
    for f in out[0].split('\n'):
        if not f:
            continue
        content = readContent(f)
        for _ in patternStatic.findall(content):
            isTest = checkTestCase(f)
            if isTest:
                testCount += 1
            if DELTTE_TEST_CASE and isTest:
                continue
            allCount += 1
    #print "allCount= %d, testCount = %d" %(allCount, testCount)
    return (allCount, testCount)
def parser(out, lib, fout, handle):
    """Write every LOCAL_STATIC_LIBRARIES / LOCAL_MODULE / static_libraries
    match for `lib` into `handle`, grouped per .mk file with a numbered
    header per file.
    """
    index = 0
    for f in out[0].split('\n'):
        if len(f) == 0:
            # BUG FIX: siblings parser_module/parser_staticlink skip empty
            # entries; returning here aborted the whole scan at the first
            # blank line instead of just skipping it.
            continue
        content = readContent(f)
        regex = "(LOCAL_STATIC_LIBRARIES|LOCAL_MODULE|static_libraries)\s?(\+\=|\:\=)([^=]*?%s)[^a-zA-Z0-9+]" % (lib)
        pattern = re.compile(regex, re.S)
        pre_file = ""
        for i in pattern.findall(content):
            matched = ""  # CLEANUP: no longer shadows builtin `str`
            align = ""
            if f != pre_file:
                # First match in this file: emit a numbered file header.
                index = index + 1
                align = "\n\n\n\n%d) ====%s====\n" % (index, f)
                pre_file = f
            else:
                align = "\n"
            for j in i:
                matched = "%s%s" % (matched, j)
            in_file = "%s\n%s" % (align, matched)
            print(in_file)
            handle.write(in_file)
    return
def parser_output():
    # Placeholder — output formatting was never implemented.
    pass
def readContent(f):
    """Return the stripped binary content of file `f`.

    BUG/PORTABILITY FIX: the original used the Python-2-only `file()`
    builtin and closed the handle manually; `open` + `with` guarantees the
    handle is closed even if read() raises, and works on Python 2 and 3.
    """
    with open(f, 'rb') as handle:
        return handle.read().strip()
def checkTestCase(f):
    """Return True if path `f` has a 'test' or 'tests' directory component.

    IDIOM: replaced the manual loop/flag with any(); the print uses the
    parenthesized single-argument form, which behaves identically under
    Python 2 and 3.
    """
    print("============ %s" % (f))
    return any(part in ('test', 'tests') for part in re.split('/', f))
def usage():
    """Print command-line help for regrex.py."""
    # Parenthesized single-argument prints work identically on Py2/Py3.
    print("usage: regrex.py -r ANDROID_DIR -s SET_FILE -d OUT_DIR")
    print("")
    # TYPO FIX in help text: "androir" -> "android".
    print(" param: ANDROID_DIR: android root directory")
    print("        SET_FILE: file which lists all the static libs")
    print("        OUT_DIR: output the result directory")
    print("")
    print(" output: generate files like: %d(staticlink)_%d(test case)_%d(module count)_<lib module name> in OUT_DIR")
def main():
    """Parse -r/-s/-d (-h for help) options and run the scan.

    Example:
      ./regrex.py -r <aosp-root> -s <set.txt> -d <output-dir>
    """
    opts, args = getopt.getopt(sys.argv[1:], "hr:s:d:")
    # ROBUSTNESS FIX: previously a missing required option caused a
    # NameError at the search_opt() call; initialize and validate instead.
    android_dir = None
    set_file = None
    out_dir = None
    for op, value in opts:
        if op == "-r":
            android_dir = value
        elif op == "-s":
            set_file = value
        elif op == "-d":
            out_dir = value
        elif op == "-h":
            usage()
            sys.exit()
    if android_dir is None or set_file is None or out_dir is None:
        usage()
        sys.exit(1)
    search_opt(android_dir, set_file, out_dir)
# Script entry point.
if __name__ == "__main__":
    main()
|
import numpy as np
import unittest
from chainercv.utils import assert_is_image
from chainercv.utils import testing
# Each case: an input array/tuple, the color/check_range flags passed to
# assert_is_image, and whether the check is expected to pass ('valid').
@testing.parameterize(
    {
        'img': np.random.randint(0, 256, size=(3, 48, 64)),
        'color': True, 'check_range': True, 'valid': True},
    {
        'img': np.random.randint(0, 256, size=(1, 48, 64)),
        'color': True, 'check_range': True, 'valid': False},
    {
        'img': np.random.randint(0, 256, size=(4, 48, 64)),
        'color': True, 'check_range': True, 'valid': False},
    {
        'img': np.ones((3, 48, 64)) * 256,
        'color': True, 'check_range': True, 'valid': False},
    {
        'img': np.ones((3, 48, 64)) * -1,
        'color': True, 'check_range': True, 'valid': False},
    {
        'img': np.ones((3, 48, 64)) * 256,
        'color': True, 'check_range': False, 'valid': True},
    {
        'img': np.random.randint(0, 256, size=(1, 48, 64)),
        'color': False, 'check_range': True, 'valid': True},
    {
        'img': np.random.randint(0, 256, size=(3, 48, 64)),
        'color': False, 'check_range': True, 'valid': False},
    {
        'img': np.ones((1, 48, 64)) * 256,
        'color': False, 'check_range': True, 'valid': False},
    {
        'img': np.ones((1, 48, 64)) * -1,
        'color': False, 'check_range': True, 'valid': False},
    {
        'img': np.ones((1, 48, 64)) * 256,
        'color': False, 'check_range': False, 'valid': True},
    {
        'img': (((0, 1), (2, 3)), ((4, 5), (6, 7)), ((8, 9), (10, 11))),
        'color': True, 'check_range': True, 'valid': False},
)
class TestAssertIsImage(unittest.TestCase):
    def test_assert_is_image(self):
        """assert_is_image must accept exactly the cases marked valid and
        raise AssertionError for the rest."""
        if self.valid:
            assert_is_image(self.img, self.color, self.check_range)
        else:
            with self.assertRaises(AssertionError):
                assert_is_image(self.img, self.color, self.check_range)
testing.run_module(__name__, __file__)
|
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""Methods for introducing bias into a sample"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import scipy.interpolate
def _merge_infrequent_bins(scores, bins, tolerance):
"""Adjust scores so that no bin has less than a fixed percentage of
records. interpolated_pdf_reciprocal and histogram_reciprocal methods
are sensitive to low frequency bins. This method identifies bins that
have low counts and adjusts the scores such that the score will
now show up in the nearest adjacent bin that is above the threshold.
Args:
scores: np.array of scores to bias the sample with
bins: np.array of values signifying the binning for @scores
tolerance: value specifying the percentage threshold at which
to start merging bins. 0.005 means merge bins that have
less than 0.05% of the population.
Returns: np.array of bins
"""
scores = np.array(scores)
num_bins = len(bins) - 1
counts, bins = np.histogram(scores, bins=bins)
counts = counts * 1.0 / counts.sum()
posts_to_remove = []
last_merged = False
for ind in range(0, num_bins - 1):
if not last_merged and counts[ind] < tolerance:
posts_to_remove.append(ind + 1)
last_merged = True
else:
last_merged = False
posts_to_keep = set(range(0, num_bins + 1)).difference(posts_to_remove)
posts_to_keep = list(posts_to_keep)
if bins.size != bins[posts_to_keep].size:
return _merge_infrequent_bins(scores, bins[posts_to_keep], tolerance)
return bins[posts_to_keep]
def interpolated_pdf_reciprocal(scores, bins=None, merge_threshold=0.005):
    """Attempts to take equal samples from each bin. It does this by
    constructing an (interpolated) PDF from @scores and then returning
    1 / pdf(score).

    Args:
        scores: array-like of scores to bias the sample with
        bins: np.array of bin edges for @scores. Defaults to 10 edges
            linearly spaced over scores (9 bins).
        merge_threshold: merge bins that have less than this fraction of
            the overall population; low-population bins can significantly
            hurt this method. DOC FIX: defaults to 0.005, i.e. 0.5% of the
            population (not 0.01/1.0% as previously stated). Pass False to
            turn merging off.

    Returns: np.array of bias weights for each record
    """
    # GENERALIZATION: accept any array-like (list/tuple), not just ndarray.
    scores = np.asarray(scores)
    if bins is None:
        num_bins = 10
        # reasonable default
        bins = np.linspace(scores.min(), scores.max(), num_bins)
    if merge_threshold:
        bins = _merge_infrequent_bins(scores, bins, merge_threshold)
    counts, bins = np.histogram(scores, bins=bins)
    # Interpolate between bin midpoints; outside the knots clamp to the
    # first/last bin count.
    midpoints = [(bins[i] + bins[i - 1]) / 2.0 for i in range(1, len(bins))]
    f = scipy.interpolate.interp1d(
        midpoints, counts, bounds_error=False, fill_value=(counts[0], counts[-1]))
    pdf_values = 1.0 / f(scores)
    return pdf_values
def histogram_reciprocal(scores, bins=None, merge_threshold=0.005):
    """Attempts to take equal samples from each bin. It does this by
    constructing a histogram from @scores and then returning
    1 / histogram_value(score).

    Args:
        scores: array-like of scores to bias the sample with
        bins: np.array of bin edges for @scores. Defaults to 10 edges
            linearly spaced over scores (9 bins).
        merge_threshold: merge bins that have less than this fraction of
            the overall population. DOC FIX: defaults to 0.005, i.e. 0.5%
            of the population. Pass False to turn merging off.

    Returns: np.array of bias weights for each record
    """
    scores = np.asarray(scores)
    if bins is None:
        num_bins = 10
        # reasonable default
        bins = np.linspace(scores.min(), scores.max(), num_bins)
    if merge_threshold:
        bins = _merge_infrequent_bins(scores, bins, merge_threshold)
    counts, bins = np.histogram(scores, bins=bins)
    # BUG FIX: side='left' sent scores equal to a bin's lower edge into the
    # previous bin, and the minimum score to index -1 (the LAST bin).
    # np.histogram treats bins as [lo, hi) with the final bin closed, so use
    # side='right' and clip the final edge into the last bin.
    index = np.clip(np.searchsorted(bins, scores, side='right') - 1,
                    0, len(counts) - 1)
    pdf_values = 1.0 / counts[index]
    return pdf_values
def bin_weights_raw(scores, bins, bin_weights):
    """Assign raw weights to each bin. Does not correct for the raw
    frequency of items found in each bin.

    Args:
        scores: np.array of scores to bias the sample.
        bins: np.array of bin edges for @scores.
        bin_weights: np.array with one weight per bin (len(bins) - 1).

    Returns: np.array of bias weights for each record, normalized by the
    maximum bin weight.
    """
    # BUG FIX: same bin-edge handling as histogram_reciprocal — side='left'
    # mapped scores on a lower edge to the previous bin and the minimum
    # score to index -1 (the last bin); np.histogram semantics are [lo, hi)
    # with a closed final bin.
    index = np.clip(np.searchsorted(bins, scores, side='right') - 1,
                    0, len(bin_weights) - 1)
    return bin_weights[index] / bin_weights.max()
def bin_weights_corrected(scores, bins, bin_weights):
    """Applies weights to each bin but first corrects for the frequency of
    items within a bin, so sampling tracks bin_weights proportionally.

    Args:
        scores: np.array of scores to bias the sample.
        bins: np.array of bin edges for @scores.
        bin_weights: np.array with one weight per bin.

    Returns: np.array of bias weights for each record.
    """
    raw = bin_weights_raw(scores, bins, bin_weights)
    frequency_correction = histogram_reciprocal(scores, bins)
    return raw * frequency_correction
|
def select_products(products):
    """Return the products that have a non-empty 'name'.

    Parameters:
        products: iterable of dicts describing goods.
    Returns:
        list of the dicts whose 'name' key is present and non-empty.

    BUG FIX: the original immediately overwrote the `products` parameter
    with input(), so the argument was ignored and the loop iterated over
    the *characters* of the typed string — and str has no .get(), which
    raised AttributeError. The prompt is removed; the caller supplies data.
    """
    return [item for item in products if item.get('name', '')]
|
# Generated by Django 3.1.6 on 2021-02-05 10:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter the group/user object-filter fields
    on LDAPSource (defaults and help text as below)."""

    dependencies = [
        ("authentik_sources_ldap", "0009_auto_20210204_1834"),
    ]

    operations = [
        migrations.AlterField(
            model_name="ldapsource",
            name="group_object_filter",
            field=models.TextField(
                default="(objectClass=group)",
                help_text="Consider Objects matching this filter to be Groups.",
            ),
        ),
        migrations.AlterField(
            model_name="ldapsource",
            name="user_object_filter",
            field=models.TextField(
                default="(objectClass=person)",
                help_text="Consider Objects matching this filter to be Users.",
            ),
        ),
    ]
|
#!/usr/bin/env python
""" Run the 'cascade' docker container in interactive mode, starting the default app (cascade webserver) in the container.
"""
import sys
import os
import subprocess
import argparse
if __name__ == "__main__":
    # Process command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-l", "--local", help="Map Cascade webserver onto localhost (127.0.0.1)",
                        action="store_true")
    parser.add_argument("-a", "--always", help="Always restart the container if it stops",
                        action="store_true")
    parser.add_argument("-b", "--bash", help="Start a bash shell in the container",
                        action="store_true")
    args = parser.parse_args()
    if args.always and args.bash:
        print("--always and --bash are mutually exclusive. Pick a side! Aborting.")
        # BUG FIX: abort with a non-zero status so callers/scripts can
        # detect the failure (bare sys.exit() exits with 0 = success).
        sys.exit(1)
    # If localhost was specified on the command line, then map the Cascade webserver in
    # the container to this machine's localhost interface, otherwise host it
    # publicly on 0.0.0.0
    #
    if args.local:
        ip_address = '127.0.0.1'
        print('Cascade webserver will be mapped to localhost (127.0.0.1)')
    else:
        ip_address = '0.0.0.0'
        print('Cascade webserver will be hosted publicly (0.0.0.0). To use localhost instead, start using "docker_start.py --local".')
    working_directory = os.getcwd()
    # Sanity check: must run from the repo root so the volume mount is right.
    if not os.path.isfile(os.path.join(working_directory, 'cascade', '__main__.py')):
        print('ABORTED: You must run this script from the root cascade directory (the root of the git repo).')
        # BUG FIX: non-zero exit status on abort (see above).
        sys.exit(1)
    docker_options = '-p {}:5001:5001'.format(ip_address)
    docker_options += ' -v "{}":/home/cascade'.format(working_directory)
    if args.always:
        docker_options += ' --restart always --detach'
    else:
        docker_options += ' -ti --restart no'
    if args.bash:
        docker_options += ' --entrypoint "/bin/bash"'
    command = 'docker run ' + docker_options + ' cascade'
    print(command)
    subprocess.call(command, shell=True)
|
from authy.api import AuthyApiClient
from flask import Flask, render_template, request, redirect
import os
import pdb
import time
app = Flask(__name__)
# In-memory stores shared across requests (demo only: module-level,
# single-user, not thread-safe).
regdata = {}
transfer = {}
# NOTE(review): AUTHY_API_KEY must be set in the environment — a missing
# key raises KeyError at import time.
authy_api_key = os.environ['AUTHY_API_KEY']
authy_api = AuthyApiClient(authy_api_key)
@app.route('/')
def register():
    """Serve the registration form (the app's entry page)."""
    page = render_template('register.html')
    return page
@app.route('/register', methods = ['POST'])
def processRegistration():
regdata["email"] = request.form['email']
regdata["country_code"] = request.form['country_code']
regdata["phone_number"] = request.form['phone_number']
print("The phone number is '" + regdata["phone_number"] + "'")
user = authy_api.users.create(regdata["email"],regdata["phone_number"],regdata["country_code"])
if user.ok():
print "Authy ID = %s " % user.id
regdata["authyid"]=user.id
else:
print user.errors()
return redirect('/transaction')
@app.route('/processtransaction', methods = ['POST'])
def processTransaction():
    """Handle the transfer form: send an Authy OneTouch approval request,
    poll until the user approves/denies it, then redirect to the result
    page. Blocks the request thread while polling."""
    transfer["email"] = request.form['email2']
    transfer["acct"] = request.form['acct']
    transfer["amt"] = "$ " + request.form['amt']
    print("The email address is '" + transfer["email"] + "'")
    print("The account number is '" + transfer["acct"] + "'")
    print("The US Dollar amount is '" + transfer["amt"] + "'")
    print regdata["authyid"]
    # OneTouch payload shown to the user in the Authy app.
    message = {
        "message":"Transfer Money to " + transfer["email"],
        "details": {
            "From":regdata["email"],
            "To":transfer["email"],
            "Account Number":transfer["acct"],
            "US Dollar Amount":transfer["amt"]
        },
        "seconds_to_expire":"600"
    }
    print message
    onetouch = authy_api.users.send_onetouch(regdata["authyid"], message)
    print onetouch.uuid
    # Busy-poll once a second until the request leaves the 'pending' state
    # (approved, denied, or expired).
    while True:
        onetouchStatus = authy_api.users.poll_onetouch(onetouch.uuid)
        print "Authy Onetouch Approval Status: %s " % onetouchStatus.status
        if onetouchStatus.status != "pending" :
            break
        time.sleep(1)
    transfer["status"] = onetouchStatus.status
    transfer["transactionID"] = onetouch.uuid
    return redirect('/displaytransaction')
@app.route('/transaction')
def transaction():
    """Show the money-transfer form, pre-populated with registration data."""
    rendered = render_template('transaction.html', regdata=regdata)
    return rendered
@app.route('/displaytransaction')
def displayTransaction():
    """Show the outcome of the OneTouch-approved transfer."""
    rendered = render_template('displayTransaction.html', regdata=regdata, transfer=transfer)
    return rendered
if __name__ == '__main__':
app.run() |
#!/usr/bin/env python
#coding:utf-8
#This script is aimed to grep logs by application(User should input a packageName and then we look up for the process ids then separate logs by process ids).
import os
import sys
packageName=str(sys.argv[1])
def getDeviceId():
    """Return the serials of all attached adb devices.

    Parses `adb devices -l` output (header stripped by sed, first column
    kept by awk) and stops at the first blank line, like the original.
    """
    devices = []
    stream = os.popen("adb devices -l | sed '1d'| awk '{print $1}'")
    while True:
        serial = stream.readline().strip()
        if serial == "":
            break
        devices.append(serial)
    return devices
def printPakcageLog(device, packageName):
    """Stream logcat from `device`, filtered to every process id that
    belongs to `packageName` (apps can run multiple processes).

    NOTE(review): the "Pakcage" typo in the name is kept because the
    callers below use it.
    """
    # print device, packageName
    print "Got device: " + device
    # List the pids of all processes whose name matches the package.
    command = "adb -s %s shell ps | grep %s | awk '{print $2}'"%(device, packageName)
    # print command
    p = os.popen(command)
    ##for some applications,there are multiple processes,so we should get all the process id
    pid = p.readline().strip()
    filters = pid
    while(pid != ""):
        pid = p.readline().strip()
        if (pid != ''):
            filters = filters + "|" + pid
    #print 'command = %s;filters=%s'%(command, filters)
    if (filters != '') :
        # Stream logcat and colour-highlight lines matching any pid.
        cmd = 'adb -s %s logcat -v time | grep --color=always -E "%s" '%(device, filters)
        os.system(cmd)
# Entry: pick a device (prompting when several are attached) and stream
# its package-filtered log.
devices = getDeviceId();
devicesNum = len(devices);
if devicesNum < 1:
    print "Device not found."
elif devicesNum == 1:
    device = devices[0]
    printPakcageLog(device, packageName)
else:
    print "Please chose a dvice, input the index of the device:"
    for i in xrange(0, devicesNum):
        print str(i) + "\t" + devices[i]
    index = raw_input("")
    printPakcageLog(devices[int(index)], packageName)
|
'''
===============================================================================
-- Author: Hamid Doostmohammadi, Azadeh Nazemi
-- Create date: 04/11/2020
-- Description: This codes is for SLIC segmentation and
extractions of segments.
-- Status: In progress
===============================================================================
'''
from skimage.exposure import rescale_intensity
from skimage.segmentation import slic
from skimage.util import img_as_float
from skimage import io
import numpy as np
import cv2
import sys
import os
def segment_colorfulness(image, mask):
    """Compute the Hasler–Süsstrunk colorfulness metric over the unmasked
    region of `image` (BGR channel order, as loaded by OpenCV).

    mask: nonzero entries are EXCLUDED (numpy masked-array semantics).
    Returns a scalar colorfulness score.
    """
    (B, G, R) = cv2.split(image.astype("float"))
    R = np.ma.masked_array(R, mask=mask)
    # BUG FIX: G was built from the B channel (copy-paste error), which
    # made rg == |R - B| and collapsed yb to |0.5*(R + B) - B|.
    G = np.ma.masked_array(G, mask=mask)
    B = np.ma.masked_array(B, mask=mask)
    rg = np.absolute(R - G)
    yb = np.absolute(0.5 * (R + G) - B)
    stdRoot = np.sqrt((rg.std() ** 2) + (yb.std() ** 2))
    meanRoot = np.sqrt((rg.mean() ** 2) + (yb.mean() ** 2))
    return stdRoot + (0.3 * meanRoot)
# Walk the input tree, score each SLIC superpixel's colorfulness, and save
# a per-image visualisation.
for root, dirs, files in os.walk(sys.argv[1]):
    for filename in files:
        ext = filename[filename.rfind("."):].lower()  # NOTE(review): computed but never used
        fn = os.path.join(root, filename)
        orig = cv2.imread(os.path.join(root, filename))
        vis = np.zeros(orig.shape[:2], dtype="float")
        image = io.imread(fn)
        # SLIC-zero superpixels, ~30 segments per image.
        segments = slic(img_as_float(image), n_segments=30,
                        slic_zero=True)
        for v in np.unique(segments):
            # Mask everything EXCEPT segment v (masked-array semantics:
            # nonzero mask entries are hidden).
            mask = np.ones(image.shape[:2])
            mask[segments == v] = 0
            C = segment_colorfulness(orig, mask)
            vis[segments == v] = C
        vis = rescale_intensity(vis, out_range=(0, 255)).astype("uint8")
        alpha = 0.6
        overlay = np.dstack([vis] * 3)
        output = orig.copy()
        cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)
        # NOTE(review): writes to the bare filename in the CWD and saves
        # `vis`; the blended `output` is computed but unused — confirm intent.
        cv2.imwrite(filename, vis)
|
from datetime import datetime, timedelta
import pytest
from bbapilib import BBClient, BBSession, BBAuth
@pytest.fixture
def client():
    """A BBClient whose auth holds a fake token set via the public `token`
    attribute, with no expiry configured."""
    auth = BBAuth("", "", "")
    auth.token = "TOKEN", "VALIDO"
    return BBClient(BBSession(auth))
@pytest.fixture
def client_authenticated():
    """A BBClient that looks fully authenticated: the private `_token` is
    set directly and the expiry is 10 minutes in the future."""
    auth = BBAuth("", "", "")
    auth._token = "TOKEN", "VALIDO"
    auth.expires_in = datetime.now() + timedelta(seconds=600)
    return BBClient(BBSession(auth))
#!/usr/bin/env python3
import os
import importlib
import shutil
import sys
from common import clone_repo_src, run_command, check_root_dir, create_dirs
def setup_stp():
    """Clone and build STP v2.3.3 (pinned commit) with its solver deps,
    installing into ../../../install/. Restores the CWD on success."""
    curr_dir = os.getcwd()
    # Build dependencies first (module name contains a dash, hence importlib).
    deps = importlib.import_module("solver-deps")
    deps.setup_minisat()
    deps.setup_cms()
    the_repo = clone_repo_src("STP v2.3.3 (commit 9a59a72e)",
                              "https://github.com/stp/stp.git", commit='9a59a72e')
    os.chdir("{}".format(the_repo))
    # Always start from a clean build tree.
    if os.path.exists('./build'):
        shutil.rmtree('./build')
    os.mkdir("./build")
    os.chdir("./build")
    build_cmd = ["cmake", "..", "-GNinja", "-DONLY_SIMPLE=ON",
                 "-DCMAKE_INSTALL_PREFIX=../../../install/"]
    if sys.platform == "darwin":
        # Static linking is problematic on macOS; keep debug info instead.
        build_cmd += ["-DCMAKE_BUILD_TYPE=RelWithDebInfo"]
    else:
        build_cmd += ["-DCMAKE_BUILD_TYPE=Release",
                      "-DSTATICCOMPILE=ON", "-DBUILD_SHARED_LIBS=OFF"]
    run_command(build_cmd)
    run_command(["ninja"])
    try:
        run_command(["ninja", "install"])
    except Exception:
        # BUG FIX: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt. Install remains deliberately best-effort
        # (it may lack permissions), but only ordinary errors are ignored.
        pass
    os.chdir(curr_dir)
if __name__ == '__main__':
    # Must be run from the repository root; sets up working dirs first.
    check_root_dir()
    create_dirs()
    setup_stp()
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 6 14:01:05 2015
@author: lshi
Multiprocess version to calculate reflected signals from FWR2D/3D
useful when have multiple frequencies and time steps to analyse
"""
# using IPython multiprocessing modules, need ipcluster to be started.
import time
from IPython.parallel import Client
import numpy as np
from . import postprocess as pp
def dv_initialize(n_engine, profile='default'):
    """Connect to an IPython parallel cluster and return a DirectView with
    the postprocess module imported on every engine.

    :param int n_engine: exact number of engines the cluster was started
        with (must match, or an exception is raised after a 60s wait).
    :param str profile: ipcluster profile name.
    :returns: DirectView over all engines, with `pp` (postprocess) and
        `np` (numpy) bound remotely.
    """
    c = Client(profile=profile)
    # The engine needs time to start, so check when all the engines are
    # connected before take a direct view of the cluster.
    # Make sure this desired_engine_num is EXACTLY the same as the engine
    # number you initiated with ipengine
    desired_engine_num = n_engine
    # check if the engines are ready, if the engines are not ready after 1 min,
    # something might be wrong. Exit and raise an exception.
    waiting = 0
    while(len(c) < desired_engine_num and waiting <= 60):
        time.sleep(10)
        waiting += 10
    if(len(c) != desired_engine_num):
        raise Exception('usable engine number is not the same as the desired \
engine number! usable:{0}, desired:{1}.\nCheck your cluster status and the \
desired number set in the Driver script.'.format(len(c), desired_engine_num))
    dv = c[:]
    with dv.sync_imports():
        import sdp.diagnostic.fwr.fwr2d.postprocess
        import numpy
    # BUG FIX: the remote alias must point at the imported *module*
    # `postprocess` (lowercase) — 'Postprocess' does not exist and made
    # every later remote `pp.` reference fail.
    dv.execute('pp=sdp.diagnostic.fwr.fwr2d.postprocess')
    dv.execute('np=numpy')
    return dv
class Reflectometer_Output_Params:
    """Plain container for the non-essential parameters handed to the
    Reflectometer_Output class (kept small so it pickles cheaply when
    scattered to cluster engines).

    :param string file_path: Path to FWR2D/3D output files
    :param int n_cross_section: total number of cross-section planes
    :param int FWR_dimension: either 2 or 3, default to be 2
    :param bool full_load: Optional, default is True. If False, data won't
        be loaded during initialization; pre-saved data will be needed for
        further use.
    :param string receiver_file_name: Optional, filename of the Code5 file
        specifying the receiver electric field. Default is the name used
        by the FWR_Driver script.
    """
    def __init__(self, file_path, n_cross_section, FWR_dimension=2,
                 full_load=True, receiver_file_name='receiver_pattern.txt'):
        # Simple attribute assignment only — no validation by design.
        self.file_path = file_path
        self.n_cross_section = n_cross_section
        self.receiver_file_name = receiver_file_name
        self.full_load = full_load
        self.FWR_dimension = FWR_dimension
def single_freq_time(params):
    """Single frequency/time run collecting all cross-section signals.

    Intended to be scattered to cluster engines, each with a different
    (f, t) pair.

    params: tuple of
        f         -- float, frequency in GHz
        t         -- int, time step number
        Ref_param -- Reflectometer_Output_Params with the remaining
                     preset parameters

    Returns:
        E_out: (1, 1, nc) shaped complex array of reflected signals.
    """
    freq, t_step, ref_param = params
    reflectometer = pp.Reflectometer_Output(ref_param.file_path, [freq], [t_step],
                                            ref_param.n_cross_section,
                                            ref_param.FWR_dimension, True,
                                            ref_param.receiver_file_name)
    return reflectometer.E_out
def full_freq_time(freqs, time_arr, Ref_param, dv):
    """Master function to collect all frequencies and time steps reflectometer
    signals.

    :param freqs: all the frequencies in GHz
    :type freqs: array of floats
    :param time_arr: all the time steps
    :type time_arr: array of ints
    :param Ref_param: containing other preset parameters
    :type Ref_param: :py:class`Reflectometer_Output_Params` object
    :param dv: direct-view of an IPython parallel cluster, obtained by function
               dv_initialize()
    :returns: Reflectometer_Output object with parameters given by freqs,
              time_arr, and Ref_param. Its E_out attribute contains the
              corresponding complex signals.
    """
    # Skeleton result object (full_load=False: no data loaded here).
    Ref_all = pp.Reflectometer_Output(Ref_param.file_path, freqs, time_arr,
                                      Ref_param.n_cross_section,
                                      Ref_param.FWR_dimension, False,
                                      Ref_param.receiver_file_name)
    Ref_all.E_out = np.zeros((Ref_all.NF, Ref_all.NT, Ref_all.n_cross_section),
                             dtype='complex')
    # One task per (frequency, time) pair, frequency-major order.
    parallel_param_list = [(f, t, Ref_param) for f in freqs for t in time_arr]
    parallel_result = dv.map_async(single_freq_time, parallel_param_list)
    print('Parallel runs started.')
    parallel_result.wait_interactive()
    print('All signals computed!')
    E_out_scattered = parallel_result.get()
    print('signals collected.')
    for i in range(Ref_all.NF):
        for j in range(Ref_all.NT):
            # Results come back in the same frequency-major order as the
            # param list, so pair (i, j) lives at i*len(time_arr)+j.
            Ref_all.E_out[i, j, :] = E_out_scattered[i*len(time_arr)+j][0, 0, :]
            print(('freq {0},time {1} stored.'.format(freqs[i], time_arr[j])))
    return Ref_all
|
#!/usr/bin/python
import json
import re
import requests
from fuzzywuzzy import fuzz
from price_changes import get_stock_price_data
GOOGLE_FINANCE_URL = "http://finance.google.com/finance/info?client=ig&q=NSE:"
class MutualFundNavAnalysis:
matched_stocks_data = {}
def __init__(self):
MutualFundNavAnalysis.matched_stocks_data = {}
def get_mf_stock_data(self):
stocks_data = open("stocks-list.json", "r")
stocks = json.load(stocks_data)
stocks_data.close()
return stocks
def get_listed_stocks_dict(self):
#stocks_data = open("new-formatted.json", "r")
stocks_data = open("letter_wise_formatting.json", "r")
stocks = json.load(stocks_data)
stocks_data.close()
return stocks
def get_matched_stocks_list(self):
mf_stocks = self.get_mf_stock_data()
listed_stocks = self.get_listed_stocks_dict()
all_mf_stocks = []
## file to store matched and unmatched stocks data
data_file = open("analysis_data.txt", "w")
for key in mf_stocks.keys():
all_mf_stocks = mf_stocks[key]["stocks-data"]
misc_data = mf_stocks[key]["miscellaneous"]
print "Total stocks in mf: " + key + " : " + str(len(all_mf_stocks))
MutualFundNavAnalysis.matched_stocks_data.setdefault(key, [])
match_count = 0
matched_stocks = []
data_file.write("MATCHED STOCKS\n")
## loop to generate the matching stocks and the stock codes
## which will be used to fetch the stock data from google finance
for stock in all_mf_stocks:
stock_name = stock["stock"]
first_letter = stock_name[0].upper()
## Branch directly to stocks starting letter
## The data for stocks is segregated based on first letter of stock name
for current_stock in listed_stocks[first_letter]:
if self.is_fuzzy_matching_valid(stock_name, current_stock):
data_file.write(stock_name+" ==> "+current_stock+"\n")
match_count += 1
matched_stocks.append(stock_name)
## Prepare data for matched stocks
## stock data appended to matched_stocks_data
## in format: [[STOCK_CODE_1, WEIGHTING_1], [STOCK_CODE_2, WEIGHTING_2], ..]
stock_code = listed_stocks[first_letter][current_stock]
append_data = [stock_name, misc_data["cash_allocation"], stock_code, stock["weighting"]]
self.append_matched_data(key, append_data)
self.dump_matching_analysis(data_file, match_count, all_mf_stocks, matched_stocks)
self.append_price_change_data_in_matched_stocks()
def is_fuzzy_matching_valid(self, stock_name, current_stock):
## Get the token sort ratio from fuzzywuzzy
ratio = fuzz.token_sort_ratio(stock_name, current_stock)
return ratio > 95
def append_matched_data(self, key, append_data):
size = len(MutualFundNavAnalysis.matched_stocks_data[key])
MutualFundNavAnalysis.matched_stocks_data[key].append([])
## add all data
for data in append_data:
MutualFundNavAnalysis.matched_stocks_data[key][size].append(data)
def dump_matching_analysis(self, data_file, match_count, all_mf_stocks, matched_stocks):
    """Write the non-matched holdings to the analysis file, then close it.

    :param data_file: open handle for ``analysis_data.txt`` (closed here)
    :param match_count: number of holdings matched against listed stocks
    :param all_mf_stocks: every holding of the fund (dicts with a ``stock`` key)
    :param matched_stocks: stock names that were successfully fuzzy-matched
    """
    #data_file = open("analysis_data.txt", "a")
    print "Total matches : " + str(match_count)
    print "Stocks not matched are: \n"
    data_file.write("\n\nNOT MATCHED STOCKS\n")
    ## write all non-matched stocks for analysis
    for s in all_mf_stocks:
        stock = s["stock"]
        if stock not in matched_stocks:
            print stock
            data_file.write(stock+"\n")
    print "\n"
    # NOTE: the caller's file handle is closed here, so this must be the
    # last writer of analysis_data.txt.
    data_file.close()
def append_price_change_data_in_matched_stocks(self):
    """Fetch the latest price change for every matched stock and persist it.

    For each matched holding, queries Google Finance by stock code, appends
    ``percent_change`` and ``time`` to the holding's record, then dumps the
    whole ``matched_stocks_data`` mapping to ``change_data.json``.
    """
    ## Fetch all the price changes for matched stocks with stock codes
    for key in MutualFundNavAnalysis.matched_stocks_data.keys():
        print "\nPrice changes in MF %s \n" % key
        for data in MutualFundNavAnalysis.matched_stocks_data[key]:
            # record layout so far: [name, cash_allocation, code, weighting]
            name = data[0]
            code = data[2]
            url = GOOGLE_FINANCE_URL + code
            received_data = get_stock_price_data(url, name, code)
            percent_change = received_data[0]
            time = received_data[1]
            # extend record to [..., percent_change, time]
            data.append(percent_change)
            data.append(time)
    #print matched_stocks_data
    with open("change_data.json", "w") as out:
        json.dump(MutualFundNavAnalysis.matched_stocks_data, out)
    #return matched_stocks_data
def nav_change_analysis(self):
    """Estimate and print the expected NAV change for each fund.

    Reads ``change_data.json`` (written by
    append_price_change_data_in_matched_stocks) and sums
    weighting * price_change over each fund's holdings, divided by 100.
    """
    # NOTE(review): the handle is never closed; consider a with-block.
    data_file = open("change_data.json", "r")
    content = json.load(data_file)
    print "Read from file!"
    total, total_w, total_change, cash = 0, 0, 0, 0
    for key in content.keys():
        # reset the accumulators for every fund
        total, total_w, total_change, cash = 0, 0, 0, 0
        for current in content[key]:
            # get cash allocation
            cash = current[1]
            weighting = current[3]
            weighting = float(weighting)
            # NOTE(review): total_w and cash are accumulated/assigned but
            # never used below (cash adjustment is commented out).
            total_w += weighting
            change = current[4]
            change = float(change)
            total += (weighting * change)
        #print content
        #total_change = str((total / 100) + (cash / 100))
        total_change = str(total / 100)
        print "Expected NAV change for %s :: %s%%" % (key ,total_change)
def get_complete_nav_analysis(self):
    """Run the full pipeline: match fund holdings, then estimate NAV changes."""
    self.get_matched_stocks_list()
    self.nav_change_analysis()
if __name__ == "__main__":
    # Entry point: run the complete matching + NAV-change analysis.
    mf_nav = MutualFundNavAnalysis()
    mf_nav.get_complete_nav_analysis()
|
# -*- coding: utf-8 -*-
"""Anima Pipeline Library
Anima uses ``Stalker Configuration Framework``.
To be able to make it work set the STALKER_CONFIG environment variable to a
valid configuration folder (which has a config.py file inside, if there is no
config.py file create one).
Place the following variables in to the config.py file::
database_engine_settings = {
'sqlalchemy.url': 'dialect://user:password@a.b.c.d/stalker',
'sqlalchemy.echo': False,
'sqlalchemy.pool_size': 1,
'sqlalchemy.max_overflow': 3
}
stalker_server_internal_address = 'http://a.b.c.d:xxxx'
stalker_server_external_address = 'http://e.f.g.h:xxxx'
"""
import sys
import os
import stat
import tempfile
import logging
from anima.config import Config
__version__ = "0.6.0"

# Tuple of "string" types, bridging the Python 2/3 ``unicode`` split.
__string_types__ = []
if sys.version_info[0] >= 3:  # Python 3
    __string_types__ = tuple([str])
else:  # Python 2
    __string_types__ = tuple([str, unicode])  # noqa: F821 -- py2 only

# create logger
# logging.basicConfig()
logger = logging.getLogger(__name__)
logging_level = logging.ERROR
logger.setLevel(logging_level)

# create formatter
logging_formatter = logging.Formatter('%(module)s: %(funcName)s: %(levelname)s: %(message)s')

# create file handler: all log output goes to <tempdir>/anima.log
log_file_path = os.path.join(
    tempfile.gettempdir(),
    'anima.log'
)
log_file_handler = logging.FileHandler(log_file_path)
log_file_handler.setFormatter(logging_formatter)

# add file handler
logger.addHandler(log_file_handler)

# set stalker to use the same logger
# Fix file mode for the shared log file: read/write for everyone, no execute
# bits (0o666).  Bitwise ops are the correct idiom for permission flags;
# arithmetic +/- silently breaks if any flag is counted twice.
os.chmod(
    log_file_path,
    (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
    & ~(stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
)

TIMING_RESOLUTION = 10  # in minutes

defaults = Config()
|
# pylint: disable=no-value-for-parameter
from dagster_shell import create_shell_command_op
from dagster import graph
@graph
def my_graph():
    """Single-op graph that shells out to echo a greeting."""
    create_shell_command_op('echo "hello, world!"', name="a")()
|
import streamlit as st
import json
import os
import re
import pandas as pd
import numpy as np
import argparse
import requests
import logging
import yaml
import glob
import datetime
import time
from models import clustering
import pyLDAvis
import webbrowser
import base64
import pytest
# Clustering/vectoriser defaults.
cluster_amount = 8
additional_stopwords = ['astudio', 'uutiset', 'kotimaan_uutiset']
min_df = 0.0005
max_df = 1

# Load the 'local' section of the YAML config.  safe_load avoids executing
# arbitrary YAML tags; calling yaml.load without an explicit Loader is
# deprecated (PyYAML >= 5.1) and unsafe on untrusted input.
with open("config.yml") as f:
    config = yaml.safe_load(f)["local"]
def load_data(path, idx_name):
    """Load a JSON file of records into a DataFrame indexed by *idx_name*.

    :param path: path to a JSON file containing a list of records
    :param idx_name: column to promote to the DataFrame index
    :return: the indexed DataFrame
    """
    with open(path) as source:
        records = json.load(source)
    frame = pd.DataFrame(records)
    frame.set_index(idx_name, inplace=True)
    return frame
def replace_spaces(stringlist):
    """Strip punctuation from each string and turn separators into underscores.

    The character class removes '!', '@', '#' and the ASCII range '$'..'-'
    (which covers $%&'()*+,-); remaining spaces and hyphens become '_'.
    """
    cleaned = []
    for word in stringlist:
        stripped = re.sub("[!@#$--&'()]", '', word)
        cleaned.append(stripped.replace(" ", "_").replace("-", "_"))
    return cleaned
def test_run():
    """Placeholder smoke test; succeeds without doing any work."""
    return None
import confply.cpp_compiler as cpp_compiler
# Candidate C++ compilers, most preferred first.
fallbacks = ["cl", "clang++", "g++", "gcc", "clang"]
# Pick the first compiler from the fallback list that exists on this system;
# tool_name stays None when none of them is found.
tool_name = next((tool for tool in fallbacks if cpp_compiler.is_found(tool)), None)
def generate():
    """Point the shared cpp_compiler config at the detected tool, then generate."""
    cpp_compiler.tool = tool_name
    return cpp_compiler.generate()
def get_environ():
    """Delegate environment construction to the underlying cpp_compiler module."""
    return cpp_compiler.get_environ()
def handle_args():
    """Forward command-line argument handling to the cpp_compiler module."""
    cpp_compiler.handle_args()
def is_found():
    """Return True when a usable compiler was detected at import time."""
    return bool(tool_name)
|
import tkinter as tk
from PIL import Image, ImageTk
import pygame
import time as delay
from openpyxl import *
from modules import janelaconfig
import os
from tkinter.messagebox import showwarning
# Classe destinada a visualização da database.
# Class dedicated to viewing the database.
class MostraData(tk.Toplevel):
    # Builds the MostraData object.
    def __init__(self):
        super().__init__()
        # Set toplevel window properties.
        MostraData.title(self, 'Visualizando Database')
        MostraData.iconbitmap(self, './resources/img/icons/mainicon.ico')
        MostraData.configure(self, bg='#185c37')
        # Configure the form's toplevel (centre it on screen).
        conf_toplevel = janelaconfig.Janela(master=MostraData)
        conf_toplevel.centraliza_tamanho(janela=self)
        MostraData.grid_rowconfigure(self, 0, weight=1)
        MostraData.grid_columnconfigure(self, 0, weight=1)
        # Frame holding the database information.
        frame_db = tk.Frame(self, bg='#185c37')
        frame_db.grid(sticky='', padx=2, pady=2)

        # Functions that load the workbook and refresh the GUI labels with it.
        def atualiza_dados():
            if os.path.exists('./database/database.xlsx'):
                database = load_workbook('./database/database.xlsx')
                formcadastro = database.active
            else:
                # No database file yet: fall back to an empty workbook.
                database = Workbook()
                formcadastro = database.active
            # One variable per spreadsheet column.
            coluna_a = formcadastro['A']
            coluna_b = formcadastro['B']
            coluna_c = formcadastro['C']
            coluna_d = formcadastro['D']
            coluna_e = formcadastro['E']
            coluna_f = formcadastro['F']
            coluna_g = formcadastro['G']
            coluna_h = formcadastro['H']
            # Refresh each label's text: one newline-joined string per column.
            lista = ''
            for cell in coluna_a:
                lista = f'{lista + str(cell.value)}\n'
            label_a.config(text=lista)
            lista2 = ''
            for cell in coluna_b:
                lista2 = f'{lista2 + str(cell.value)}\n'
            label_b.config(text=lista2)
            lista3 = ''
            for cell in coluna_c:
                lista3 = f'{lista3 + str(cell.value)}\n'
            label_c.config(text=lista3)
            lista4 = ''
            for cell in coluna_d:
                lista4 = f'{lista4 + str(cell.value)}\n'
            label_d.config(text=lista4)
            lista5 = ''
            for cell in coluna_e:
                lista5 = f'{lista5 + str(cell.value)}\n'
            label_e.config(text=lista5)
            lista6 = ''
            for cell in coluna_f:
                lista6 = f'{lista6 + str(cell.value)}\n'
            label_f.config(text=lista6)
            lista7 = ''
            for cell in coluna_g:
                lista7 = f'{lista7 + str(cell.value)}\n'
            label_g.config(text=lista7)
            lista8 = ''
            for cell in coluna_h:
                lista8 = f'{lista8 + str(cell.value)}\n'
            label_h.config(text=lista8)

        # Warns the user when the database file does not exist yet.
        def informa_existencia():
            if os.path.exists('./database/database.xlsx'):
                return True
            else:
                showwarning('Atenção!', 'Para exibição correta da database, realize no mínimo '
                            'um cadastro.', parent=self)

        # Buttons
        # Icon for the "Show Database" button.
        icon_mostrarabre = Image.open('./resources/img/icons/imprimir.ico')  # Load button icon.
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer Pillow
        # versions need Image.LANCZOS instead -- confirm the pinned Pillow.
        icon_mostrarabre = icon_mostrarabre.resize((30, 30), Image.ANTIALIAS)  # Resize the icon.
        mostrar_icon = ImageTk.PhotoImage(icon_mostrarabre)  # Set button icon.
        mostrar_db = tk.Button(self,
                               text='Exibir Database',
                               compound='left',
                               image=mostrar_icon,
                               padx=10,
                               bg='#d5fdec',
                               font='Roboto 12 bold',
                               width=145,
                               height=50,
                               bd=5,
                               relief='raised',
                               command=lambda: [on_click(), atualiza_dados(), informa_existencia()]
                               )
        # Keep a reference so the PhotoImage is not garbage-collected.
        mostrar_db.image = mostrar_icon
        mostrar_db.grid(row=0, column=0, sticky='se')
        # Icon for the "Back" button.
        icon_voltarabre = Image.open('./resources/img/icons/returnmenu.ico')  # Load button icon.
        icon_voltarabre = icon_voltarabre.resize((30, 30), Image.ANTIALIAS)  # Resize the icon.
        voltar_icon = ImageTk.PhotoImage(icon_voltarabre)  # Set button icon.
        voltar_menu = tk.Button(self,
                                text='Voltar',
                                compound='left',
                                image=voltar_icon,
                                padx=10,
                                bg='#d5fdec',
                                font='Roboto 12 bold',
                                width=120,
                                height=50,
                                bd=5,
                                relief='raised',
                                command=lambda: [retorna_menu(), self.destroy()]
                                )
        voltar_menu.image = voltar_icon
        voltar_menu.grid(row=0, column=0, sticky='sw')
        # Labels showing the database information (one per column A..H).
        label_a = tk.Label(frame_db, text='', font='Roboto 8 bold', bg='#185c37', fg='#ffffff',
                           relief='ridge')
        label_a.grid(row=1, column=1, sticky='s')
        label_b = tk.Label(frame_db, text='', font='Roboto 8 bold', bg='#185c37', fg='#ffffff',
                           relief='ridge')
        label_b.grid(row=1, column=2, sticky='s')
        label_c = tk.Label(frame_db, text='', font='Roboto 8 bold', bg='#185c37', fg='#ffffff',
                           relief='ridge')
        label_c.grid(row=1, column=3, sticky='s')
        label_d = tk.Label(frame_db, text='', font='Roboto 8 bold', bg='#185c37', fg='#ffffff',
                           relief='ridge')
        label_d.grid(row=1, column=4, sticky='s')
        label_e = tk.Label(frame_db, text='', font='Roboto 8 bold', bg='#185c37', fg='#ffffff',
                           relief='ridge')
        label_e.grid(row=1, column=5, sticky='s')
        label_f = tk.Label(frame_db, text='', font='Roboto 8 bold', bg='#185c37', fg='#ffffff',
                           relief='ridge')
        label_f.grid(row=1, column=6, sticky='s')
        label_g = tk.Label(frame_db, text='', font='Roboto 8 bold', bg='#185c37', fg='#ffffff',
                           relief='ridge')
        label_g.grid(row=1, column=7, sticky='s')
        label_h = tk.Label(frame_db, text='', font='Roboto 8 bold', bg='#185c37', fg='#ffffff',
                           relief='ridge')
        label_h.grid(row=1, column=8, sticky='s')
# General functions
# Plays the button-click sound.
def click_sound():
    pygame.init()
    pygame.mixer.music.load('./resources/sound/mouseclick.wav')
    pygame.mixer.music.play()
    pygame.event.wait(timeout=1)
# Performs the common actions for any button click.
def on_click():
    click_sound()
# Returns to the initial window (small delay lets the click sound play).
def retorna_menu():
    on_click()
    delay.sleep(0.1)
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kfp.v2.dsl.dsl_utils."""
import unittest
from kfp.v2.dsl import dsl_utils
from kfp.pipeline_spec import pipeline_spec_pb2
from google.protobuf import json_format
class _DummyClass(object):
pass
class DslUtilsTest(unittest.TestCase):
    """Unit tests for the kfp.v2.dsl.dsl_utils helper functions."""

    def test_sanitize_component_name(self):
        # component names are kebab-cased and prefixed with 'comp-'
        self.assertEqual('comp-my-component',
                         dsl_utils.sanitize_component_name('My component'))

    def test_sanitize_executor_label(self):
        # executor labels are kebab-cased and prefixed with 'exec-'
        self.assertEqual('exec-my-component',
                         dsl_utils.sanitize_executor_label('My component'))

    def test_sanitize_task_name(self):
        # task names are kebab-cased and prefixed with 'task-'
        self.assertEqual('task-my-component-1',
                         dsl_utils.sanitize_task_name('My component 1'))

    def test_get_ir_value(self):
        # int, float and str map to the matching pipeline_spec Value field
        self.assertDictEqual(
            json_format.MessageToDict(pipeline_spec_pb2.Value(int_value=42)),
            json_format.MessageToDict(dsl_utils.get_value(42)))
        self.assertDictEqual(
            json_format.MessageToDict(pipeline_spec_pb2.Value(double_value=12.2)),
            json_format.MessageToDict(dsl_utils.get_value(12.2)))
        self.assertDictEqual(
            json_format.MessageToDict(
                pipeline_spec_pb2.Value(string_value='hello world')),
            json_format.MessageToDict(dsl_utils.get_value('hello world')))
        # unsupported types raise
        with self.assertRaisesRegex(TypeError, 'Got unexpected type'):
            dsl_utils.get_value(_DummyClass())

    def test_remove_task_name_prefix(self):
        self.assertEqual('my-component',
                         dsl_utils.remove_task_name_prefix('task-my-component'))
        # names without the 'task-' prefix are rejected
        with self.assertRaises(AssertionError):
            dsl_utils.remove_task_name_prefix('my-component')
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
|
import json
from django.shortcuts import render
from django.utils.safestring import mark_safe
def index(request):
    """Render the landing page (no extra template context needed)."""
    context = {}
    return render(request, 'basic_system/index.html', context)
def room(request, room_name):
    """Render a room page, exposing the room name to client-side JavaScript."""
    # JSON-encode and mark safe so the template can embed it inside a <script>.
    payload = mark_safe(json.dumps(room_name))
    return render(request, 'basic_system/room.html', {'room_name_json': payload})
|
"""
See https://mathworld.wolfram.com/TalismanSquare.html
An n x n array of the integers from 1 to n^2 such that the difference between any integer
and its neighbor (horizontally, vertically, or diagonally, without wrapping around)
is greater than or equal to some value k is called a (n,k)-talisman square.
Examples of Execution:
python3 Talisman.py
python3 Talisman.py -data=[5,4]
"""
from pycsp3 import *

# n = side of the square, k = required minimum neighbour difference;
# defaults to a (4,2)-talisman square when no -data argument is given.
n, k = data or (4, 2)
# NOTE(review): magic-square row sum; appears unused in this model -- confirm.
limit = (n * (n * n + 1)) // 2

# x[i][j] is the value in the talisman square at row i and column j
x = VarArray(size=[n, n], dom=range(1, n * n + 1))

satisfy(
    # all values must be different
    AllDifferent(x),

    # the distance between two neighbouring cells must be strictly greater than k
    [
        # horizontal neighbours
        [abs(x[i][j] - x[i][j + 1]) > k for i in range(n) for j in range(n - 1)],
        # vertical neighbours
        [abs(x[i][j] - x[i + 1][j]) > k for j in range(n) for i in range(n - 1)],
        # diagonal neighbours (both directions, no wrap-around)
        [abs(dgn[i] - dgn[i + 1]) > k for dgn in diagonals_down(x) for i in range(len(dgn) - 1)],
        [abs(dgn[i] - dgn[i + 1]) > k for dgn in diagonals_up(x) for i in range(len(dgn) - 1)]
    ],

    # tag(symmetry-breaking)
    x[0][0] == 1
)
|
import time
def fib_lin(x):
    """Iteratively compute a Fibonacci number in O(x) time.

    Starting from (0, 1) and stepping x+1 times, this returns the sequence
    1, 1, 2, 3, 5, ... for x = 0, 1, 2, 3, 4, ...
    """
    prev, curr = 0, 1
    for _ in range(x + 1):
        prev, curr = curr, prev + curr
    return prev
# Read how many Fibonacci terms to print, then print each with a timestamp pair.
x= int(input("Give the number of fibonnacci terms: "))
for x in range(1,x+1):
    start=time.time()
    print(x,":",fib_lin(x))
    stop=time.time()
# NOTE(review): start/stop are reset on every iteration, so this reports only
# the duration of the *last* fib_lin call, not the whole program.
print("total time taken to this program is {} sec".format(stop-start))
|
"""Jobs for the Lifecycle Management plugin."""
from datetime import datetime
from nautobot.dcim.models import Device, InventoryItem
from nautobot.extras.jobs import Job
from nautobot_device_lifecycle_mgmt import choices
from nautobot_device_lifecycle_mgmt.models import (
DeviceSoftwareValidationResult,
InventoryItemSoftwareValidationResult,
)
from nautobot_device_lifecycle_mgmt.software import DeviceSoftware, InventoryItemSoftware
name = "Device/Software Lifecycle Reporting" # pylint: disable=invalid-name
class DeviceSoftwareValidationFullReport(Job):
    """Checks if devices run validated software version."""

    name = "Device Software Validation Report"
    description = "Validates software version on devices."
    read_only = False

    class Meta:  # pylint: disable=too-few-public-methods
        """Meta class for the job."""

        commit_default = True

    def test_device_software_validity(self) -> None:
        """Check if software assigned to each device is valid. If no software is assigned return warning message."""
        devices = Device.objects.all()
        # Single timestamp so every result row written by this run shares last_run.
        job_run_time = datetime.now()

        for device in devices:
            device_software = DeviceSoftware(device)
            # One persistent result row per device, updated in place.
            validate_obj, _ = DeviceSoftwareValidationResult.objects.get_or_create(device=device)
            validate_obj.is_validated = device_software.validate_software()
            validate_obj.software = device_software.software
            validate_obj.last_run = job_run_time
            validate_obj.run_type = choices.ReportRunTypeChoices.REPORT_FULL_RUN
            validate_obj.validated_save()

        self.log_success(message=f"Performed validation on: {devices.count()} devices.")
class InventoryItemSoftwareValidationFullReport(Job):
    """Checks if inventory items run validated software version."""

    name = "Inventory Item Software Validation Report"
    description = "Validates software version on inventory items."
    read_only = False

    class Meta:  # pylint: disable=too-few-public-methods
        """Meta class for the job."""

        commit_default = True

    def test_inventory_item_software_validity(self):
        """Check if software assigned to each inventory item is valid. If no software is assigned return warning message."""
        inventory_items = InventoryItem.objects.all()
        # Single timestamp so every result row written by this run shares last_run.
        job_run_time = datetime.now()

        for inventoryitem in inventory_items:
            inventoryitem_software = InventoryItemSoftware(inventoryitem)
            # One persistent result row per inventory item, updated in place.
            validate_obj, _ = InventoryItemSoftwareValidationResult.objects.get_or_create(inventory_item=inventoryitem)
            validate_obj.is_validated = inventoryitem_software.validate_software()
            validate_obj.software = inventoryitem_software.software
            validate_obj.last_run = job_run_time
            validate_obj.run_type = choices.ReportRunTypeChoices.REPORT_FULL_RUN
            validate_obj.validated_save()

        self.log_success(message=f"Performed validation on: {inventory_items.count()} inventory items.")
|
"""
Report template model definitions
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import logging
import datetime
from django.urls import reverse
from django.db import models
from django.conf import settings
from django.core.exceptions import ValidationError, FieldError
from django.template.loader import render_to_string
from django.template import Template, Context
from django.core.files.storage import FileSystemStorage
from django.core.validators import FileExtensionValidator
import build.models
import common.models
import part.models
import stock.models
import order.models
from InvenTree.helpers import validateFilterString
from django.utils.translation import gettext_lazy as _
# django_weasyprint depends on native libraries (pango, cairo, ...); importing
# it raises OSError when they are missing, so fail fast with a helpful hint.
try:
    from django_weasyprint import WeasyTemplateResponseMixin
except OSError as err:
    print("OSError: {e}".format(e=err))
    print("You may require some further system packages to be installed.")
    sys.exit(1)

logger = logging.getLogger("inventree")
class ReportFileUpload(FileSystemStorage):
    """
    Custom implementation of FileSystemStorage class.

    When uploading a report (or a snippet / asset / etc),
    it is often important to ensure the filename is not arbitrarily *changed*,
    if the name of the uploaded file is identical to the currently stored file.

    For example, a snippet or asset file is referenced in a template by filename,
    and we do not want that filename to change when we upload a new *version*
    of the snippet or asset file.

    This uploader class performs the following pseudo-code function:

    - If the model is *new*, proceed as normal
    - If the model is being updated:
        a) If the new filename is *different* from the existing filename, proceed as normal
        b) If the new filename is *identical* to the existing filename, we want to overwrite the existing file
    """

    def get_available_name(self, name, max_length=None):
        # NOTE(review): currently just defers to the default Django behaviour;
        # the overwrite logic described in the class docstring is not
        # implemented here -- confirm whether that is intentional.
        return super().get_available_name(name, max_length)
def rename_template(instance, filename):
    """Delegate template-upload renaming to the model's own rename_file() hook."""
    return instance.rename_file(filename)
def validate_stock_item_report_filters(filters):
    """Ensure *filters* is a valid query-filter string for the StockItem model."""
    return validateFilterString(filters, model=stock.models.StockItem)
def validate_part_report_filters(filters):
    """Ensure *filters* is a valid query-filter string for the Part model."""
    return validateFilterString(filters, model=part.models.Part)
def validate_build_report_filters(filters):
    """Ensure *filters* is a valid query-filter string for the Build model."""
    return validateFilterString(filters, model=build.models.Build)
def validate_purchase_order_filters(filters):
    """Ensure *filters* is a valid query-filter string for the PurchaseOrder model."""
    return validateFilterString(filters, model=order.models.PurchaseOrder)
def validate_sales_order_filters(filters):
    """Ensure *filters* is a valid query-filter string for the SalesOrder model."""
    return validateFilterString(filters, model=order.models.SalesOrder)
class WeasyprintReportMixin(WeasyTemplateResponseMixin):
    """
    Class for rendering a HTML template to a PDF.
    """

    # Defaults; pdf_filename is overridden per request in __init__ via kwargs.
    pdf_filename = 'report.pdf'
    pdf_attachment = True

    def __init__(self, request, template, **kwargs):
        """Capture the request, template name and target filename.

        :param request: HttpRequest being served
        :param template: name of the template to render
        :param kwargs: may contain 'filename' (defaults to 'report.pdf')
        """
        self.request = request
        self.template_name = template
        self.pdf_filename = kwargs.get('filename', 'report.pdf')
class ReportBase(models.Model):
    """
    Base class for uploading html templates.

    Provides the common name / description / template / revision fields and
    auto-increments the revision counter on every save.
    """

    class Meta:
        abstract = True

    def save(self, *args, **kwargs):
        # Increment revision number on every save.
        self.revision += 1

        # Forward the caller's arguments (e.g. using=, update_fields=);
        # previously they were silently dropped.
        super().save(*args, **kwargs)

    def __str__(self):
        return "{n} - {d}".format(n=self.name, d=self.description)

    @classmethod
    def getSubdir(cls):
        """Return the template storage subdirectory (overridden by subclasses)."""
        return ''

    def rename_file(self, filename):
        # Function for renaming uploaded file: store under
        # report/report_template/<subdir>/<basename>
        filename = os.path.basename(filename)
        return os.path.join('report', 'report_template', self.getSubdir(), filename)

    @property
    def extension(self):
        """Lower-cased file extension of the uploaded template."""
        return os.path.splitext(self.template.name)[1].lower()

    @property
    def template_name(self):
        """
        Returns the file system path to the template file.

        Required for passing the file to an external process
        """
        template = self.template.name
        # Normalise path separators for the current OS.
        template = template.replace('/', os.path.sep)
        template = template.replace('\\', os.path.sep)

        template = os.path.join(settings.MEDIA_ROOT, template)
        return template

    name = models.CharField(
        blank=False, max_length=100,
        verbose_name=_('Name'),
        help_text=_('Template name'),
    )

    template = models.FileField(
        upload_to=rename_template,
        verbose_name=_('Template'),
        help_text=_("Report template file"),
        validators=[FileExtensionValidator(allowed_extensions=['html', 'htm'])],
    )

    description = models.CharField(
        max_length=250,
        verbose_name=_('Description'),
        help_text=_("Report template description")
    )

    # Auto-incremented by save(); not user-editable.
    revision = models.PositiveIntegerField(
        default=1,
        verbose_name=_("Revision"),
        help_text=_("Report revision number (auto-increments)"),
        editable=False,
    )
class ReportTemplateBase(ReportBase):
    """
    Reporting template model.

    Able to be passed context data
    """

    # Pass a single top-level object to the report template
    object_to_print = None

    def get_context_data(self, request):
        """
        Supply context data to the template for rendering.

        Subclasses override this to inject their model-specific context.
        """
        return {}

    def context(self, request):
        """
        All context to be passed to the renderer.
        """
        # Generate custom context data based on the particular report subclass
        context = self.get_context_data(request)

        context['base_url'] = common.models.InvenTreeSetting.get_setting('INVENTREE_BASE_URL')
        context['date'] = datetime.datetime.now().date()
        context['datetime'] = datetime.datetime.now()
        context['default_page_size'] = common.models.InvenTreeSetting.get_setting('REPORT_DEFAULT_PAGE_SIZE')
        context['report_description'] = self.description
        context['report_name'] = self.name
        context['report_revision'] = self.revision
        context['request'] = request
        context['user'] = request.user

        return context

    def generate_filename(self, request, **kwargs):
        """
        Generate a filename for this report.

        filename_pattern is rendered as a Django template against the report
        context, so it may reference context variables.
        """
        template_string = Template(self.filename_pattern)
        ctx = self.context(request)
        context = Context(ctx)
        return template_string.render(context)

    def render_as_string(self, request, **kwargs):
        """
        Render the report to a HTML string.

        Useful for debug mode (viewing generated code)
        """
        return render_to_string(self.template_name, self.context(request), request)

    def render(self, request, **kwargs):
        """
        Render the template to a PDF file.

        Uses django-weasyprint plugin to render HTML template against Weasyprint
        """
        # TODO: Support custom filename generation!
        # filename = kwargs.get('filename', 'report.pdf')

        # Render HTML template to PDF
        wp = WeasyprintReportMixin(
            request,
            self.template_name,
            base_url=request.build_absolute_uri("/"),
            presentational_hints=True,
            filename=self.generate_filename(request),
            **kwargs)

        return wp.render_to_response(
            self.context(request),
            **kwargs)

    # Django-template pattern used by generate_filename()
    filename_pattern = models.CharField(
        default="report.pdf",
        verbose_name=_('Filename Pattern'),
        help_text=_('Pattern for generating report filenames'),
        max_length=100,
    )

    # Disabled templates are excluded from report generation
    enabled = models.BooleanField(
        default=True,
        verbose_name=_('Enabled'),
        help_text=_('Report template is enabled'),
    )

    class Meta:
        abstract = True
class TestReport(ReportTemplateBase):
    """
    Render a TestReport against a StockItem object.
    """

    @staticmethod
    def get_api_url():
        # API endpoint for listing test-report templates
        return reverse('api-stockitem-testreport-list')

    @classmethod
    def getSubdir(cls):
        return 'test'

    # Restrict which StockItem objects this template applies to
    filters = models.CharField(
        blank=True,
        max_length=250,
        verbose_name=_('Filters'),
        help_text=_("StockItem query filters (comma-separated list of key=value pairs)"),
        validators=[
            validate_stock_item_report_filters
        ]
    )

    include_installed = models.BooleanField(
        default=False,
        verbose_name=_('Include Installed Tests'),
        help_text=_('Include test results for stock items installed inside assembled item')
    )

    def matches_stock_item(self, item):
        """
        Test if this report template matches a given StockItem objects
        """
        try:
            filters = validateFilterString(self.filters)
            items = stock.models.StockItem.objects.filter(**filters)
        except (ValidationError, FieldError):
            # Invalid filter string: the template matches nothing
            return False

        # Ensure the provided StockItem object matches the filters
        items = items.filter(pk=item.pk)

        return items.exists()

    def get_context_data(self, request):
        # Context: the StockItem, its part, and its test results
        stock_item = self.object_to_print

        return {
            'stock_item': stock_item,
            'serial': stock_item.serial,
            'part': stock_item.part,
            'results': stock_item.testResultMap(include_installed=self.include_installed),
            'result_list': stock_item.testResultList(include_installed=self.include_installed)
        }
class BuildReport(ReportTemplateBase):
    """
    Build order / work order report
    """

    @staticmethod
    def get_api_url():
        # API endpoint for listing build-report templates
        return reverse('api-build-report-list')

    @classmethod
    def getSubdir(cls):
        return 'build'

    # Restrict which Build objects this template applies to.
    # (help_text typo fixed: closing parenthesis was missing)
    filters = models.CharField(
        blank=True,
        max_length=250,
        verbose_name=_('Build Filters'),
        help_text=_('Build query filters (comma-separated list of key=value pairs)'),
        validators=[
            validate_build_report_filters,
        ]
    )

    def get_context_data(self, request):
        """
        Custom context data for the build report
        """
        my_build = self.object_to_print

        # isinstance (rather than an exact type() comparison) also accepts
        # subclasses of Build
        if not isinstance(my_build, build.models.Build):
            raise TypeError('Provided model is not a Build object')

        return {
            'build': my_build,
            'part': my_build.part,
            'bom_items': my_build.part.get_bom_items(),
            'reference': my_build.reference,
            'quantity': my_build.quantity,
            'title': str(my_build),
        }
class BillOfMaterialsReport(ReportTemplateBase):
    """
    Render a Bill of Materials against a Part object
    """

    @staticmethod
    def get_api_url():
        # API endpoint for listing BOM-report templates
        return reverse('api-bom-report-list')

    @classmethod
    def getSubdir(cls):
        return 'bom'

    # Restrict which Part objects this template applies to.
    # (help_text typo fixed: closing parenthesis was missing)
    filters = models.CharField(
        blank=True,
        max_length=250,
        verbose_name=_('Part Filters'),
        help_text=_('Part query filters (comma-separated list of key=value pairs)'),
        validators=[
            validate_part_report_filters
        ]
    )

    def get_context_data(self, request):
        # Context: the Part, its category, and its BOM lines
        part = self.object_to_print

        return {
            'part': part,
            'category': part.category,
            'bom_items': part.get_bom_items(),
        }
class PurchaseOrderReport(ReportTemplateBase):
    """
    Render a report against a PurchaseOrder object
    """

    @staticmethod
    def get_api_url():
        # API endpoint for listing purchase-order report templates
        return reverse('api-po-report-list')

    @classmethod
    def getSubdir(cls):
        return 'purchaseorder'

    # Restrict which PurchaseOrder objects this template applies to
    filters = models.CharField(
        blank=True,
        max_length=250,
        verbose_name=_('Filters'),
        help_text=_('Purchase order query filters'),
        validators=[
            validate_purchase_order_filters,
        ]
    )

    def get_context_data(self, request):
        # Context: the order, its lines, supplier and reference details
        order = self.object_to_print

        return {
            'description': order.description,
            'lines': order.lines,
            'order': order,
            'reference': order.reference,
            'supplier': order.supplier,
            'prefix': common.models.InvenTreeSetting.get_setting('PURCHASEORDER_REFERENCE_PREFIX'),
            'title': str(order),
        }
class SalesOrderReport(ReportTemplateBase):
    """
    Render a report against a SalesOrder object
    """

    @staticmethod
    def get_api_url():
        # API endpoint for listing sales-order report templates
        return reverse('api-so-report-list')

    @classmethod
    def getSubdir(cls):
        return 'salesorder'

    # Restrict which SalesOrder objects this template applies to
    filters = models.CharField(
        blank=True,
        max_length=250,
        verbose_name=_('Filters'),
        help_text=_('Sales order query filters'),
        validators=[
            validate_sales_order_filters
        ]
    )

    def get_context_data(self, request):
        # Context: the order, its lines, customer and reference details
        order = self.object_to_print

        return {
            'customer': order.customer,
            'description': order.description,
            'lines': order.lines,
            'order': order,
            'prefix': common.models.InvenTreeSetting.get_setting('SALESORDER_REFERENCE_PREFIX'),
            'reference': order.reference,
            'title': str(order),
        }
def rename_snippet(instance, filename):
    """Return the upload path for a report snippet.

    Snippets are stored under report/snippets/<basename>. When a snippet is
    re-uploaded under its existing filename, the old file is removed first so
    the new upload keeps the exact same name (snippets are referenced by
    filename inside templates).
    """
    filename = os.path.basename(filename)

    path = os.path.join('report', 'snippets', filename)

    # If the snippet file is the *same* filename as the one being uploaded,
    # delete the original one from the media directory
    if str(filename) == str(instance.snippet):
        fullpath = os.path.join(settings.MEDIA_ROOT, path)
        fullpath = os.path.abspath(fullpath)

        if os.path.exists(fullpath):
            # Log the actual path (the message previously contained a literal
            # placeholder instead of the filename); lazy %-formatting.
            logger.info("Deleting existing snippet file: '%s'", fullpath)
            os.remove(fullpath)

    return path
class ReportSnippet(models.Model):
    """
    Report template 'snippet' which can be used to make templates
    that can then be included in other reports.

    Useful for 'common' template actions, sub-templates, etc
    """

    # Uploaded .html/.htm fragment, stored under report/snippets/
    snippet = models.FileField(
        upload_to=rename_snippet,
        verbose_name=_('Snippet'),
        help_text=_('Report snippet file'),
        validators=[FileExtensionValidator(allowed_extensions=['html', 'htm'])],
    )

    description = models.CharField(max_length=250, verbose_name=_('Description'), help_text=_("Snippet file description"))
def rename_asset(instance, filename):
    """Return the upload path for a report asset.

    Assets are stored under report/assets/<basename>. When an asset is
    re-uploaded under its existing filename, the old file is removed first so
    the new upload keeps the exact same name (assets are referenced by
    filename inside templates).
    """
    filename = os.path.basename(filename)

    path = os.path.join('report', 'assets', filename)

    # If the asset file is the *same* filename as the one being uploaded,
    # delete the original one from the media directory
    if str(filename) == str(instance.asset):
        fullpath = os.path.join(settings.MEDIA_ROOT, path)
        fullpath = os.path.abspath(fullpath)

        if os.path.exists(fullpath):
            # Log the actual path (the message previously contained a literal
            # placeholder instead of the filename); lazy %-formatting.
            logger.info("Deleting existing asset file: '%s'", fullpath)
            os.remove(fullpath)

    return path
class ReportAsset(models.Model):
    """
    Asset file for use in report templates.
    For example, an image to use in a header file.
    Uploaded asset files appear in MEDIA_ROOT/report/assets,
    and can be loaded in a template using the {% report_asset <filename> %} tag.
    """

    def __str__(self):
        # Display just the stored filename
        return os.path.basename(self.asset.name)

    # Uploaded asset file, stored under report/assets/
    asset = models.FileField(
        upload_to=rename_asset,
        verbose_name=_('Asset'),
        help_text=_("Report asset file"),
    )

    description = models.CharField(max_length=250, verbose_name=_('Description'), help_text=_("Asset file description"))
|
"""
AMG solvers
"""
postpone_import = 1
|
from arcgis import GIS
from arcgis.features import GeoAccessor, GeoSeriesAccessor
import arcpy
from arcpy import env
from arcpy.sa import *
import numpy as np
import os
import pandas as pd
#####
# Allow geoprocessing tools to overwrite existing outputs, and check out the
# Spatial Analyst extension licence used by the arcpy.sa tools.
arcpy.env.overwriteOutput = True
arcpy.CheckOutExtension("Spatial")
def select_feature_by_attributes_arcgis(input, Attri_NM, Attri_v, output):
    """Select features whose attribute *Attri_NM* is in *Attri_v*.

    Builds a SQL-style clause of the form ``"Field" IN (v1,v2,...)`` and runs
    arcpy Select_analysis, writing the selection to *output*.

    :param input: path to the input feature class / shapefile
    :param Attri_NM: attribute (field) name to filter on
    :param Attri_v: iterable of accepted attribute values
    :param output: path for the selected features
    """
    # join produces the same clause the old manual concatenation loop built
    where_clause = '"%s" IN (%s)' % (Attri_NM, ",".join(str(v) for v in Attri_v))
    arcpy.Select_analysis(input, output, where_clause)
    return
##################
def Remove_Unselected_Lake_Attribute_In_Finalcatinfo_Arcgis(finalcat_ply, Conn_Lake_Ids):
    """Zero out lake attributes for lakes that were not selected.

    Rows whose 'HyLakeId' is not in Conn_Lake_Ids and whose 'Lake_Cat' is not
    2 get all lake-related columns reset to 0.

    Parameters
    ----------
    finalcat_ply : DataFrame-like with lake attribute columns
    Conn_Lake_Ids : iterable of lake ids to keep

    Returns
    -------
    The same object, modified in place.
    """
    # Not a kept lake AND not a category-2 lake -> clear its attributes.
    mask = (~finalcat_ply['HyLakeId'].isin(Conn_Lake_Ids)) & (finalcat_ply['Lake_Cat'] != 2)
    for column in ('HyLakeId', 'LakeVol', 'LakeArea', 'LakeDepth', 'Laketype', 'Lake_Cat'):
        finalcat_ply.loc[mask, column] = 0
    return finalcat_ply
def save_modified_attributes_to_outputs(mapoldnew_info,tempfolder,OutputFolder,cat_name,riv_name,Path_final_riv,dis_col_name='SubId'):
    """Dissolve an updated attribute table into catchment (and river) outputs.

    mapoldnew_info : spatially-enabled DataFrame holding the modified attributes.
    tempfolder     : scratch folder for intermediate shapefiles.
    OutputFolder   : destination folder for the dissolved outputs.
    cat_name       : file name of the catchment polygon output.
    riv_name       : file name of the river output, or '#' to skip river output.
    Path_final_riv : path to the river shapefile whose geometry is reused.
    dis_col_name   : column used as the dissolve key (default 'SubId').
    """
    # Write the modified attributes to a temp shapefile, dissolve on the
    # key column, then join the full attribute table back onto the result.
    mapoldnew_info.spatial.to_featureclass(location=os.path.join(tempfolder,'updateattri.shp'),overwrite=True,sanitize_columns=False)
    arcpy.Dissolve_management(os.path.join(tempfolder,'updateattri.shp'), os.path.join(OutputFolder,cat_name), [dis_col_name])
    arcpy.JoinField_management(os.path.join(OutputFolder,cat_name), dis_col_name, os.path.join(tempfolder,'updateattri.shp'), dis_col_name)
    # Drop bookkeeping columns introduced by earlier processing/joins.
    arcpy.DeleteField_management(os.path.join(OutputFolder,cat_name),
        ["SubId_1", "Id","nsubid2", "nsubid","ndownsubid","Old_SubId","Old_DowSub","Join_Count","TARGET_FID","Id","SubID_Oldr","HRU_ID_N_1","HRU_ID_N_2","facters"]
    )
    if riv_name != '#':
        # Recompute centroid coordinates on the catchment output.
        arcpy.CalculateGeometryAttributes_management(os.path.join(OutputFolder, cat_name), [["centroid_x", "CENTROID_X"], ["centroid_y", "CENTROID_Y"]])
        cat_colnms = mapoldnew_info.columns
        drop_cat_colnms = cat_colnms[cat_colnms.isin(["SHAPE","SubId_1", "Id","nsubid2", "nsubid","ndownsubid","Old_DowSub","Join_Count","TARGET_FID","Id","SubID_Oldr","HRU_ID_N_1","HRU_ID_N_2","facters","Old_DowSubId"])]
        cat_pd = mapoldnew_info.drop(columns=drop_cat_colnms)
        # Keep only geometry + old id from the river file, then merge the
        # cleaned catchment attributes onto it by the old SubId.
        riv_pd = pd.DataFrame.spatial.from_featureclass(Path_final_riv)
        riv_pd['Old_SubId'] = riv_pd['SubId']
        # remove all columns
        riv_pd = riv_pd[['SHAPE','Old_SubId']]
        riv_pd = pd.merge(riv_pd, cat_pd, on='Old_SubId', how='left')
        riv_pd = riv_pd.drop(columns=['Old_SubId'])
        # Same dissolve/join/cleanup sequence for the river output.
        riv_pd.spatial.to_featureclass(location=os.path.join(tempfolder,'riv_attri.shp'),overwrite=True,sanitize_columns=False)
        arcpy.Dissolve_management(os.path.join(tempfolder,'riv_attri.shp'), os.path.join(OutputFolder,riv_name), ["SubId"])
        arcpy.JoinField_management(os.path.join(OutputFolder,riv_name), "SubId", os.path.join(tempfolder,'riv_attri.shp'), "SubId")
        arcpy.DeleteField_management(os.path.join(OutputFolder,riv_name),
            ["SubId_1", "Id","nsubid2", "nsubid","ndownsubid","Old_SubId","Old_DowSub","Join_Count","TARGET_FID","Id","SubID_Oldr","HRU_ID_N_1","HRU_ID_N_2","facters"]
        )
def clean_attribute_name_arcgis(table, names):
    """Drop every column of ``table`` whose name is not listed in ``names``.

    Returns the trimmed DataFrame; relative order of the kept columns is
    preserved.
    """
    keep = table.columns.isin(names)
    unwanted = table.columns[~keep]
    return table.drop(columns=unwanted)
|
from django.db import models
class Notification(models.Model):
    """An in-app notification for a user, optionally tied to a session."""

    title = models.CharField(max_length=255, null=True)
    description = models.CharField(max_length=100000, null=True)
    notification_type = models.CharField(max_length=255, null=True)
    # Set once when the row is created.
    date_time = models.DateTimeField(auto_now_add=True)
    # Soft-delete flag: rows are hidden rather than removed.
    is_deleted = models.BooleanField(default=False)
    is_read = models.BooleanField(default=False)
    role = models.CharField(max_length=255, null=True)
    # NOTE(review): user_id/session_id are plain columns, not ForeignKeys;
    # referential integrity is presumably enforced elsewhere -- confirm.
    user_id = models.CharField(max_length=255, null=True)
    session_id = models.BigIntegerField(null=True)

    def __str__(self):
        # title is nullable; fall back to '' so __str__ always returns a
        # str (returning None raises TypeError in the admin/templates).
        return self.title or ""

    class Meta:
        db_table = "notification"
class Session(models.Model):
    """A training session run by an organization as part of a program."""

    address = models.CharField(max_length=255, null=True)
    is_deleted = models.BooleanField(default=False, null=True)
    program_name = models.CharField(max_length=255, null=True)
    session_creator = models.CharField(max_length=255, null=True)
    session_description = models.TextField(null=True)
    # NOTE(review): start/end dates are stored as free-form strings, not
    # DateFields -- callers must agree on the format.
    session_end_date = models.CharField(max_length=255, null=True)
    session_name = models.CharField(max_length=255, null=True)
    session_start_date = models.CharField(max_length=255, null=True)
    session_status = models.IntegerField()
    # Plain id columns, not ForeignKeys.
    program_id = models.BigIntegerField(null=True)
    topic_id = models.BigIntegerField(null=True)
    training_organization = models.CharField(max_length=255, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        # session_name is nullable; never return None from __str__.
        return self.session_name or ""

    class Meta:
        db_table = "session"
class Attendance(models.Model):
    """One user's scan-in/scan-out attendance record for a session."""

    attestation_url = models.CharField(max_length=255, null=True)
    deleted = models.BooleanField(default=False)
    is_scan_in = models.BooleanField(default=False, null=True)
    is_scan_out = models.BooleanField(default=False, null=True)
    role = models.CharField(max_length=255, null=True)
    # NOTE(review): scan timestamps are free-form strings, not DateTimeFields.
    scan_in_date_time = models.CharField(max_length=255, null=True)
    scan_out_date_time = models.CharField(max_length=255, null=True)
    # Plain id columns, not ForeignKeys.
    session_id = models.BigIntegerField(null=True)
    user_id = models.CharField(max_length=255, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        # attestation_url is nullable; never return None from __str__.
        return self.attestation_url or ""

    class Meta:
        db_table = "attendance"
class Session_Links(models.Model):
    """A per-user URL associated with a session (e.g. a join link)."""

    session_url = models.CharField(max_length=255, null=True)
    # Plain id columns, not ForeignKeys.
    user_id = models.CharField(max_length=255, null=True)
    session_id = models.BigIntegerField(null=True)

    def __str__(self):
        # session_url is nullable; never return None from __str__.
        return self.session_url or ""

    class Meta:
        db_table = "session_links"
|
from django.views.decorators.csrf import csrf_exempt
from graphene_django.views import GraphQLView
# See https://github.com/PaulGilmartin/graph_wrap/issues/5 for csrf_exempt
# rationale.
@csrf_exempt
def graphql_view(request):
    """Build a GraphQL schema from the DRF API and dispatch ``request`` to it."""
    # Imported lazily so the schema is generated only when first needed.
    from graph_wrap.django_rest_framework import schema as build_schema

    graphql_schema = build_schema()
    handler = GraphQLView.as_view(schema=graphql_schema)
    return handler(request)
|
# -*- coding: utf-8 -*-
from django.test import TestCase, RequestFactory, Client
from django_webtest import WebTest
from app.tests.mixins import AuthRouteTestingWithKwargs
from app.tests.mixins import Pep8ViewsTests
from app.models import User, Schedule
import app.views as views
pto = views.pto_views
class PasswordResetPep8Tests(TestCase, Pep8ViewsTests):
    # NOTE(review): Pep8ViewsTests presumably walks self.path and runs
    # PEP8 checks over each module -- confirm against the mixin.
    def setUp(self):
        # Directory holding the paid-time-off view modules to check.
        self.path = 'app/views/users/paid_time_off/'
class UserPTOIndexRoutingAndHttpTests(TestCase, AuthRouteTestingWithKwargs):
    """
    Tests that '/users/:user_id/paid_time_off' properly handles HttpRequests and routing.
    Accepts GET and POST requests and refuses all others with an error code 405 (Method Not Allowed).
    Tested on id #55.
    """
    def setUp(self):
        self.factory = RequestFactory()
        self.client = Client()
        # Named route and its concrete URL for user id 55.
        self.route_name = 'app:pto_index'
        self.route = '/users/55/paid_time_off'
        self.view = pto.index
        # Expected HTTP status per method; non-GET/POST verbs are rejected.
        self.responses = {
            'exists': 200,
            'GET': 200,
            'POST': 200,
            'PUT': 405,
            'PATCH': 405,
            'DELETE': 405,
            'HEAD': 405,
            'OPTIONS': 405,
            'TRACE': 405
        }
        self.kwargs = {'user_id': 55}
        self.expected_response_content = 'PTO Calendar'
        # The mixin is initialized explicitly (it is not a TestCase itself).
        AuthRouteTestingWithKwargs.__init__(self)
        # NOTE(review): mixin_model presumably comes from the mixin's
        # __init__; builds the benefits fixtures the PTO views need.
        self.mixin_model.build_benefits_data()
    def test_route_post(self):
        # POST is allowed here, so replace the mixin's generic POST test:
        # run every auth variant expecting the 'No form found' content.
        if (self.responses['POST'] != 200):
            super(AuthRouteTestingWithKwargs, self).test_route_post()
        else:
            self.expected_response_content = 'No form found'
            request = self.factory.post(self.route)
            self.no_auth_test(request)
            self.good_auth_test(request)
            self.old_auth_test(request)
            self.bad_password_test(request)
            self.bad_email_test(request)
class UserViewsPTOTests(WebTest):
    """End-to-end WebTest flow: sign up a user, then exercise the PTO form."""
    def setUp(self):
        # First signup and login a user
        self.param = {'email': 'ziyang@example.com', 'first_name': 'ziyang',
                      'last_name': 'wang', 'password': 'ziyangw',
                      'confirm': 'ziyangw'}
        signup_page = self.app.get('/signup/')
        signup_form = signup_page.forms[0]
        signup_form.set('email', self.param['email'])
        signup_form.set('first_name', self.param['first_name'])
        signup_form.set('last_name', self.param['last_name'])
        signup_form.set('password', self.param['password'])
        signup_form.set('confirm', self.param['confirm'])
        signup_form.submit()
        self.user = User.objects.filter(email=self.param['email']).first()
        # Setting up PTO form
        self.url = '/users/%s/paid_time_off/' % self.user.user_id
        pto_page = self.app.get(self.url)
        self.assertEqual(len(pto_page.forms), 1)
        self.form = pto_page.forms[0]
    def test_empty_field(self):
        # Submitting the untouched form should redirect back to the PTO
        # page and flash one "cannot be empty" message per required field.
        response = self.form.submit()
        self.assertEqual(response.url, self.url)
        # NOTE(review): flash messages appear to travel in the Set-Cookie
        # header -- confirm against the view's messaging implementation.
        response_message = response._headers['Set-Cookie']
        event_name_empty_msg = "Event Name cannot be empty"
        event_desc_empty_msg = "Event Description cannot be empty"
        event_date_empty_msg = "Event Dates cannot be empty"
        self.assertTrue(event_name_empty_msg in response_message)
        self.assertTrue(event_desc_empty_msg in response_message)
        self.assertTrue(event_date_empty_msg in response_message)
    def test_success_PTO_submit(self):
        # A fully filled form should create a Schedule row and flash success.
        self.form.set('event_name', 'China')
        self.form.set('event_description', 'Travel to China')
        self.form.set('date_begin', '07/01/2017')
        self.form.set('date_end', '07/15/2017')
        response = self.form.submit()
        response_message = response._headers['Set-Cookie']
        schedule = Schedule.objects.filter(event_name='China').first()
        self.assertEqual(response.url, self.url)
        self.assertTrue(schedule)
        self.assertEqual(schedule.event_desc, 'Travel to China')
        self.assertTrue("Information successfully updated" in response_message)
        # Clean up so other tests don't see this schedule.
        schedule.delete()
|
#!/usr/bin/env python
"""Web server for the Trendy Lights application.
The overall architecture looks like:
server.py script.js
______ ____________ _________
| | | | | |
| EE | <-> | App Engine | <-> | Browser |
|______| |____________| |_________|
\ /
'- - - - - - - - - - - - - - -'
The code in this file runs on App Engine. It's called when the user loads the
web page and when details about a polygon are requested.
Our App Engine code does most of the communication with EE. It uses the
EE Python library and the service account specified in config.py. The
exception is that when the browser loads map tiles it talks directly with EE.
The basic flows are:
1. Initial page load
When the user first loads the application in their browser, their request is
routed to the get() function in the MainHandler class by the framework we're
using, webapp2.
The get() function sends back the main web page (from index.html) along
with information the browser needs to render an Earth Engine map and
the IDs of the polygons to show on the map. This information is injected
into the index.html template through a templating engine called Jinja2,
which puts information from the Python context into the HTML for the user's
browser to receive.
Note: The polygon IDs are determined by looking at the static/polygons
folder. To add support for another polygon, just add another GeoJSON file to
that folder.
2. Getting details about a polygon
When the user clicks on a polygon, our JavaScript code (in static/script.js)
running in their browser sends a request to our backend. webapp2 routes this
request to the get() method in the DetailsHandler.
This method checks to see if the details for this polygon are cached. If
yes, it returns them right away. If no, we generate a Wikipedia URL and use
Earth Engine to compute the brightness trend for the region. We then store
these results in a cache and return the result.
Note: The brightness trend is a list of points for the chart drawn by the
Google Visualization API in a time series e.g. [[x1, y1], [x2, y2], ...].
Note: memcache, the cache we are using, is a service provided by App Engine
that temporarily stores small values in memory. Using it allows us to avoid
needlessly requesting the same data from Earth Engine over and over again,
which in turn helps us avoid exceeding our quota and respond to user
requests more quickly.
"""
import json
import os
import config
import ee
import jinja2
import webapp2
from google.appengine.api import memcache
###############################################################################
# Web request handlers. #
###############################################################################
class MainHandler(webapp2.RequestHandler):
    """Serves the main Trendy Lights web page with EE map info injected."""

    def get(self, path=''):
        """Render index.html populated with the EE map id/token and polygon IDs."""
        trendy_map = GetTrendyMapId()
        context = {
            'eeMapId': trendy_map['mapid'],
            'eeToken': trendy_map['token'],
            'serializedPolygonIds': json.dumps(POLYGON_IDS),
        }
        page = JINJA2_ENVIRONMENT.get_template('index.html')
        self.response.out.write(page.render(context))
class DetailsHandler(webapp2.RequestHandler):
    """Serves JSON details (wiki link + brightness series) for one polygon."""

    def get(self):
        """Look up the requested polygon and write its details as JSON."""
        requested_id = self.request.get('polygon_id')
        if requested_id not in POLYGON_IDS:
            payload = json.dumps({'error': 'Unrecognized polygon ID: ' + requested_id})
        else:
            payload = GetPolygonTimeSeries(requested_id)
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(payload)
# Define webapp2 routing from URL paths to web request handlers. See:
# http://webapp-improved.appspot.com/tutorials/quickstart.html
# NOTE(review): routes appear to be matched in order, so /details must
# precede the catch-all / -- confirm against webapp2 routing docs.
app = webapp2.WSGIApplication([
    ('/details', DetailsHandler),
    ('/', MainHandler),
])
###############################################################################
# Helpers. #
###############################################################################
# Endmembers derived from the Auscover Field Sites Database using attached ipythron notebook
# Overall global RMSE unmixing error is 13.1%
# RMSE of the derived fractions against 675 field sites is:
# Bare: 0.11959856
# Dead: 0.14945009
# Green: 0.12286588
# Builds Interactive Terms
def applytransforms(mcd43Image):
    """Build the band-interaction image used for endmember unmixing.

    Selects six MCD43A4 nadir reflectance bands, renames them band2..band7
    and rescales them as (DN + 1) / 10000, then stacks: pairwise products
    of the linear bands, products of each linear band with the log bands,
    pairwise products of the log bands, the linear and log bands
    themselves, and a constant 0.25 intercept band.
    """
    # Select the algorithm bands and rescale to Qld RSC values
    useBands = mcd43Image.select(["Nadir_Reflectance_Band4", "Nadir_Reflectance_Band1", "Nadir_Reflectance_Band2", "Nadir_Reflectance_Band5","Nadir_Reflectance_Band6","Nadir_Reflectance_Band7"],["band2","band3","band4","band5","band6","band7"]).add(1).divide(10000)
    logBands = useBands.log();
    # Combine the bands into a new image
    # Note that this line is missing logBands.expression("b('band4') * b('band7')"),
    return ee.Image.cat(
        useBands.expression("b('band2') * b('band3')"),
        useBands.expression("b('band2') * b('band4')"),
        useBands.expression("b('band2') * b('band5')"),
        useBands.expression("b('band2') * b('band6')"),
        useBands.expression("b('band2') * b('band7')"),
        useBands.expression("b('band2') * logs", {'logs': logBands}),
        useBands.expression("b('band3') * b('band4')"),
        useBands.expression("b('band3') * b('band5')"),
        useBands.expression("b('band3') * b('band6')"),
        useBands.expression("b('band3') * b('band7')"),
        useBands.expression("b('band3') * logs", {'logs': logBands}),
        useBands.expression("b('band4') * b('band5')"),
        useBands.expression("b('band4') * b('band6')"),
        useBands.expression("b('band4') * b('band7')"),
        useBands.expression("b('band4') * logs", {'logs': logBands}),
        useBands.expression("b('band5') * b('band6')"),
        useBands.expression("b('band5') * b('band7')"),
        useBands.expression("b('band5') * logs", {'logs': logBands}),
        useBands.expression("b('band6') * b('band7')"),
        useBands.expression("b('band6') * logs", {'logs': logBands}),
        useBands.expression("b('band7') * logs", {'logs': logBands}),
        logBands.expression("b('band2') * b('band3')"),
        logBands.expression("b('band2') * b('band4')"),
        logBands.expression("b('band2') * b('band5')"),
        logBands.expression("b('band2') * b('band6')"),
        logBands.expression("b('band2') * b('band7')"),
        logBands.expression("b('band3') * b('band4')"),
        logBands.expression("b('band3') * b('band5')"),
        logBands.expression("b('band3') * b('band6')"),
        logBands.expression("b('band3') * b('band7')"),
        logBands.expression("b('band4') * b('band5')"),
        logBands.expression("b('band4') * b('band6')"),
        logBands.expression("b('band5') * b('band6')"),
        logBands.expression("b('band5') * b('band7')"),
        logBands.expression("b('band6') * b('band7')"),
        useBands,
        logBands,
        # Constant intercept band for the unmixing design matrix.
        ee.Image(0.25))
def GetTrendyMapId():
    """Return the EE MapID for the median fractional-cover composite.

    Unmixes each MCD43A4 image against the bare/green/dead endmembers,
    takes the median of the three most recent results, and returns the
    MapID dict ({'mapid': ..., 'token': ...}) for display.
    """
    # Import MODIS Imagery and sort from most recent
    mcd43a4 = ee.ImageCollection('MODIS/MCD43A4').sort('system:time_start', False )
    # Apply the variable transforms and unmix
    unmixedCollection = mcd43a4.map(lambda img:applytransforms(img).unmix([end_bare,end_gren,end_dead]).select(["band_0","band_1","band_2"]))
    # Get the latest image
    #latestImage = mcd43a4.sort('system:time_start', False ).limit(1).median()
    #transformedImage = applytransforms(latestImage)
    # Compute the cover Fractions
    #coverFractions = transformedImage.unmix([end_bare,end_gren,end_dead]).select(["band_0","band_1","band_2"])
    coverFractions = unmixedCollection.limit(3).median()
    #return ee.Image(latestImage).select('Nadir_Reflectance_Band6', 'Nadir_Reflectance_Band2', 'Nadir_Reflectance_Band1').getMapId({'min': '100,100,100', 'max': '4000,4000,4000'})
    return ee.Image(coverFractions).getMapId({'min': '0.1,0.0,0.15', 'max': '0.85,0.50,0.85'})
# """Returns the MapID for the night-time lights trend map."""
# collection = ee.ImageCollection(IMAGE_COLLECTION_ID)
# # Add a band containing image date as years since 1991.
# def CreateTimeBand(img):
# year = ee.Date(img.get('system:time_start')).get('year').subtract(1991)
# return ee.Image(year).byte().addBands(img)
# collection = collection.select('stable_lights').map(CreateTimeBand)
# # Fit a linear trend to the nighttime lights collection.
# fit = collection.reduce(ee.Reducer.linearFit())
# return fit.getMapId({
# 'min': '0',
# 'max': '0.18,20,-0.18',
# 'bands': 'scale,offset,scale',
# })
def GetPolygonTimeSeries(polygon_id):
    """Returns details about the polygon with the passed-in ID.

    Always returns a JSON string: on a cache hit the memcached string is
    returned directly; on a miss the time series is computed, cached,
    and serialized. EE errors are reported inside the payload rather
    than raised.
    """
    details = memcache.get(polygon_id)
    # If we've cached details for this polygon, return them.
    if details is not None:
        return details
    details = {'wikiUrl': WIKI_URL + polygon_id.replace('-', '%20')}
    try:
        details['timeSeries'] = ComputePolygonTimeSeries(polygon_id)
        # Store the results in memcache.
        memcache.add(polygon_id, json.dumps(details), MEMCACHE_EXPIRATION)
    except ee.EEException as e:
        # Handle exceptions from the EE client library.
        details['error'] = str(e)
    # Send the results to the browser.
    return json.dumps(details)
def ComputePolygonTimeSeries(polygon_id):
    """Returns a series of brightness over time for the polygon.

    The result is a sequence of [time_start_ms, mean_stable_lights]
    pairs, oldest first. NOTE(review): written for Python 2 -- under
    Python 3 the trailing map() would be a lazy iterator, not a list.
    """
    collection = ee.ImageCollection(IMAGE_COLLECTION_ID)
    collection = collection.select('stable_lights').sort('system:time_start')
    feature = GetFeature(polygon_id)
    # Compute the mean brightness in the region in each image.
    def ComputeMean(img):
        reduction = img.reduceRegion(
            ee.Reducer.mean(), feature.geometry(), REDUCTION_SCALE_METERS)
        return ee.Feature(None, {
            'stable_lights': reduction.get('stable_lights'),
            'system:time_start': img.get('system:time_start')
        })
    # getInfo() triggers the server-side computation and downloads it.
    chart_data = collection.map(ComputeMean).getInfo()
    # Extract the results as a list of lists.
    def ExtractMean(feature):
        return [
            feature['properties']['system:time_start'],
            feature['properties']['stable_lights']
        ]
    return map(ExtractMean, chart_data['features'])
def GetFeature(polygon_id):
    """Returns an ee.Feature for the polygon with the given ID."""
    # Polygon IDs map to GeoJSON files: "sample-id" -> static/polygons/sample-id.json
    # (the IDs are read from the filesystem in the initialization section).
    relative = POLYGON_PATH + polygon_id + '.json'
    base_dir = os.path.split(__file__)[0]
    full_path = os.path.join(base_dir, relative)
    with open(full_path) as geojson_file:
        return ee.Feature(json.load(geojson_file))
###############################################################################
# Constants. #
###############################################################################
# Memcache is used to avoid exceeding our EE quota. Entries in the cache expire
# 24 hours after they are added. See:
# https://cloud.google.com/appengine/docs/python/memcache/
MEMCACHE_EXPIRATION = 60 * 60 * 24
# The ImageCollection of the night-time lights dataset. See:
# https://earthengine.google.org/#detail/NOAA%2FDMSP-OLS%2FNIGHTTIME_LIGHTS
IMAGE_COLLECTION_ID = 'NOAA/DMSP-OLS/NIGHTTIME_LIGHTS'
# The file system folder path to the folder with GeoJSON polygon files.
POLYGON_PATH = 'static/polygons/'
# The scale at which to reduce the polygons for the brightness time series.
REDUCTION_SCALE_METERS = 20000
# The Wikipedia URL prefix.
WIKI_URL = 'http://en.wikipedia.org/wiki/'
# The Computed Endmembers
end_gren = [1.005625793718393224e-01,1.551381619046308113e-01,1.792972445650116153e-01,1.756139193904675544e-01,1.409032551487235385e-01,-4.779793393912765698e-01,-3.863266414066787169e-01,-2.117543746254724746e-01,-1.678902136214300567e-01,-1.745431337396821381e-01,-2.403627985684871349e-01,1.985669393903456148e-01,1.986106673443171489e-01,1.798445329687358984e-01,1.347556128418881671e-01,-4.230897452709314055e-01,-4.306222970080941792e-01,-1.784168435713744949e-01,-2.087463228039195817e-01,-2.525836273873255378e-01,-3.519714061731354926e-01,2.493734504017069697e-01,2.756349644337070526e-01,2.214416586870512071e-01,-3.947699254951768655e-01,-2.140265169616704932e-01,-4.541274778620738029e-01,-3.787141551387274707e-01,-2.212936799517399578e-01,-1.761121705287621297e-01,2.131049159247370151e-01,1.555961382991326025e-01,-1.965963040151643970e-01,-2.830078567396365208e-01,-3.215068847604295454e-01,-4.270254941278601168e-01,-3.920532077778883795e-01,-4.393438610960144763e-01,1.139614936774503429e-01,-1.311489035400711933e-01,-3.933544485165051396e-01,-1.371423398967817342e-01,-3.626460253983346815e-01,-4.545950218460690917e-01,-5.836848311142593948e-01,-3.191089493163550700e-02,-3.224051458458987440e-01,-5.532773510021347929e-02,-2.759065442549579195e-01,-3.693207960693587477e-01,-4.824883750352684242e-01,-5.599811410645313403e-01,-4.618333710369355583e-01,2.638597724326164351e-01,3.657374965386450683e-01,3.747929451692371683e-01,6.583876748674915014e-01,5.451015751013433830e-01,1.083009465127019316e-01,-1.935712814253734149e-01,-6.658464327485663636e-01,7.608717656278304875e-02,-3.177089270701012325e-01,-5.280598956024164931e-02,-5.452106237221799878e-01,3.388294075045473752e-01,3.928265454868017370e-01,5.456166415438652439e-01,5.396204163342579463e-01,5.018642612505221923e-01,3.783475769849550807e-01,-5.231001864092722498e-01,-7.037557367909558215e-01,-2.423698219956213484e-01,-3.843516530893263949e-01,-4.458789415024453362e-01,-6.129339166090426172e-01,0.25];
end_dead = [1.403263555258253970e-01,1.740844187309274482e-01,2.143776613700686950e-01,2.255999818141902202e-01,1.798486122880957883e-01,-5.094852507233389449e-01,-3.943909593752877307e-01,-3.271005665360072756e-01,-2.398086866572867459e-01,-2.170372016296666096e-01,-2.928484835748055848e-01,2.223184524922829086e-01,2.575036368121275121e-01,2.602762597110128695e-01,1.931895170928574212e-01,-4.820346521695523245e-01,-4.530716612145118116e-01,-3.526026536396565381e-01,-2.871558826934162978e-01,-2.870497154688096408e-01,-4.239204810729221284e-01,2.484599564187424114e-01,2.861343176253623999e-01,2.281157131292944340e-01,-2.709993459043758546e-01,-1.894416977490074316e-01,-4.114788420817422909e-01,-3.305979743382004843e-01,-2.178179862993682714e-01,-1.965442916311349320e-01,3.017090569531513111e-01,2.272570720493811425e-01,-1.660063649921281748e-01,-2.078556996556621961e-01,-4.160412189662338611e-01,-3.834399519794880473e-01,-3.046072308937279871e-01,-3.455947654101590438e-01,2.145926981364830732e-01,-1.759330674468164712e-01,-3.226994801832344106e-01,-3.554447967112595030e-01,-3.608555462410469872e-01,-3.653556708703253886e-01,-5.288465893028195808e-01,4.871148572665796872e-02,-1.921852869110349249e-01,-1.538614444787815561e-01,-2.039512166097949830e-01,-2.536145208364682380e-01,-4.333713519267466396e-01,-3.290012282223230278e-01,-2.558320130370689283e-01,9.049690808748311888e-02,2.429555937637380980e-01,2.593593821267571875e-01,3.562463529501999071e-01,3.307446747853375335e-01,1.799196323512836926e-01,-2.219392694669459487e-01,-3.576993626520880709e-01,1.845227897749066731e-02,-1.685206360984441709e-01,5.580296776298742517e-02,-4.145629855116072515e-01,4.135862588851957344e-01,4.966752147834995745e-01,5.020998941719694297e-01,5.618702492116707248e-01,5.940529187364148589e-01,4.158125345134185968e-01,-3.348135919834706042e-01,-4.891960223481561787e-01,-4.775706041789192779e-01,-4.063217768189702483e-01,-3.902195969483839844e-01,-5.475248235551297693e-01,0.25];
end_bare = [1.557756133661153952e-01,1.802192110730069519e-01,2.214149969321710376e-01,2.378169515722290961e-01,2.167823936748812796e-01,-5.003139008582664360e-01,-3.858620353742320264e-01,-3.421557006601864126e-01,-2.561822930393134468e-01,-2.312584786760632782e-01,-2.534492576792338192e-01,2.320565217858724938e-01,2.736918995210841365e-01,2.878776178964150834e-01,2.581029762878466194e-01,-4.745006463997653023e-01,-4.212879123400909420e-01,-3.806564066788480361e-01,-3.037867195241924501e-01,-2.907748187098708748e-01,-3.319295543017048988e-01,2.418232712425302799e-01,2.813392488119939583e-01,2.763479007400641008e-01,-2.553586812522712912e-01,-2.148104467732427636e-01,-4.048503104537182762e-01,-3.368292685824204602e-01,-2.339882670913794593e-01,-1.459367612878482712e-01,2.994817350480266094e-01,2.997062696318842923e-01,-1.429721098700335868e-01,-2.050814814822079502e-01,-4.074130135695290811e-01,-3.783440378975941876e-01,-3.143608342429669023e-01,-2.387095954855755486e-01,3.026018626233351050e-01,-5.715470865161947911e-02,-1.997813302239145050e-01,-2.918934998203080999e-01,-3.067641746997048502e-01,-3.341194694934919718e-01,-3.347398350315027860e-01,-6.561644819416791174e-02,-2.114976663325430772e-01,-2.424937905246192793e-01,-2.467185800323841716e-01,-2.872824680797123054e-01,-3.344593134383595512e-01,-2.846135649729091277e-01,-2.302400388348356253e-01,6.664500979015522408e-02,2.303978017433380432e-01,2.281681038181598287e-01,2.555933168357400476e-01,2.387943621178491016e-01,7.191996580933962546e-02,-1.306805956480629749e-01,-3.364273224868226664e-01,1.181603266047939471e-01,-1.141124425180693319e-01,1.052852993043106866e-01,-4.738702699799992590e-01,4.284487379128869566e-01,5.211981857016393382e-01,4.921858390252278892e-01,5.432637332898500038e-01,5.550396166730965364e-01,5.269385781005170299e-01,-3.468334523171409667e-01,-5.224070998521183062e-01,-5.006934427040702351e-01,-4.453181641709889060e-01,-4.792341641719014556e-01,-4.946308016878979696e-01,0.25];
###############################################################################
# Initialization. #
###############################################################################
# Use our App Engine service account's credentials.
EE_CREDENTIALS = ee.ServiceAccountCredentials(config.EE_ACCOUNT, config.EE_PRIVATE_KEY_FILE)
# Read the polygon IDs from the file system.
# Each static/polygons/<id>.json file contributes one selectable polygon.
POLYGON_IDS = [name.replace('.json', '') for name in os.listdir(POLYGON_PATH)]
# Create the Jinja templating system we use to dynamically generate HTML. See:
# http://jinja.pocoo.org/docs/dev/
JINJA2_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    autoescape=True,
    extensions=['jinja2.ext.autoescape'])
# Initialize the EE API. Runs at import time, so the module fails fast
# if the credentials are rejected.
ee.Initialize(EE_CREDENTIALS)
|
import simulation
import matplotlib.pyplot as plt
# ticks are in seconds
tick = 1
sim_duration = 60 * 60 * 6  # six hours of simulated time
speed_kmh = 60  # constant cruise speed fed to the motor model
# Initialize all simulation classes
incident_sunlight = 1000  # NOTE(review): presumably W/m^2 -- confirm units
initial_battery_charge = 0.9  # initial state of charge (fraction of full)
lvs_power_loss = 0  # low-voltage-system draw; scaled by tick below
basic_array = simulation.BasicArray(incident_sunlight)
basic_array.set_produced_energy(0)
basic_battery = simulation.BasicBattery(initial_battery_charge)
basic_lvs = simulation.BasicLVS(lvs_power_loss * tick)
basic_motor = simulation.BasicMotor()
# For plotting purposes
batt_charge = []
batt_voltage = []
time = []
for i in range(sim_duration):
    # Get produced energy from arrays
    basic_array.update(tick)
    produced_energy = basic_array.get_produced_energy()
    # Get consumed energy from LVS
    basic_lvs.update(tick)
    lvs_consumed_energy = basic_lvs.get_consumed_energy()
    # Get consumed energy from motor
    basic_motor.update(tick)
    basic_motor.calculate_power_in(speed_kmh)
    motor_consumed_energy = basic_motor.get_consumed_energy()
    # Add up energy balance on the battery
    basic_battery.update(tick)
    basic_battery.charge(produced_energy)
    try:
        basic_battery.discharge(lvs_consumed_energy)
        basic_battery.discharge(motor_consumed_energy)
    except simulation.BatteryEmptyError as exc:
        # Battery hit empty: report it but keep the simulation running.
        print(exc)
    battery_energy = basic_battery.get_stored_energy()
    battery_charge = basic_battery.get_state_of_charge()
    battery_voltage = basic_battery.get_output_voltage()
    # For plotting purposes, sample every minute
    if i % 60 == 0:
        batt_charge.append(battery_charge)
        batt_voltage.append(battery_voltage)
        time.append(int(i / 60))
# Plot SOC vs time curve
plt.plot(time, batt_charge)
plt.xlabel("time in minutes")
plt.ylabel("% SOC")
plt.ylim(0, 1.0)
plt.title("% SOC vs time at {} kmh".format(speed_kmh))
plt.show()
|
import myth.frontend
import socket
# Create a Frontend connected to host 10.1.1.6, port 6543; the commented
# blocks below are scratch protocol experiments kept for reference.
frontend = myth.frontend.Frontend(('10.1.1.6', 6543))
#sock = socket.socket()
#sock.connect(('10.1.1.6', 6543))
#version = myth.protocol.ProtocolVersion(sock, 31)
#version.send()
#version.recv()
#ann = myth.protocol.Announce(sock, 'chipotle')
#ann.send()
#ann.recv()
#filetrans = myth.protocol.AnnounceFileTransfer(sock, 'chipotle', '/2131_20061108060653.mpg')
#filetrans.send()
#filetrans.recv()
#print 'Socket: ', filetrans.getSocket()
#print 'Size: ', filetrans.getSize()
#slave = myth.protocol.AnnounceSlave(sock, 'chipotle.csh.rit.edu')
#slave.send()
#slave.recv()
#recordings = myth.protocol.QueryRecordings(sock)
#recordings.send()
#recordings.recv()
#rec = recordings.getRecordings()
#freespace = myth.protocol.QueryFreeSpace(sock)
#freespace.send()
#freespace.recv()
#print repr(freespace.getSpace())
#load = myth.protocol.QueryLoad(sock)
#load.send()
#load.recv()
#print repr(load.getLoads())
#uptime = myth.protocol.QueryUptime(sock)
#uptime.send()
#uptime.recv()
#print uptime.getUptime()
#mem = myth.protocol.QueryMemStats(sock)
#mem.send()
#mem.recv()
#print 'Virtual: ', repr(mem.getVirtual())
#print 'Physical:', repr(mem.getPhysical())
#checkfile = myth.protocol.QueryCheckFile(sock, '2131_20061108060653.mpg')
#checkfile.send()
#checkfile.recv()
#print 'File exists: ', repr(checkfile.exists())
#guidedata = myth.protocol.QueryGuideDataThrough(sock)
#guidedata.send()
#guidedata.recv()
#print "Guide data available until: ", repr(guidedata.getDate())
#stoprecord = myth.protocol.StopRecording(sock, rec[0])
#stoprecord.send()
#stoprecord.recv()
#checkrecord = myth.protocol.CheckRecording(sock, rec[12])
#checkrecord.send()
#checkrecord.recv()
#print rec[12]
#print 'Starttime: ', rec[12].getStartTime()
#for i in range(len(rec)):
# print i, '\t', rec[i].getTitle()
#deleterec = myth.protocol.DeleteRecording(sock, rec[12])
#deleterec.send()
#deleterec.recv()
#resched = myth.protocol.RescheduleRecordings(sock, rec[12].getRecordID())
#resched.send()
#resched.recv()
#forget = myth.protocol.ForgetRecording(sock, rec[12])
#forget.send()
#forget.recv()
#pending = myth.protocol.QueryGetAllPending(sock)
#pending.send()
#pending.recv()
#rec = pending.getPending()
#for i in range(len(rec)):
# print rec[i].getTitle()
#print len(rec), 'pending recordings'
#scheduled = myth.protocol.QueryGetAllPending(sock, scheduled=True)
#scheduled.send()
#scheduled.recv()
#sch = scheduled.getPending()
#for i in sch:
# print i.getTitle()
#print len(sch), 'scheduled recordings'
#conflict = myth.protocol.QueryGetConflicting(sock, rec[0])
#conflict.send()
#conflict.recv()
#expire = myth.protocol.QueryGetAllExpiring(sock)
#expire.send()
#expire.recv()
#for i in expire.getExpiring():
# print i.getTitle()
#freeRec = myth.protocol.GetFreeRecorder(sock)
#freeRec.send()
#freeRec.recv()
#freeCount = myth.protocol.GetFreeRecorderCount(sock)
#freeCount.send()
#freeCount.recv()
#freeList = myth.protocol.GetFreeRecorderList(sock)
#freeList.send()
#freeList.recv()
#rec = myth.protocol.QueryRecorder(sock, 1)
#rec.send()
#rec.recv()
#isrec = myth.protocol.RecorderIsRecording(sock, 2)
#isrec.send()
#isrec.recv()
#print repr(isrec.getRecording())
#done = myth.protocol.Done(sock)
#done.send()
#done.recv()
#sock.close()
|
#!/usr/bin/env python
import os
import argparse
import imp
import cPickle as pkl
import numpy as np
import shutil
import cv2
import datetime
import glob
import sys
import json
# TODO simplify this class
class RobotEnvironment:
    """Drives data-collection / benchmark rollouts on a real robot.

    Builds an agent and policy from the hyperparameter dict ``conf``,
    checkpoints trajectory progress to disk, and saves raw trajectory data
    (images, observation dicts, policy outputs) after each sample.
    (Python 2 code: uses xrange/raw_input/cPickle.)
    """

    def __init__(self, exp_path, robot_name, conf, resume=False, ngpu=1, gpu_id=0, is_bench=False, env_metadata=None):
        """Configures the run and instantiates agent + policy.

        Args:
            exp_path: path to the experiment hyperparameter file.
            robot_name: robot identifier; selects per-robot overrides and
                namespaces the save directories.
            conf: hyperparameter dict with 'agent' and 'policy' entries.
            resume: False for a fresh run, -1 to resume from the pickled
                checkpoint, or an int trajectory index to resume from.
            ngpu: number of GPUs forwarded to the policy constructor.
            gpu_id: GPU id forwarded to the policy constructor.
            is_bench: True when running a named benchmark experiment.
            env_metadata: optional dict saved once as hparams.json next to
                the collected data (workspace bounds filled in at save time).
        """
        self._env_metadata, self._saved_metadata = env_metadata, False
        self._start_time = datetime.datetime.now()
        # per-robot overrides may patch agent / env / policy params
        if 'override_{}'.format(robot_name) in conf:
            override_params = conf['override_{}'.format(robot_name)]
            conf['agent'].update(override_params.get('agent', {}))
            conf['agent']['env'][1].update(override_params.get('env_params', {}))
            conf['policy'].update(override_params.get('policy', {}))

        # sets maximum number of re-tries in case of failure in environment
        if 'imax' not in conf['agent']:
            conf['agent']['imax'] = 5

        # on cluster infra, redirect saves under RESULT_DIR, keeping the
        # path suffix after the 'experiments' directory component (if any)
        if 'RESULT_DIR' in os.environ:
            exp_path = exp_path.split('/')
            exp_index = min(max([i for i, v in enumerate(exp_path) if v == 'experiments'] + [0]) + 1, len(exp_path) - 1)
            exp_name = '/'.join(exp_path[exp_index:])
            conf['agent']['data_save_dir'] = '{}/{}'.format(os.environ['RESULT_DIR'], exp_name)

        self._hyperparams = conf
        self.agentparams, self.policyparams, self.envparams = conf['agent'], conf['policy'], conf['agent']['env'][1]
        self.envparams['robot_name'] = self.agentparams['robot_name'] = robot_name
        self._is_bench = is_bench
        if is_bench:
            self.task_mode = '{}/{}'.format(robot_name, conf.get('experiment_name', 'exp'))
            self.agentparams['env'][1]['start_at_neutral'] = True   # robot should start at neutral during benchmarks
        else:
            self.task_mode = '{}/{}'.format(robot_name, conf.get('mode', 'train'))

        self._ngpu = ngpu
        self._gpu_id = gpu_id

        #since the agent interacts with Sawyer, agent creation handles recorder/controller setup
        self.agent = self.agentparams['type'](self.agentparams)
        self.policy = self.policyparams['type'](self.agentparams, self.policyparams, self._gpu_id, self._ngpu)

        robot_dir = self.agentparams['data_save_dir'] + '/{}'.format(robot_name)
        if not os.path.exists(robot_dir):
            os.makedirs(robot_dir)

        # checkpoint tracks how many trajectories were collected so far
        self._ck_path = self.agentparams['data_save_dir'] + '/{}/checkpoint.pkl'.format(robot_name)
        self._ck_dict = {'ntraj': 0, 'broken_traj': []}
        if resume:
            if resume == -1 and os.path.exists(self._ck_path):
                with open(self._ck_path, 'rb') as f:
                    self._ck_dict = pkl.load(f)
            else:
                self._ck_dict['ntraj'] = max(int(resume), 0)
        self._hyperparams['start_index'] = self._ck_dict['ntraj']

    def run(self):
        """Collects trajectories from start_index to end_index, or — in
        benchmark mode — until the operator stops confirming."""
        if not self._is_bench:
            for i in xrange(self._hyperparams['start_index'], self._hyperparams['end_index']):
                self.take_sample(i)
        else:
            itr = 0
            continue_collection = True
            while continue_collection:
                self.take_sample(itr)
                itr += 1
                continue_collection = 'y' in raw_input('Continue collection? (y if yes):')
        self.agent.cleanup()

    def _get_bench_name(self):
        """Prompts the operator for a benchmark name of >= 2 characters."""
        name = raw_input('input benchmark name: ')
        while len(name) < 2:
            print('please choose a name > 2 characters long')
            name = raw_input('input benchmark name: ')
        return name

    def take_sample(self, sample_index):
        """Rolls out one trajectory, optionally saves it, and checkpoints.

        Args:
            sample_index: index of the trajectory within this run.
        """
        data_save_dir = self.agentparams['data_save_dir'] + '/' + self.task_mode

        if self._is_bench:
            bench_name = self._get_bench_name()
            traj_folder = '{}/{}'.format(data_save_dir, bench_name)
            self.agentparams['_bench_save'] = '{}/exp_data'.format(traj_folder)   # probably should develop a better way
            self.agentparams['benchmark_exp'] = bench_name                        # to pass benchmark info to agent
            self.agentparams['record'] = traj_folder + '/traj_data/record'
            print("Conducting experiment: {}".format(bench_name))
            traj_folder = traj_folder + '/traj_data'
            if os.path.exists(traj_folder):
                shutil.rmtree(traj_folder)
            os.makedirs(traj_folder)
        else:
            start_str = self._start_time.strftime('%b_%d_%Y_%H:%M:%S')
            group_folder = data_save_dir + '/collection_started_{}'.format(start_str)
            traj_folder = group_folder + '/traj{}'.format(sample_index)
            print("Collecting sample {}".format(sample_index))

        agent_data, obs_dict, policy_out = self.agent.sample(self.policy, sample_index)

        if self._hyperparams['save_data']:
            self._save_raw_images(traj_folder, agent_data, obs_dict, policy_out)

        # NOTE(review): ntraj is incremented and checkpointed even when
        # save_data is False — presumably intentional; confirm.
        self._ck_dict['ntraj'] += 1
        ck_file = open(self._ck_path, 'wb')
        pkl.dump(self._ck_dict, ck_file)
        ck_file.close()
        print("CHECKPOINTED")

    def _save_raw_images(self, traj_folder, agent_data, obs_dict, policy_outputs):
        """Writes one trajectory's images and pickled records to traj_folder.

        Pops 'images' / 'goal_image' out of obs_dict (saved as jpegs rather
        than pickled), writes the env metadata json once per collection, then
        pickles agent_data, obs_dict and policy_outputs.
        """
        if not self._is_bench:
            # benchmark mode pre-creates the folder in take_sample
            if os.path.exists(traj_folder):
                shutil.rmtree(traj_folder)
            os.makedirs(traj_folder)

        if self._env_metadata and not self._saved_metadata:
            # record workspace bounds from the first saved trajectory
            self._env_metadata['environment_size'] = (obs_dict['high_bound'][0] - obs_dict['low_bound'][0]).tolist()
            self._env_metadata['low_bound'] = obs_dict['low_bound'][0].tolist()
            self._env_metadata['high_bound'] = obs_dict['high_bound'][0].tolist()
            save_path = '/'.join(traj_folder.split('/')[:-1]) + '/hparams.json'
            assert not os.path.exists(save_path), "json already exists!"
            json.dump(self._env_metadata, open(save_path, 'w'))
            self._saved_metadata = True

        print('saving data to ', traj_folder)

        if 'images' in obs_dict:
            images = obs_dict.pop('images')
            T, n_cams = images.shape[:2]
            for i in range(n_cams):
                os.mkdir(traj_folder + '/images{}'.format(i))
            for t in range(T):
                for i in range(n_cams):
                    # channel order flipped for cv2 (RGB -> BGR presumably)
                    cv2.imwrite('{}/images{}/im_{}.jpg'.format(traj_folder, i, t), images[t, i, :, :, ::-1])
        if 'goal_image' in obs_dict:
            goal_images = obs_dict.pop('goal_image')
            for n in range(goal_images.shape[0]):
                # assumes goal images are floats in [0, 1] — TODO confirm
                cv2.imwrite('{}/goal_image{}.jpg'.format(traj_folder, n),
                            (goal_images[n, :, :, ::-1] * 255).astype(np.uint8))

        with open('{}/agent_data.pkl'.format(traj_folder), 'wb') as file:
            pkl.dump(agent_data, file)
        with open('{}/obs_dict.pkl'.format(traj_folder), 'wb') as file:
            pkl.dump(obs_dict, file)
        with open('{}/policy_out.pkl'.format(traj_folder), 'wb') as file:
            pkl.dump(policy_outputs, file)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('robot_name', type=str, help="name of robot we're running on")
    parser.add_argument('experiment', type=str, help='experiment name')
    parser.add_argument('-r', nargs='?', dest='resume', const=-1,
                        default=False, help='Set flag if resuming training (-r if from checkpoint, -r <traj_num> otherwise')
    parser.add_argument('--gpu_id', type=int, default=0, help='value to set for cuda visible devices variable')
    parser.add_argument('--ngpu', type=int, default=1, help='number of gpus to use')
    parser.add_argument('--benchmark', action='store_true', default=False,
                        help='Add flag if this experiment is a benchmark')
    args = parser.parse_args()

    # load the experiment's hyperparameter module directly from its file path
    hyperparams = imp.load_source('hyperparams', args.experiment)
    conf = hyperparams.config

    # pick up optional env metadata json sitting next to the experiment file;
    # require exactly one candidate and operator confirmation before using it
    env_data = None
    possible_metadata = glob.glob('/'.join(args.experiment.split('/')[:-1]) + '/*.json')
    if len(possible_metadata) == 1:
        env_data = json.load(open(possible_metadata[0], 'r'))
        print("METADATA LOADED")
        for k, v in env_data.items():
            print("{}= {}".format(k, v))
        assert raw_input('Everything okay? (y to continue): ') == 'y'
    else:
        print("Can't load meta-data!")
        import time
        time.sleep(3.0)   # add annoying warning

    env = RobotEnvironment(args.experiment, args.robot_name, conf, args.resume, args.ngpu, args.gpu_id, args.benchmark, env_data)
    env.run()
|
"""Dicionário
Escreva uma função que simula a função dict() do Python.
"""
def myDict(**keywords) -> dict:
    """Simulates the built-in dict() constructor for keyword arguments.

    Returns a dict mapping each keyword name to its value, e.g.
    ``myDict(a=1)`` -> ``{'a': 1}``.

    Note: the original annotation ``**keywords: dict`` incorrectly claimed
    that every *value* is a dict; values may be of any type, so the
    annotation was removed.
    """
    # **keywords already collects the keyword arguments into a dict
    return keywords


print(myDict(a=10, b=20, c=30, d=40, e=50))
|
import numpy as np
import cv2
import gc
class Image(object):
    """Base class wrapping an image array with window-extraction helpers."""

    def __init__(self, image=None):
        """Optionally seed the instance with an already-loaded image array."""
        if image is None:
            return
        self.image = image

    def imread(self, path):
        """Load the image at *path* from disk via OpenCV."""
        self.image = cv2.imread(path)

    def extract_convWin(self, x, y, offset, xOff=None, yOff=None):
        """Return a square window of side *offset* anchored at (x, y).

        The returned dict carries the window itself ("win"), its anchor
        coordinates ("i", "j"), and optional output-grid coordinates
        ("x", "y").
        """
        window = self.image[x:x + offset, y:y + offset]
        return {"win": window, "i": x, "j": y, "x": xOff, "y": yOff}

    def save_image(self, path):
        """Write the current image to *path* via OpenCV."""
        cv2.imwrite(path, self.image)
class ImageGen(Image):
    """Image pre-processing utilities (currently: dropout-style noise)."""

    def __init__(self, image=None):
        """Forward the optional image to Image and reset the noise level."""
        super(ImageGen, self).__init__(image)
        self.delta = None  # last noise fraction applied; None before any call

    def add_noise(self, delta):
        """Zero out roughly a *delta* fraction of pixels in-place.

        Draws a uniform [0, 1) mask the size of the image, maps values above
        *delta* to 1 and the rest to 0, then multiplies the image by it.
        """
        self.delta = delta
        rows, cols = self.image.shape[0], self.image.shape[1]
        mask = np.random.rand(rows, cols)
        mask[mask > delta] = 1
        mask[mask <= delta] = 0
        self.image = self.image * mask
class convWins_Generator(Image):
    """Generators over the possible convolution windows of the image."""

    def __init__(self, image=None):
        super(convWins_Generator, self).__init__(image)

    def extract_jumpingWins(self, offset):
        """Yield non-overlapping windows of side *offset* (stride = offset).

        Each yielded dict carries output-grid coordinates ("x", "y") so
        callers can write results into a downsampled array.
        """
        # Derive the grid indices directly from the loop counters.  The
        # original looked them up with iRange.index(i) / jRange.index(j),
        # an O(n) list search per window (O(n^2) overall per axis).
        for xOff in range(self.image.shape[0] // offset):
            for yOff in range(self.image.shape[1] // offset):
                yield self.extract_convWin(xOff * offset, yOff * offset, offset, xOff, yOff)

    def extract_slidingWins(self, offset):
        """Yield every stride-1 window of side *offset* that fits fully
        inside the image."""
        # range(dim - offset + 1) is exactly the set of anchors x with
        # x + offset - 1 < dim, as in the original list-comprehension form.
        for i in range(self.image.shape[0] - offset + 1):
            for j in range(self.image.shape[1] - offset + 1):
                yield self.extract_convWin(i, j, offset)
class Conv(convWins_Generator, ImageGen):
    """The operator class for different Convolutions.
    params:
        name  --> name of the image or operation.
        image --> a numpy array of image.
    functions:
        conv_kernals     --> convolution of a specific kernal (or a pre-loaded kernal) on the image.
        conv_pooling     --> pooling on the image.
        gaussian_conv    --> gaussian smoothing of the image.
        get_gaussian     --> returns a gaussian kernal of desired size and standard deviation.
        get_sobel        --> returns the complete sobel edges of the image.
        single_threshold --> thresholding the image.
        double_threshold --> thresholding the image against two limits.
        hysteresis       --> suppressing edge-points not connected to strong edge-points
                             in a double thresholded image.
        save_output      --> saves the output of the recent operation.
    """

    def __init__(self, name, image=None):
        super(Conv, self).__init__(image)
        # pre-loaded kernals; "offset" is the normalization factor applied
        # to each window sum
        self.kernals = {
            "sobelX": {
                "kernal": np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]),
                "offset": 0.125
            },
            "sobelY": {
                "kernal": np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]]),
                "offset": 0.125
            },
            "gaussian": {
                "kernal": self.get_gaussian(3, 1),
                "offset": 1
            },
            "laplacian": {
                "kernal": np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]]),
                "offset": 1
            }
        }
        self.name = name
        # pooling reducers selectable by name
        self.pooling = {
            "average": np.average,
            "median": np.median,
            "max": np.max,
            "min": np.min
        }

    def conv_kernals(self, mode, kernalType, Ckernal=None, truConv=True, save=False):
        """
        params:
            mode       --> "sliding" or "jumping" convolutions.
            kernalType --> name of the kernal; the pre-loaded kernals are
                           "sobelX", "sobelY", "gaussian" and "laplacian".
            Ckernal    --> dictionary with a numpy array (square matrix) "kernal" and an
                           "offset" for normalization; may be omitted when using a
                           pre-loaded kernal, but "kernalType" should still be
                           specified (it is also used for naming saved files).
            truConv    --> if True (sliding mode), the output shrinks to the valid region.
            save       --> True if the output is to be saved to disk.
        returns:
            The resultant numpy array image.
        """
        if Ckernal is None:
            kernal = self.kernals[kernalType]["kernal"]
            offset = self.kernals[kernalType]["offset"]
        else:
            kernal = Ckernal["kernal"]
            offset = Ckernal["offset"]
        h, w = self.image.shape[0], self.image.shape[1]
        if truConv:
            h, w = self.image.shape[0] - kernal.shape[0] + 1, self.image.shape[1] - kernal.shape[0] + 1
        if mode == "sliding":
            # (a dead, unused "delta" computation from the original was removed here)
            slide_ConvOutput = np.zeros([h, w])
            for patch in self.extract_slidingWins(kernal.shape[0]):
                slide_ConvOutput[patch["i"], patch["j"]] = offset * np.sum(patch["win"] * kernal)
            self.output = slide_ConvOutput
        else:
            jump_ConvOutput = np.zeros([self.image.shape[0] // kernal.shape[0],
                                        self.image.shape[1] // kernal.shape[0]])
            for patch in self.extract_jumpingWins(kernal.shape[0]):
                jump_ConvOutput[patch["x"], patch["y"]] = offset * np.sum(patch["win"] * kernal)
            self.output = jump_ConvOutput
        if save:
            cv2.imwrite(f"{self.name}_{kernalType}_with_{mode}_conv.png", self.output)
        gc.collect()
        return self.output

    def conv_pooling(self, pooling_type, mode, winSize, truConv=True, save=False):
        """
        params:
            pooling_type --> "average", "median", "max" or "min".
            mode         --> "sliding" or "jumping" convolutions.
            winSize      --> window size.
            truConv      --> if True (sliding mode), the output shrinks to the valid region.
            save         --> True if the output is to be saved to disk.
        returns:
            the resultant numpy array image.
        """
        func = self.pooling[pooling_type]
        offset = winSize
        h, w = self.image.shape[0], self.image.shape[1]
        if truConv:
            h, w = self.image.shape[0] - offset + 1, self.image.shape[1] - offset + 1
        if mode == "sliding":
            slide_poolingOut = np.zeros([h, w])
            for patch in self.extract_slidingWins(offset):
                slide_poolingOut[patch["i"], patch["j"]] = func(patch["win"])
            self.output = slide_poolingOut
        else:
            jump_poolingOut = np.zeros([self.image.shape[0] // offset, self.image.shape[1] // offset])
            for patch in self.extract_jumpingWins(offset):
                jump_poolingOut[patch["x"], patch["y"]] = func(patch["win"])
            self.output = jump_poolingOut
        if save:
            cv2.imwrite(f"{self.name}_averageSmoothing_offset_{offset}_with_{mode}_conv.png", self.output)
        gc.collect()
        return self.output

    def gaussian_conv(self, mode, size, alpha, save=False):
        """
        Gaussian smoothing of an image.
        params:
            mode: "sliding" or "jumping"
            size: size of the gaussian window
            alpha: standard deviation.
            save: True to save the result to disk
        returns:
            the output of the convolution.
        """
        Ckernal = {
            "kernal": self.get_gaussian(size, alpha),
            "offset": 1
        }
        # BUG FIX: `save` was previously passed positionally into the
        # `truConv` parameter of conv_kernals, so the result was never
        # saved and truConv was silently overridden. Pass it by keyword.
        self.output = self.conv_kernals(mode, f"gaussianKernal_Size_{size}_alpha_{alpha}", Ckernal, save=save)
        return self.output

    def get_gaussian(self, size, alpha):
        """
        Get a gaussian kernal of desired size.
        params:
            size: size of the kernal
            alpha: standard deviation
        returns:
            a numpy array gaussian kernal with the desired size and standard
            deviation, normalized to sum to 1.
        """
        G = np.zeros([size, size])
        k = (size - 1) / 2
        # np.pi instead of np.math.pi: the np.math alias was removed in NumPy 2.0
        nTerm = 1 / (2 * np.pi * np.square(alpha))
        for i in range(1, size + 1):
            for j in range(1, size + 1):
                x = i - k - 1
                y = j - k - 1
                G[i - 1, j - 1] = nTerm * np.exp(-((np.square(x) + np.square(y)) / (2 * np.square(alpha))))
        if np.sum(G) != 0:
            G /= G.sum()
        else:
            raise Exception("Zero divide error")
        return G

    def get_sobel(self, mode, save=False):
        """
        For computing the complete sobel edges.
        params:
            mode: "sliding" or "jumping"
            save: True to save the result
        returns:
            the output of the operation.
        """
        sobelX = self.conv_kernals(mode, "sobelX")
        sobelY = self.conv_kernals(mode, "sobelY")
        # gradient magnitude from the two directional derivatives
        self.output = np.sqrt(np.square(sobelX) + np.square(sobelY))
        gc.collect()
        if save:
            cv2.imwrite(f"{self.name}_SobelXY_with_{mode}_conv.png", self.output)
        return self.output

    def __threshold(self, thres, value, mode):
        """
        A utility function for "n" thresholding the image.
        params:
            thres: threshold value.
            value: value to assign for the operation.
            mode: "min" or "max"
        returns:
            the thresholded image (self.output, modified in place).
        """
        if mode == "max":
            self.output[self.output >= thres] = value
        else:
            self.output[self.output <= thres] = value
        return self.output

    def single_threshold(self, thres, max_value, min_value, save=False):
        """
        single thresholding the image.
        params:
            thres: single threshold value.
            max_value: value to assign, if above (or equal to) the threshold.
            min_value: value to assign, if below (or equal to) the threshold.
        returns:
            the single thresholded image.
        """
        self.output = self.image.copy()
        gc.collect()
        self.__threshold(thres, max_value, "max")
        self.__threshold(thres, min_value, "min")
        gc.collect()
        if save:
            self.save_output(f"{self.name}_threshold_{thres}.png")
        return self.output

    def double_threshold(self, max_thres, max_value, min_thres, min_value, save=False):
        """
        double thresholding the image.
        params:
            max_thres: upper threshold value.
            max_value: value to assign, if above the upper threshold.
            min_thres: lower threshold value.
            min_value: value to assign, if below the lower threshold.
        returns:
            Applies double thresholding on the image and returns it.
        """
        self.output = self.image.copy()
        self.__threshold(max_thres, max_value, "max")
        self.__threshold(min_thres, min_value, "min")
        gc.collect()
        if save:
            self.save_output(f"{self.name}_doubleThreshold_{max_thres}_{min_thres}.png")
        return self.output

    def hysteresis(self, winSize, max_thres, min_thres, max_value, min_value, save=False):
        """
        Hysteresis edge tracking on a double-thresholded image: weak points
        (between the thresholds) are promoted to max_value only when a strong
        point lies within their winSize neighbourhood, else demoted.
        params:
            winSize: window size used for the neighbourhood max-pooling.
            max_thres: upper threshold value.
            min_thres: lower threshold value.
            max_value: value to assign to kept (strong/connected) points.
            min_value: value to assign to suppressed points.
        returns:
            Applies hysteresis on the image and returns it.
        """
        temp = self.double_threshold(max_thres, max_value, min_thres, min_value).copy()
        imageTemp = self.image.copy()
        # temporarily pool over the thresholded output, not the source image
        self.image = self.output.copy()
        self.conv_pooling("max", "sliding", winSize, truConv=False)
        # weak pixels: between the two thresholds in the double-thresholded map
        prime_mask = np.logical_and(temp >= min_thres, temp <= max_thres)
        mask_u = np.logical_and(prime_mask, self.output >= max_value)
        mask_d = np.logical_and(prime_mask, self.output < max_value)
        temp[mask_u] = max_value
        temp[mask_d] = min_value
        self.image = imageTemp
        self.output = temp
        gc.collect()
        if save:
            self.save_output(f"{self.name}_hysteresis_{max_thres}_{min_thres}_winSize_{winSize}.png")
        return self.output

    def save_output(self, path):
        """Saves the output of the recent operation
        Params:
            path : path to save the file
        """
        cv2.imwrite(path, self.output)
|
# -*- coding: utf-8 -*-
"""The hashing analyzer implementation."""
from __future__ import unicode_literals
from plaso.analyzers import interface
from plaso.analyzers import logger
from plaso.analyzers import manager
from plaso.analyzers.hashers import manager as hashers_manager
from plaso.containers import analyzer_result
from plaso.lib import definitions
class HashingAnalyzer(interface.BaseAnalyzer):
  """This class contains code for calculating file hashes of input files.

  In Plaso, hashers are classes that map arbitrarily sized file content to a
  fixed size value. See: https://en.wikipedia.org/wiki/Hash_function
  """

  NAME = 'hashing'
  DESCRIPTION = 'Calculates hashes of file content.'

  PROCESSING_STATUS_HINT = definitions.STATUS_INDICATOR_HASHING

  INCREMENTAL_ANALYZER = True

  def __init__(self):
    """Initializes a hashing analyzer."""
    super(HashingAnalyzer, self).__init__()
    self._hasher_names_string = ''
    self._hashers = []

  def Analyze(self, data):
    """Updates the internal state of the analyzer, processing a block of data.

    Repeated calls are equivalent to a single call with the concatenation of
    all the arguments.

    Args:
      data (bytes): block of data from the data stream.
    """
    for hasher_object in self._hashers:
      hasher_object.Update(data)

  def GetResults(self):
    """Retrieves the hashing results.

    Returns:
      list[AnalyzerResult]: results.
    """
    results = []
    for hasher_object in self._hashers:
      logger.debug('Processing results for hasher {0:s}'.format(
          hasher_object.NAME))

      hasher_result = analyzer_result.AnalyzerResult()
      hasher_result.analyzer_name = self.NAME
      hasher_result.attribute_name = hasher_object.ATTRIBUTE_NAME
      hasher_result.attribute_value = hasher_object.GetStringDigest()
      results.append(hasher_result)

    return results

  def Reset(self):
    """Resets the internal state of the analyzer."""
    enabled_hasher_names = (
        hashers_manager.HashersManager.GetHasherNamesFromString(
            self._hasher_names_string))
    self._hashers = hashers_manager.HashersManager.GetHashers(
        enabled_hasher_names)

  def SetHasherNames(self, hasher_names_string):
    """Sets the hashers that should be enabled.

    Args:
      hasher_names_string (str): comma separated names of hashers to enable.
    """
    parsed_hasher_names = (
        hashers_manager.HashersManager.GetHasherNamesFromString(
            hasher_names_string))

    logger.debug('Got hasher names: {0:s}'.format(
        ', '.join(parsed_hasher_names)))

    self._hashers = hashers_manager.HashersManager.GetHashers(
        parsed_hasher_names)
    self._hasher_names_string = hasher_names_string
manager.AnalyzersManager.RegisterAnalyzer(HashingAnalyzer)
|
import oofcppc
from ooflib.SWIG.common.IO import stopper
from threading import *
import sys
import time
from types import *
import string
class TextProgressBar:
    """Thin wrapper around stopper.cProgressBar for terminal progress display.

    type: "continuous" (cProgressBar mode 1), "active" (mode 0) or
    "quiet" (mode -1, no display bar).
    """
    def __init__(self, type = "continuous", name_top = " "):
        if type == "continuous":
            self.progressbar = stopper.cProgressBar(1, name_top)
        elif type =="active":
            self.progressbar = stopper.cProgressBar(0, name_top)
        elif type =="quiet": ## no display bar
            self.progressbar = stopper.cProgressBar(-1,name_top)
        # stopper object used to signal/poll cancellation of the tracked task
        self.my_stopper = self.progressbar.get_stopper()
    def stop_it(self, widget):
        # request cancellation (widget arg unused — presumably a
        # GUI-callback signature; confirm against callers)
        self.my_stopper.set_click()
    def get_bar(self):
        return self.progressbar
    def get_stopper(self):
        return self.my_stopper
    def get_message(self):
        return self.progressbar.get_message()
    def update(self,value = None):
        # forward progress value to the underlying bar
        self.progressbar.update(value)
    def set_message(self,text =None):
        # empty/None messages are replaced by a single space
        if not text:
            text = " "
        self.progressbar.set_message(text);
    def get_fraction(self):
        return self.progressbar.get_fraction()
    def displaybar(self):
        # redraw the bar on the terminal
        self.progressbar.print_message()
## Be careful to NOT create any gtk objects in the thread.
## The thread is meant only to update the GUI through passed referenced variables.
class Worker (Thread):
    """Demo worker thread: advances the progress bar and honours the stopper.

    Must not create any GUI objects itself; it only updates the display
    through the passed-in widget (a TextProgressBar).
    """
    def __init__ (self, widget, thread_id):
        Thread.__init__(self)
        self.widget = widget
        self.thread_id = thread_id
    def run (self):
        num_cycles =5000
        for i in range(num_cycles):
            # abort promptly if a stop was requested
            if self.widget.my_stopper.quit():
                self.widget.set_message("\nThread aborted\n")
                ## self.widget.displaybar()
                ## time.sleep(0.1)
                return
            time.sleep(0.1)
            ## Here, the progress bar is updated
            self.widget.update(float(i+1)/float(num_cycles))
            ## self.widget.displaybar()
            ## Progress bar update section ends here
        ## Notify that thread has ended
        ## self.widget.set_message("\nThread finished\n")
        self.widget.displaybar()
        # signal completion so the main loop's quit() poll returns True
        self.widget.my_stopper.set_click()
        time.sleep(0.1)
## Debugging code starts here
def start_new_thread ():
    """Spawns a Worker thread with a fresh text progress bar and polls it.

    The main thread redraws the bar once a second until the worker signals
    completion via the stopper, or the user hits ctrl-C (which aborts the
    worker through the same stopper).
    """
    threadcount = activeCount()
    ## create text bar
    # BUG FIX: TextProgressBar is defined in this module, not in the SWIG
    # `stopper` module (which only provides cProgressBar above); the original
    # `stopper.TextProgressBar(...)` call would raise an AttributeError.
    a_bar_obj = TextProgressBar("active", "Thread " + str(threadcount))
    the_stopper = a_bar_obj.get_stopper()
    ## create thread
    a = Worker(a_bar_obj, threadcount)
    a.start()
    interrupt = 1
    try:
        while interrupt :
            time.sleep(1)
            a_bar_obj.displaybar()
            if the_stopper.quit():
                interrupt = 0
                a_bar_obj.set_message("\nThread finished\n")
                a_bar_obj.displaybar()
    except KeyboardInterrupt:
        # forward ctrl-C to the worker through the shared stopper
        the_stopper.set_click()
        interrupt = 0
        a_bar_obj.set_message("\nThread aborted\n")
        a_bar_obj.displaybar()
        ## print "Just hit ctrl-C! in main"
def only_while():
    """Debug helper: busy-waits until ctrl-C is pressed (Python 2 only)."""
    interrupt = 1
    try:
        while interrupt:
            pass
    except KeyboardInterrupt:
        interrupt = 0
        print "Just hit ctrl-C! in main"
## only_while()
# module-level demo entry point: runs the threaded progress-bar demo on import
start_new_thread()
|
import numpy as np
def get_hits(adj_matrix, EPSILON = 0.001):
    """HITS hubs & authorities calculation by power iteration.

    Arguments:
        adj_matrix {float[][]} -- adjacency matrix, e.g. np.array([[1, 0], [0, 1]])

    Keyword Arguments:
        EPSILON {float} -- convergence threshold on the total absolute change
            between successive iterations (default: {0.001})

    Returns:
        [(float[], float[])] -- (hubs, authorities), each L1-normalized.
    """
    # initialize to all 1's
    hubs = np.ones(adj_matrix.shape[0])
    authorities = np.ones(adj_matrix.shape[0])
    while True:
        # a = A.T h, h = A a
        new_authorities = np.dot(adj_matrix.T, hubs)
        new_hubs = np.dot(adj_matrix, authorities)
        # normalize so each vector sums to 1
        new_authorities = new_authorities / new_authorities.sum()
        new_hubs = new_hubs / new_hubs.sum()
        # BUG FIX: the original summed *signed* differences, which can cancel
        # each other out and report convergence spuriously; use the sum of
        # absolute per-element differences instead.
        diff = np.abs(new_hubs - hubs).sum() + np.abs(new_authorities - authorities).sum()
        authorities = new_authorities
        hubs = new_hubs
        if diff < EPSILON:
            return (new_hubs, new_authorities)
|
from setuptools import setup, find_packages
# Package metadata and dependencies for the luxai2021 PyPI distribution.
setup(
    name='luxai2021',
    version='0.1.0',
    author='Geoff McDonald',
    author_email='glmcdona@gmail.com',
    packages=find_packages(exclude=['tests*']),
    url='http://pypi.python.org/pypi/luxai2021/',
    license='MIT',
    description='Matching python environment code for Lux AI 2021 Kaggle competition, and a gym interface for RL models.',
    long_description=open('README.md').read(),
    install_requires=[
        "pytest",
        "stable_baselines3",
        "numpy",
        "tensorboard"
    ],
    # ship the game constants json inside the installed package
    package_data={'luxai2021': ['game/game_constants.json']},
    test_suite='nose2.collector.collector',
    tests_require=['nose2'],
)
|
"""Implementation of the Bingham Mixture Loss"""
import torch
from .maad import angular_loss_single_sample
from .bingham_fixed_dispersion import BinghamFixedDispersionLoss
from .bingham_loss import BinghamLoss
from .gram_schmidt import gram_schmidt_batched
from utils import vec_to_bingham_z_many
class BinghamMixtureLoss(object):
    """ Bingham Mixture Loss

    Computes the log likelihood bingham mixture loss on a batch. Can be
    configured such that for a predefined number of epochs

    Arguments:
        lookup_table_file (str): Path to the location of the lookup table.
        mixture_component_count (int): Number of Bingham mixture components.
        interpolation_kernel (str): The kernel to use for rbf interpolaition
            (can be "multiquadric" or "gaussian").
        fixed_dispersion_stage (int): Number of epochs in which the network is
            trained using a fixed dispersion parameter z.
        fixed_param_z (list): The fixed dispersion parameter Z used for all
            mixture components during the fixed dispersion stage.

    Inputs:
        target (torch.Tensor): Target values at which the likelihood is
            evaluated of shape (N, 4)
        output (torch.Tensor): Output values from which M and Z are extracted of
            shape (N, MIXTURE_COMPONENT_COUNT * 20). The first of the 20 values
            per mixture component is for computing the weight of that component.
            The remaining 19 are passed on to the BinghamLoss class.
    """
    def __init__(self, lookup_table_file, mixture_component_count,
                 interpolation_kernel="multiquadric", fixed_dispersion_stage=25,
                 fixed_param_z=[-1, -1, -1, 0]):
        # NOTE: mutable default for fixed_param_z is shared across instances;
        # it is only forwarded (not mutated) here.
        self._num_components = mixture_component_count
        self._fixed_dispersion_stage = fixed_dispersion_stage
        # softmax over the per-component weight logits (dim 1 of output)
        self._softmax = torch.nn.Softmax(dim=1)
        self._bingham_fixed_dispersion_loss = BinghamFixedDispersionLoss(
            fixed_param_z, orthogonalization="gram_schmidt")
        self._bingham_loss = BinghamLoss(
            lookup_table_file, interpolation_kernel,
            orthogonalization="gram_schmidt")

    def __call__(self, target, output, epoch):
        """Evaluates the mixture loss on a batch.

        Args:
            target (torch.Tensor): ground truth, shape (N, 4).
            output (torch.Tensor): network output, shape (N, num_components * 20).
            epoch (int): current epoch; selects fixed-dispersion vs full loss.

        Returns:
            (loss, log_likelihood) tuple.
        """
        batch_size = output.shape[0]
        # every 20th entry (one per component) is that component's weight logit
        weights = self._softmax(output[:, 0:-1:20])
        log_likelihood = torch.tensor(0., device=output.device, dtype=output.dtype)
        for i in range(batch_size):
            current_likelihood = torch.tensor(
                0., device=output.device, dtype=output.dtype)
            for j in range(self._num_components):
                if epoch < self._fixed_dispersion_stage:
                    # skip the 4 z-entries: dispersion is fixed in this stage
                    bd_log_likelihood = self._bingham_fixed_dispersion_loss(
                        target[i].unsqueeze(0),
                        output[i, (j*20+4):((j+1)*20)].unsqueeze(0))[1]
                else:
                    bd_log_likelihood = self._bingham_loss(
                        target[i].unsqueeze(0),
                        output[i, (j*20+1):((j+1)*20)].unsqueeze(0))[1]
                # mixture likelihood: weighted sum of component likelihoods
                current_likelihood += weights[i, j] * \
                    torch.exp(bd_log_likelihood).squeeze()
            log_likelihood += torch.log(current_likelihood)

        # NOTE(review): loss is the *total* negative log likelihood while the
        # returned log_likelihood is the batch mean — confirm this asymmetry
        # is intentional.
        loss = -log_likelihood
        log_likelihood /= batch_size

        return loss, log_likelihood

    def statistics(self, target, output, epoch):
        """ Reports some additional loss statistics.

        Arguments:
            target (torch.Tensor): Ground-truth shaped as loss input.
            output (torch.Tensor): NN output shaped as loss output parameter.
            epoch (int): Current epoch. Currently unused.

        Returns:
            stats (dict): Bingham parameters and angular deviation.
        """
        batch_size = output.shape[0]
        weights = self._softmax(output[:, 0:-1:20])
        maad = torch.zeros(
            batch_size, device=output.device, dtype=output.dtype)
        mode_stats = dict()
        for j in range(self._num_components):
            # batch-mean dispersion parameters of component j
            bd_z = torch.mean(vec_to_bingham_z_many(
                output[:, (j*20+1):(j*20+4)]
            ).squeeze(0), 0)
            mode_stats["mode_" + str(j) + "_weight"] \
                = float(torch.mean(weights[:, j]))
            if epoch >= self._fixed_dispersion_stage:
                # z stats are only meaningful once z is actually learned
                mode_stats["mode_" + str(j) + "_z_0"] = float(bd_z[0])
                mode_stats["mode_" + str(j) + "_z_1"] = float(bd_z[1])
                mode_stats["mode_" + str(j) + "_z_2"] = float(bd_z[2])

        param_m = torch.zeros((batch_size, self._num_components, 4, 4),
                              device=output.device, dtype=output.dtype)
        for j in range(self._num_components):
            param_m[:, j, :, :] = gram_schmidt_batched(
                output[:, (j * 20 + 4):((j + 1) * 20)].reshape(batch_size, 4, 4)
            )

        # Setting mmaad to 10 such that the minimum succeeds in the first run.
        mmaad = 10. * torch.ones(
            batch_size, device=output.device, dtype=output.dtype)
        for i in range(batch_size):
            for j in range(self._num_components):
                # column 3 of the orthogonalized M — presumably the component
                # mode quaternion; confirm against BinghamLoss conventions
                cur_angular_deviation = angular_loss_single_sample(
                    target[i], param_m[i, j, :, 3])
                maad[i] += cur_angular_deviation * weights[i, j]
                mmaad[i] = torch.min(mmaad[i], cur_angular_deviation)
        maad = torch.mean(maad)
        mmaad = torch.mean(mmaad)
        stats = {
            "maad": float(maad),
            "mmaad": float(mmaad)
        }
        stats.update(mode_stats)
        return stats
|
from datetime import timedelta
from mimetypes import guess_type
from secrets import token_urlsafe
from django.contrib.auth.hashers import (
check_password,
make_password,
)
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.http import HttpResponse
from django.shortcuts import redirect
from django.urls import reverse
from django.utils import timezone
from django.utils.crypto import get_random_string
class User(AbstractUser):
    # NOTE(review): presumably the requesting browser's User-Agent string —
    # confirm against the view/form that populates it
    user_agent = models.CharField(max_length=100)
class ProtectedItem(models.Model):
    """A password-protected, token-addressed item that expires 24h after creation."""

    created = models.DateTimeField(auto_now_add=True)
    owner = models.ForeignKey(
        User, related_name="items_created", on_delete=models.CASCADE
    )
    token = models.CharField(max_length=45, unique=True)
    password = models.CharField(max_length=128)
    accessed = models.IntegerField(default=0)

    def __str__(self):
        return f"{self.token} by {self.owner} created at {self.created} accessed {self.accessed} times"

    @property
    def is_expired(self):
        # items live for 24 hours from creation
        return self.created + timedelta(hours=24) < timezone.now()

    def count_correct_redirect(self):
        # NOTE(review): only mutates the in-memory instance; the caller is
        # presumably expected to save() afterwards — confirm.
        self.accessed += 1

    def set_password(self, raw_password):
        self.password = make_password(raw_password)

    def create_random_password(self):
        """Sets a random password and returns the cleartext for one-time display."""
        password = get_random_string()
        self.set_password(password)
        return password

    def check_password(self, raw_password):
        """
        Returns a boolean of whether the raw_password was correct. Handles
        hashing formats behind the scenes.
        """

        def setter(raw_password):
            # re-hash with the current algorithm when Django asks for an upgrade
            self.set_password(raw_password)
            self.save(update_fields=["password"])

        return check_password(raw_password, self.password, setter)

    def save(self, *args, **kwargs):
        # assign the public access token exactly once, on first save
        if self.pk is None:
            self.token = token_urlsafe()
        super(ProtectedItem, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse("protected", args=[self.token])

    def get_response(self):
        """Returns the protected payload: a redirect for URL items, the file
        contents (inline for images, as attachment otherwise) for file items."""
        if hasattr(self, "protectedurl"):
            return redirect(self.protectedurl.url)
        file_ = self.protectedfile.uploaded_file
        filename = str(file_)[1:]
        content_type = guess_type(filename)[0]
        response = HttpResponse(
            file_.read(),
            content_type=content_type,
        )
        # BUG FIX: the computed filename was previously unused and a
        # hard-coded placeholder was sent in the header; also guard against
        # guess_type returning None for unknown extensions, which would
        # crash on .startswith.
        disposition = (
            "inline" if content_type and content_type.startswith("image") else "attachment"
        )
        response["Content-Disposition"] = f'{disposition}; filename="{filename}"'
        return response
class ProtectedFile(ProtectedItem):
    # concrete item type: a protected uploaded file served back by get_response
    uploaded_file = models.FileField()
class ProtectedUrl(ProtectedItem):
    # concrete item type: a protected redirect target URL
    url = models.URLField(max_length=2048)
|
# -*- coding: utf-8 -*-
"""Tests dict input objects for `tackle.providers.yubikey.hooks.yubikey` module."""
# import pytest
# import os
# from tackle.main import tackle
#
#
# def test_provider_yubikey_hook_read(change_dir):
# """Verify the hook call works successfully."""
# o = tackle('.', context_file='read.yaml', no_input=True)
# assert 'owner' in o['read'].keys()
|
from __future__ import absolute_import
try:
import unittest2 as unittest
except:
import unittest
import numpy as np
from math3.objects.matrix3 import Matrix3
from math3.objects.matrix4 import Matrix4
from math3.objects.quaternion import Quaternion
from math3.objects.vector3 import Vector3
from math3.objects.vector4 import Vector4
from math3 import vector4
class test_object_vector4(unittest.TestCase):
_shape = (4,)
_size = np.multiply.reduce(_shape)
    def test_imports(self):
        """Vector4 must be importable through every public module path."""
        import math3
        math3.Vector4()
        math3.vector4.Vector4()
        math3.objects.vector4.Vector4()
        from math3 import Vector4
        from math3.objects import Vector4
        from math3.objects.vector4 import Vector4
    def test_create(self):
        """Covers all documented Vector4 construction paths."""
        # default construction zero-fills
        v = Vector4()
        self.assertTrue(np.array_equal(v, [0.,0.,0.,0.]))
        self.assertEqual(v.shape, self._shape)

        # from a 4-element list
        v = Vector4([1.,2.,3.,4.])
        self.assertTrue(np.array_equal(v, [1.,2.,3.,4.]))
        self.assertEqual(v.shape, self._shape)

        # promote a vector3 with an explicit w component
        v = Vector4.from_vector3([1.,2.,3.], w=0.0)
        self.assertTrue(np.array_equal(v, [1.,2.,3.,0.]))
        self.assertEqual(v.shape, self._shape)

        # copy construction
        v = Vector4(Vector4())
        self.assertTrue(np.array_equal(v, [0.,0.,0.,0.]))
        self.assertEqual(v.shape, self._shape)

        # translation extraction from a matrix44 (w becomes 1)
        m = Matrix4.from_translation([1., 2., 3.])
        v = Vector4.from_matrix44_translation(m)
        self.assertTrue(np.array_equal(v, [1.,2.,3.,1.]))

        # same extraction via the constructor
        m = Matrix4.from_translation([1., 2., 3.])
        v = Vector4(m)
        self.assertTrue(np.array_equal(v, [1.,2.,3.,1.]))
def test_inverse(self):
v = Vector4([1.,2.,3.,4.])
self.assertTrue(np.array_equal(v.inverse, [-1.,-2.,-3.,-4.]))
def test_normalise(self):
v = Vector4([1.,1.,1.,1.])
np.testing.assert_almost_equal(v.normalised, [0.5, 0.5, 0.5, 0.5], decimal=5)
v.normalise()
np.testing.assert_almost_equal(v, [0.5, 0.5, 0.5, 0.5], decimal=5)
def test_operators_matrix33(self):
v = Vector4()
m = Matrix3.from_x_rotation(0.5)
# add
self.assertRaises(ValueError, lambda: v + m)
# subtract
self.assertRaises(ValueError, lambda: v - m)
# multiply
self.assertRaises(ValueError, lambda: v - m)
# divide
self.assertRaises(ValueError, lambda: v / m)
def test_operators_matrix44(self):
v = Vector4()
m = Matrix4.from_x_rotation(0.5)
# add
self.assertRaises(ValueError, lambda: v + m)
# subtract
self.assertRaises(ValueError, lambda: v - m)
# multiply
self.assertRaises(ValueError, lambda: v * m)
# divide
self.assertRaises(ValueError, lambda: v / m)
def test_operators_quaternion(self):
v = Vector4()
q = Quaternion.from_x_rotation(0.5)
# add
self.assertRaises(ValueError, lambda: v + q)
# subtract
self.assertRaises(ValueError, lambda: v - q)
# multiply
self.assertRaises(ValueError, lambda: v * q)
# divide
self.assertRaises(ValueError, lambda: v / q)
def test_operators_vector3(self):
v1 = Vector4()
v2 = Vector3([1.,2.,3.])
# add
self.assertRaises(ValueError, lambda: v1 + v2)
# subtract
self.assertRaises(ValueError, lambda: v1 - v2)
# multiply
self.assertRaises(ValueError, lambda: v1 * v2)
# divide
#self.assertRaises(ValueError, lambda: v1 / v2)
# or
self.assertRaises(ValueError, lambda: v1 | v2)
# xor
#self.assertRaises(ValueError, lambda: v1 ^ v2)
# ==
self.assertRaises(ValueError, lambda: Vector4() == Vector3())
# !=
self.assertRaises(ValueError, lambda: Vector4() != Vector3([1.,1.,1.]))
def test_operators_vector4(self):
v1 = Vector4()
v2 = Vector4([1.,2.,3.,4.])
# add
self.assertTrue(np.array_equal(v1 + v2, [1.,2.,3.,4.]))
# subtract
self.assertTrue(np.array_equal(v1 - v2, [-1.,-2.,-3.,-4]))
# multiply
self.assertTrue(np.array_equal(v1 * v2, [0.,0.,0.,0.]))
# divide
self.assertTrue(np.array_equal(v1 / v2, [0.,0.,0.,0.]))
# or
self.assertTrue(np.array_equal(v1 | v2, vector4.dot([0.,0.,0.,0.], [1.,2.,3.,4.])))
# xor
#self.assertTrue(np.array_equal(v1 ^ v2, vector4.cross([0.,0.,0.,0.], [1.,2.,3.,4.])))
# ==
self.assertTrue(Vector4() == Vector4())
self.assertFalse(Vector4() == Vector4([1.,1.,1.,1.]))
# !=
self.assertTrue(Vector4() != Vector4([1.,1.,1.,1.]))
self.assertFalse(Vector4() != Vector4())
def test_operators_number(self):
v1 = Vector4([1.,2.,3.,4.])
fv = np.empty((1,), dtype=[('i', np.int16, 1),('f', np.float32, 1)])
fv[0] = (2, 2.0)
# add
self.assertTrue(np.array_equal(v1 + 1., [2., 3., 4., 5.]))
self.assertTrue(np.array_equal(v1 + 1, [2., 3., 4., 5.]))
self.assertTrue(np.array_equal(v1 + np.float(1.), [2., 3., 4., 5.]))
self.assertTrue(np.array_equal(v1 + fv[0]['f'], [3., 4., 5., 6.]))
self.assertTrue(np.array_equal(v1 + fv[0]['i'], [3., 4., 5., 6.]))
# subtract
self.assertTrue(np.array_equal(v1 - 1., [0., 1., 2., 3.]))
self.assertTrue(np.array_equal(v1 - 1, [0., 1., 2., 3.]))
self.assertTrue(np.array_equal(v1 - np.float(1.), [0., 1., 2., 3.]))
self.assertTrue(np.array_equal(v1 - fv[0]['f'], [-1., 0., 1., 2.]))
self.assertTrue(np.array_equal(v1 - fv[0]['i'], [-1., 0., 1., 2.]))
# multiply
self.assertTrue(np.array_equal(v1 * 2., [2., 4., 6., 8.]))
self.assertTrue(np.array_equal(v1 * 2, [2., 4., 6., 8.]))
self.assertTrue(np.array_equal(v1 * np.float(2.), [2., 4., 6., 8.]))
self.assertTrue(np.array_equal(v1 * fv[0]['f'], [2., 4., 6., 8.]))
self.assertTrue(np.array_equal(v1 * fv[0]['i'], [2., 4., 6., 8.]))
# divide
self.assertTrue(np.array_equal(v1 / 2., [.5, 1., 1.5, 2.]))
self.assertTrue(np.array_equal(v1 / 2, [.5, 1., 1.5, 2.]))
self.assertTrue(np.array_equal(v1 / np.float(2.), [.5, 1., 1.5, 2.]))
self.assertTrue(np.array_equal(v1 / fv[0]['f'], [.5, 1., 1.5, 2.]))
self.assertTrue(np.array_equal(v1 / fv[0]['i'], [.5, 1., 1.5, 2.]))
# or
self.assertRaises(ValueError, lambda: v1 | .5)
self.assertRaises(ValueError, lambda: v1 | 5)
self.assertRaises(ValueError, lambda: v1 | np.float(2.))
self.assertRaises(ValueError, lambda: v1 | fv[0]['f'])
self.assertRaises(ValueError, lambda: v1 | fv[0]['i'])
# xor
self.assertRaises(ValueError, lambda: v1 ^ .5)
self.assertRaises(ValueError, lambda: v1 ^ 5)
self.assertRaises(ValueError, lambda: v1 ^ np.float(2.))
self.assertRaises(ValueError, lambda: v1 ^ fv[0]['f'])
self.assertRaises(ValueError, lambda: v1 ^ fv[0]['i'])
# ==
self.assertRaises(ValueError, lambda: v1 == .5)
self.assertRaises(ValueError, lambda: v1 == 5)
self.assertRaises(ValueError, lambda: v1 == np.float(2.))
self.assertRaises(ValueError, lambda: v1 == fv[0]['f'])
self.assertRaises(ValueError, lambda: v1 == fv[0]['i'])
# !=
self.assertRaises(ValueError, lambda: v1 != .5)
self.assertRaises(ValueError, lambda: v1 != 5)
self.assertRaises(ValueError, lambda: v1 != np.float(2.))
self.assertRaises(ValueError, lambda: v1 != fv[0]['f'])
self.assertRaises(ValueError, lambda: v1 != fv[0]['i'])
def test_bitwise(self):
v1 = Vector4([1.,0.,0.,1.])
v2 = Vector4([0.,1.,0.,1.])
# or (dot)
self.assertTrue(np.array_equal(v1 | v2, vector4.dot(v1, v2)))
def test_accessors(self):
v = Vector4(np.arange(self._size))
self.assertTrue(np.array_equal(v.xy,[0,1]))
self.assertTrue(np.array_equal(v.xyz,[0,1,2]))
self.assertTrue(np.array_equal(v.xz,[0,2]))
self.assertTrue(np.array_equal(v.xyz,[0,1,2]))
self.assertEqual(v.x, 0)
self.assertEqual(v.y, 1)
self.assertEqual(v.z, 2)
v.x = 1
self.assertEqual(v.x, 1)
self.assertEqual(v[0], 1)
v.x += 1
self.assertEqual(v.x, 2)
self.assertEqual(v[0], 2)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import json
from io import StringIO
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import xmltodict
import pandas as pd
from datetime import datetime
import xml.etree.ElementTree as ET
from foundation.models import AppleHealthKitDataDB,AppleHealthKitUpload
class Command(BaseCommand):
    """Process pending AppleHealthKitUpload files into AppleHealthKitDataDB rows.

    Each unprocessed upload is parsed once; every supported HealthKit record
    type found in its XML export is persisted as individual data rows.
    """
    help = '-'

    # HealthKit record types extracted from each uploaded export file.
    DATA_TYPES = [
        'HKQuantityTypeIdentifierHeartRate',
        'HKQuantityTypeIdentifierStepCount',
        'HKQuantityTypeIdentifierDistanceWalkingRunning',
        'HKQuantityTypeIdentifierBasalEnergyBurned',
        'HKQuantityTypeIdentifierBodyMassIndex',
        'HKQuantityTypeIdentifierHeight',
        'HKQuantityTypeIdentifierBodyMass',
    ]

    def process_instrument(self, datum, get_data_type):
        """Extract every record of *get_data_type* from the upload's XML file
        and create one AppleHealthKitDataDB row per record.

        Args:
            datum: AppleHealthKitUpload instance whose data_file is parsed.
            get_data_type: HealthKit record type string to extract.
        """
        print(get_data_type)
        input_path = str(datum.data_file.path)
        root = ET.parse(input_path).getroot()
        values = []
        creation_date = []
        try:
            for record in root.findall(".Record/[@type='{}']".format(get_data_type)):
                values.append(float(record.get('value')))
                creation_date.append(datetime.strptime(record.get('creationDate'), "%Y-%m-%d %H:%M:%S %z"))
        except Exception as e:
            # Best-effort parse: keep whatever was collected before the error.
            print(e)
        for date, value in zip(creation_date, values):
            try:
                AppleHealthKitDataDB.objects.create(
                    creation_date=date,
                    value=value,
                    attribute_name=get_data_type,
                    user=datum.user
                )
            except Exception as e:
                print(e)

    @transaction.atomic
    def process(self, datum):
        """Process one upload: extract all supported data types, then mark it done.

        BUG FIX: the previous implementation looped `while not is_processed`
        with `except: continue`, which restarted extraction from the FIRST
        data type after any failure -- duplicating rows for types that had
        already succeeded and potentially looping forever.  A single pass
        over DATA_TYPES has the intended behavior.
        """
        for data_type in self.DATA_TYPES:
            try:
                self.process_instrument(datum, data_type)
            except Exception as e:
                # Best-effort: a failure in one type must not block the rest.
                print(e)
        datum.was_processed = True
        datum.save()

    def handle(self, *args, **options):
        """Entry point: process every upload not yet marked as processed."""
        data = AppleHealthKitUpload.objects.filter(was_processed=False)
        for datum in data:
            try:
                self.process(datum)
                print("Successfully processed upload with id"+str(datum.id))
            except Exception as e:
                print("Failed Processing Upload with id" +str(datum.id))
        self.stdout.write(self.style.SUCCESS('Successfully processed Apple HealthKit Data File'))
|
import json
import time
from datetime import datetime, timedelta
from urllib import parse
# prevent Error: Failed to import _strptime because the import lock is held by another thread.
# see https://www.raspberrypi.org/forums/viewtopic.php?t=166912
import _strptime
import xbmc
_ON_SETTING_CHANGE_EVENTS = "onSettingChangeEvents"
_SETTING_CHANGE_EVENTS_MAX_SECS = 5
_SETTING_CHANGE_EVENTS_ACTIVE = 0
_WINDOW_TV_GUIDE = 10702
_WINDOW_RADIO_GUIDE = 10707
_PLAY_PVR_URL_PATTERN = "pvr://channels/%s/%s/%s_%i.pvr"
PVR_TV = "tv"
PVR_RADIO = "radio"
DEFAULT_TIME = "00:00"
def deactivateOnSettingsChangedEvents(addon):
    """Stamp the addon setting with the current epoch second to start
    suppressing settings-changed event handling."""
    addon.setSetting(_ON_SETTING_CHANGE_EVENTS, str(int(time.time())))


def activateOnSettingsChangedEvents(addon):
    """Reset the stamp so settings-changed events are handled again."""
    addon.setSetting(_ON_SETTING_CHANGE_EVENTS, str(_SETTING_CHANGE_EVENTS_ACTIVE))


def isSettingsChangedEvents(addon):
    """Return True once the suppression window has expired."""
    # "0" prefix makes int() tolerate an empty/unset setting value.
    stamped = int("0" + addon.getSetting(_ON_SETTING_CHANGE_EVENTS))
    return int(time.time()) - stamped > _SETTING_CHANGE_EVENTS_MAX_SECS
def parse_xbmc_shortdate(date):
    """Parse *date* using Kodi's configured short-date format."""
    fmt = xbmc.getRegion("dateshort")
    return datetime.strptime(date, fmt)
def parse_time(s_time, i_day=0):
    """Parse "HH:MM" or "hh:mm am/pm" into a timedelta, offset by *i_day* days.

    An empty string is treated as DEFAULT_TIME (midnight).
    """
    if s_time == "":
        s_time = DEFAULT_TIME
    is_12h = s_time.lower().endswith((" am", " pm"))
    parsed = time.strptime(s_time, "%I:%M %p" if is_12h else "%H:%M")
    return timedelta(days=i_day, hours=parsed.tm_hour, minutes=parsed.tm_min)
def abs_time_diff(td1, td2):
    """Absolute difference between two timedeltas, in whole seconds."""
    return abs(time_diff(td1, td2))


def time_diff(td1, td2):
    """Signed difference td2 - td1 in whole seconds (microseconds ignored)."""
    def whole_seconds(td):
        return td.days * 86400 + td.seconds
    return whole_seconds(td2) - whole_seconds(td1)
def time_duration_str(s_start, s_end):
    """Duration between two time-of-day strings, formatted "HH:MM".

    The end time is placed one day after the start, and the difference is
    wrapped modulo 24h, so spans crossing midnight come out correctly.
    """
    start_td = parse_time(s_start)
    end_td = parse_time(s_end, i_day=1)
    return format_from_seconds(time_diff(start_td, end_td) % 86400)
def format_from_seconds(secs):
    """Render whole seconds as a zero-padded "HH:MM" string (seconds truncated)."""
    return "%02i:%02i" % divmod(secs // 60, 60)
def get_current_epg_view():
    """Return PVR_TV or PVR_RADIO when the matching guide window is active,
    otherwise None."""
    guide_windows = (
        (_WINDOW_TV_GUIDE, PVR_TV),
        (_WINDOW_RADIO_GUIDE, PVR_RADIO),
    )
    for window_id, view in guide_windows:
        if xbmc.getCondVisibility("Window.IsActive(%s)" % window_id):
            return view
    return None


def is_fullscreen():
    """True when Kodi is running in fullscreen mode."""
    return xbmc.getCondVisibility("System.IsFullscreen")
def get_pvr_channel_path(type, channelno):
    """Resolve a pvr:// playback URL for a channel number.

    Args:
        type: PVR_TV or PVR_RADIO (note: shadows the builtin, kept for
            backward compatibility with existing callers).
        channelno: channel number (int or numeric string).

    Returns:
        The pvr:// URL string, or None if the channel cannot be resolved.
    """
    try:
        channelno = int(channelno)
        _result = json_rpc("PVR.GetChannelGroups", {
            "channeltype": type})
        channelGroupAll = _result["channelgroups"][0]["label"]
        _result = json_rpc("PVR.GetClients")
        pvrClients = _result["clients"]
        _result = json_rpc("PVR.GetChannels", {
            "channelgroupid": "all%s" % type, "properties": ["uniqueid", "clientid", "channelnumber"]})
        channel = next(
            filter(lambda c: c["channelnumber"] == channelno, _result["channels"]), None)
        if not channel:
            return None
        pvrClient = list(filter(
            lambda _c: _c["supportsepg"] == True and _c["clientid"] == channel["clientid"], pvrClients))[0]
        if channelGroupAll and pvrClient and channel:
            return _PLAY_PVR_URL_PATTERN % (type, parse.quote(channelGroupAll), pvrClient["addonid"], channel["uniqueid"])
    except Exception:
        # BUG FIX: was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt.  Lookup remains best-effort -> fall through.
        pass
    return None
def get_volume(or_default=100):
    """Current application volume, or *or_default* when the RPC fails."""
    try:
        props = json_rpc("Application.GetProperties",
                         {"properties": ["volume"]})
        return props["volume"]
    except:
        xbmc.log(
            "jsonrpc call failed in order to get current volume: Application.GetProperties", xbmc.LOGERROR)
        return or_default


def set_volume(vol):
    """Set the application volume via Kodi's builtin."""
    xbmc.executebuiltin("SetVolume(%i)" % vol)


def set_powermanagement_displaysoff(value):
    """Set Kodi's 'turn displays off' power-management setting."""
    json_rpc("Settings.SetSettingValue", {
        "setting": "powermanagement.displaysoff", "value": value})


def set_windows_unlock(value):
    """On Windows, keep the display awake while *value* is truthy.

    Returns *value* unchanged so calls can be chained.
    """
    if xbmc.getCondVisibility("system.platform.windows"):
        import ctypes
        # ES_CONTINUOUS | ES_DISPLAY_REQUIRED vs. ES_CONTINUOUS only.
        es_flags = 0x80000002 if value else 0x80000000
        ctypes.windll.kernel32.SetThreadExecutionState(es_flags)
    return value
def json_rpc(jsonmethod, params=None):
    """Execute a Kodi JSON-RPC call and return its "result" member, or None."""
    payload = {
        "jsonrpc": "2.0",
        "method": jsonmethod,
        "params": params if params else {},
        "id": 1,
    }
    raw_response = xbmc.executeJSONRPC(json.dumps(payload))
    response = json.loads(raw_response)
    return response.get("result")
|
import numpy as np
import math
from keras.initializers import RandomUniform
from keras.models import model_from_json
from keras.models import Sequential, Model
from keras.layers import Dense, Flatten, Input, merge, Lambda, Activation
from keras.optimizers import Adam
import tensorflow as tf
import keras.backend as K
import os
class ActorNetwork(object):
    """DDPG-style actor network (Keras model + TF1 policy-gradient update op).

    Maintains an online model and a target model with identical architecture;
    the target tracks the online weights by Polyak averaging with rate `tau`.
    """

    def __init__(self, sess, state_size, action_size, action_bound, tau, learning_rate):
        # NOTE(review): action_bound is stored but never applied to the tanh
        # output anywhere in this class -- confirm scaling happens in the caller.
        self.sess = sess
        self.tau = tau
        self.s_dim = state_size
        self.a_dim = action_size
        self.learning_rate = learning_rate
        self.action_bound = action_bound
        self.stat_ops = []
        self.stat_names = []
        K.set_session(sess)
        # Now create the online model and its target copy.
        self.model , self.weights, self.state = self.create_actor_network(state_size, action_size)
        self.target_model, self.target_weights, self.target_state = self.create_actor_network(state_size, action_size)
        # dQ/da is fed in by the critic; chaining it through the actor yields
        # dQ/dtheta.  The minus sign turns Adam's descent into gradient ascent.
        self.action_gradient = tf.placeholder(tf.float32,[None, action_size])
        self.out = self.model.output
        self.params_grad = tf.gradients(self.out, self.weights, -self.action_gradient)
        grads = zip(self.params_grad, self.weights)
        self.optimize = tf.train.AdamOptimizer(learning_rate).apply_gradients(grads)
        # Scalar summaries reported by get_stats().
        self.stat_ops += [tf.reduce_mean(self.out)]
        self.stat_names += ["Mean action"]
        self.sess.run(tf.global_variables_initializer())

    def train(self, states, action_grads):
        """Run one policy-gradient update step given critic action gradients."""
        self.sess.run(self.optimize, feed_dict={
            self.state: states,
            self.action_gradient: action_grads
        })

    def predict_target(self, states):
        """Actions from the target network for a batch of states."""
        return self.target_model.predict_on_batch(states)

    def predict(self, states):
        """Actions from the online network for a batch of states."""
        return self.model.predict_on_batch(states)

    def target_train(self):
        """Polyak-average the online weights into the target network."""
        actor_weights = self.model.get_weights()
        actor_target_weights = self.target_model.get_weights()
        for i in range(len(actor_weights)):
            actor_target_weights[i] = self.tau * actor_weights[i] + (1 - self.tau)* actor_target_weights[i]
        self.target_model.set_weights(actor_target_weights)

    def create_actor_network(self, state_size,action_dim):
        """Build the actor: two ReLU hidden layers, tanh output in [-1, 1].

        Returns:
            (model, trainable_weights, input_placeholder)
        """
        S = Input(shape=[state_size])
        h0 = Dense(400, activation="relu", kernel_initializer="he_uniform")(S)
        h1 = Dense(300, activation="relu", kernel_initializer="he_uniform")(h0)
        # Small uniform init keeps initial actions near zero (standard DDPG).
        V = Dense(action_dim, activation="tanh",
                  kernel_initializer=RandomUniform(minval=-3e-3, maxval=3e-3, seed=None))(h1)
        model = Model(inputs=S,outputs=V)
        return model, model.trainable_weights, S

    def get_stats(self, stats_sample):
        """Evaluate the summary ops on a sample batch; returns {name: value}."""
        actor_values = self.sess.run(self.stat_ops, feed_dict={
            self.state: stats_sample['state0'],
        })
        names = self.stat_names[:]
        assert len(names) == len(actor_values)
        stats = dict(zip(names, actor_values))
        return stats

    def hard_target_update(self):
        """Copy the online weights into the target network verbatim."""
        self.target_model.set_weights(self.model.get_weights())

    def save_weights(self, filepath, overwrite=False):
        print("Saving weights")
        self.model.save_weights(filepath, overwrite=overwrite)

    def load_weights(self, filepath):
        self.model.load_weights(filepath)

    def save_target_weights(self, filepath, overwrite=False):
        print("Saving weights")
        self.target_model.save_weights(filepath, overwrite=overwrite)

    def load_target_weights(self, filepath):
        self.target_model.load_weights(filepath)

    def print_target_weights(self):
        print (self.target_model.get_weights())

    def print_weights(self):
        print (self.model.get_weights())
|
import cv2
import numpy
import time
class CaptureManager:
    """Manages frame capture from a cv2.VideoCapture-like source.

    Wrap each capture iteration in enterFrame()/exitFrame().  Supports an
    optional (mirrored) preview window, one-shot image snapshots and video
    recording with a lazily created writer.
    """

    def __init__(self, capture, previewWindowManager = None, shouldMirrorPreview = False) -> None:
        self.previewWindowManager = previewWindowManager
        self.shouldMirrorPreview = shouldMirrorPreview
        self._capture = capture
        self._channel = 0
        self._enterdFrame = False       # True between enterFrame() and exitFrame()
        self._frame = None              # lazily retrieved by the `frame` property
        self._imageFilename = None      # pending snapshot target, if any
        self._videoFilename = None      # active recording target, if any
        self._videoEncoding = None
        self._videoWriter = None
        self._startTime = None
        self._framesElaspsed = 0
        self._fpsEstimate = None        # measured rate, used when the device reports none

    @property
    def channel(self):
        return self._channel

    @channel.setter
    def channel(self, value):
        # Changing channel invalidates any frame cached for the old channel.
        if self._channel != value:
            self._channel = value
            self._frame = None

    @property
    def frame(self):
        """The current frame, retrieved lazily after a successful grab."""
        if self._enterdFrame and self._frame is None:
            _, self._frame = self._capture.retrieve(self._frame, self.channel)
        return self._frame

    @property
    def isWritingImage(self):
        return self._imageFilename is not None

    @property
    def isWritingVideo(self):
        return self._videoFilename is not None

    def enterFrame(self):
        """Grab (but do not retrieve) the next frame."""
        assert not self._enterdFrame, "previous enterFrame() has no matching exitFrame()"
        if self._capture is not None:
            self._enterdFrame = self._capture.grab()

    def exitFrame(self):
        """Draw the preview, write pending image/video output, release the frame."""
        if self.frame is None:
            # Grab failed: nothing to show or write this iteration.
            self._enterdFrame = False
            return
        # Update the FPS estimate from wall-clock time since the first frame.
        if self._framesElaspsed == 0:
            self._startTime = time.time()
        else:
            timeElaspsed = time.time() - self._startTime
            self._fpsEstimate = self._framesElaspsed / timeElaspsed
        self._framesElaspsed += 1
        if self.previewWindowManager is not None:
            if self.shouldMirrorPreview:
                mirroredFrame = numpy.fliplr(self._frame)
                self.previewWindowManager.show(mirroredFrame)
            else:
                self.previewWindowManager.show(self._frame)
        if self.isWritingImage:
            cv2.imwrite(self._imageFilename, self._frame)
            self._imageFilename = None
        self._writeVideoFrame()
        self._frame = None
        self._enterdFrame = False

    def writeImage(self, filename):
        """Write the next exited frame to *filename* (one-shot)."""
        self._imageFilename = filename

    def startWritingVideo(self, filename, encoding = cv2.VideoWriter_fourcc('M','J','P','G')):
        """Start recording exited frames to *filename*."""
        self._videoFilename = filename
        self._videoEncoding = encoding

    def stopWritingVideo(self):
        """Stop recording and drop the writer."""
        self._videoFilename = None
        self._videoEncoding = None
        self._videoWriter = None

    def _writeVideoFrame(self):
        """Append the current frame to the recording, creating the writer lazily."""
        if not self.isWritingVideo:
            return
        if self._videoWriter is None:
            fps = self._capture.get(cv2.CAP_PROP_FPS)
            if fps <= 0.0:
                # Device does not report FPS: wait until enough frames have
                # elapsed to trust our own wall-clock estimate.
                if self._framesElaspsed < 20:
                    return
                else:
                    fps = self._fpsEstimate
            size = (int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            # BUG FIX: the writer was created with a hard-coded 20 fps,
            # ignoring the reported/estimated rate computed above.
            self._videoWriter = cv2.VideoWriter(
                self._videoFilename, self._videoEncoding, fps, size)
        self._videoWriter.write(self._frame)
class WindowManager:
    """Thin wrapper around a named OpenCV window with keypress dispatch."""

    def __init__(self, windowName, keypressCallback = None) -> None:
        self.keypressCallback = keypressCallback
        self._windowName = windowName
        self._isWindowCreated = False

    @property
    def isWindowCreated(self):
        """Whether createWindow() has been called without a later destroy."""
        return self._isWindowCreated

    def createWindow(self):
        """Create the named window."""
        cv2.namedWindow(self._windowName)
        self._isWindowCreated = True

    def show(self, frame):
        """Display *frame* in the window."""
        cv2.imshow(self._windowName, frame)

    def destroyWindow(self):
        """Destroy the window and mark it as gone."""
        cv2.destroyWindow(self._windowName)
        self._isWindowCreated = False

    def processEvents(self):
        """Poll for a keypress and forward it to the callback, if any."""
        keycode = cv2.waitKey(1)
        if keycode == -1 or self.keypressCallback is None:
            return
        self.keypressCallback(keycode)
|
from typing import List
class Solution:
    def reverseString(self, s: List[str]) -> None:
        """
        Do not return anything, modify s in-place instead.
        """
        # list.reverse() performs the same in-place two-pointer swap in C.
        s.reverse()
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements EfficientNet Lite model for Quantization Aware Training.
[1] Mingxing Tan, Quoc V. Le
EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks.
ICML'19, https://arxiv.org/abs/1905.11946
"""
import functools
import tensorflow.compat.v1 as tf
import efficientnet_model
class FunctionalModelBuilder:
  """A class that builds functional api keras models."""

  def __init__(self, name='FunctionalModel'):
    self.name = name
    self.built = False  # set by build() on the first __call__

  def build(self, input_shape: tf.TensorShape):
    """One-time setup hook; subclasses create their layers here."""
    del input_shape  # Only used by subclasses.
    self.built = True

  def call(self, inputs, training):
    """Forward pass; must be overridden by subclasses."""
    raise NotImplementedError('This function is implemented in subclasses.')

  def get_functional_model(self, input_shape, training):
    """Trace self into a tf.keras.Model with a concrete Input of *input_shape*.

    input_shape includes the batch dimension: (batch, ...).
    """
    functional_inputs = tf.keras.Input(
        shape=input_shape[1:], batch_size=input_shape[0])
    functional_outputs = self(functional_inputs, training)
    return tf.keras.Model(inputs=functional_inputs, outputs=functional_outputs)

  def __call__(self, inputs, training):
    # Lazily build on first use, mirroring tf.keras.layers.Layer behavior.
    if not self.built:
      if tf.nest.is_nested(inputs):
        input_shapes = [
            input_tensor.shape for input_tensor in tf.nest.flatten(inputs)
        ]
      else:
        input_shapes = inputs.shape
      # NOTE(review): for nested inputs, [1:] drops the first tensor's shape
      # rather than the batch dim; harmless here since the subclasses below
      # ignore input_shape -- confirm before reusing elsewhere.
      self.build(input_shapes[1:])
    return self.call(inputs, training)
class FunctionalMBConvBlock(FunctionalModelBuilder):
  """A class of MBConv: Mobile Inverted Residual Bottleneck.

  Attributes:
    endpoints: dict. A list of internal tensors.
  """

  def __init__(self, block_args, global_params, dtype, name, **kwargs):
    """Initializes a MBConv block.

    Args:
      block_args: BlockArgs, arguments to create a Block.
      global_params: GlobalParams, a set of global parameters.
      dtype: Layer type.
      name: Layer name.
      **kwargs: Keyword arguments.
    """
    super().__init__(**kwargs)
    self._block_args = block_args
    self._dtype = dtype
    self._name = name
    self._batch_norm_momentum = global_params.batch_norm_momentum
    self._batch_norm_epsilon = global_params.batch_norm_epsilon
    self._batch_norm = global_params.batch_norm
    self._data_format = global_params.data_format
    self._conv_kernel_initializer = tf.compat.v2.keras.initializers.VarianceScaling(
        scale=2.0, mode='fan_out', distribution='untruncated_normal')
    if self._data_format == 'channels_first':
      self._channel_axis = 1
      self._spatial_dims = [2, 3]
    else:
      self._channel_axis = -1
      self._spatial_dims = [1, 2]
    # EfficientNet-Lite uses ReLU6 instead of swish (quantization friendly).
    self._relu_fn = functools.partial(tf.keras.layers.ReLU, 6.0)
    self._survival_prob = global_params.survival_prob
    self.endpoints = None

  def block_args(self):
    return self._block_args

  def build(self, input_shape):
    """Builds block according to the arguments."""
    # conv2d/batch-norm layer names carry numeric suffixes that must match
    # the original checkpoint naming scheme, hence the explicit counters.
    conv2d_id = 0
    batch_norm_id = 0
    # Expansion phase (1x1 conv), only when expand_ratio > 1.
    if self._block_args.expand_ratio != 1:
      self._expand_conv = tf.keras.layers.Conv2D(
          filters=(self._block_args.input_filters *
                   self._block_args.expand_ratio),
          kernel_size=[1, 1],
          strides=[1, 1],
          kernel_initializer=self._conv_kernel_initializer,
          padding='same',
          data_format=self._data_format,
          use_bias=False,
          dtype=self._dtype,
          name=f'{self._name}/conv2d')
      conv2d_id += 1
      self._bn0 = self._batch_norm(
          axis=self._channel_axis,
          momentum=self._batch_norm_momentum,
          epsilon=self._batch_norm_epsilon,
          dtype=self._dtype,
          name=f'{self._name}/tpu_batch_normalization')
      batch_norm_id += 1
    # Depthwise convolution phase.
    self._depthwise_conv = tf.keras.layers.DepthwiseConv2D(
        kernel_size=[
            self._block_args.kernel_size, self._block_args.kernel_size
        ],
        strides=self._block_args.strides,
        depthwise_initializer=self._conv_kernel_initializer,
        padding='same',
        data_format=self._data_format,
        use_bias=False,
        dtype=self._dtype,
        name=f'{self._name}/depthwise_conv2d')
    batch_norm_name_suffix = f'_{batch_norm_id}' if batch_norm_id else ''
    self._bn1 = self._batch_norm(
        axis=self._channel_axis,
        momentum=self._batch_norm_momentum,
        epsilon=self._batch_norm_epsilon,
        dtype=self._dtype,
        name=f'{self._name}/tpu_batch_normalization{batch_norm_name_suffix}')
    batch_norm_id += 1
    # Output phase.
    conv2d_name_suffix = f'_{conv2d_id}' if conv2d_id else ''
    self._project_conv = tf.keras.layers.Conv2D(
        filters=self._block_args.output_filters,
        kernel_size=[1, 1],
        strides=[1, 1],
        kernel_initializer=self._conv_kernel_initializer,
        padding='same',
        data_format=self._data_format,
        use_bias=False,
        dtype=self._dtype,
        name=f'{self._name}/conv2d{conv2d_name_suffix}')
    batch_norm_name_suffix = f'_{batch_norm_id}' if batch_norm_id else ''
    self._bn2 = self._batch_norm(
        axis=self._channel_axis,
        momentum=self._batch_norm_momentum,
        epsilon=self._batch_norm_epsilon,
        dtype=self._dtype,
        name=f'{self._name}/tpu_batch_normalization{batch_norm_name_suffix}')
    # Stochastic depth, realized as spatial dropout on the residual branch.
    self._spartial_dropout_2d = tf.keras.layers.SpatialDropout2D(
        rate=1 - self._survival_prob, dtype=self._dtype)

  def call(self, inputs, training):
    """Implementation of call().

    Args:
      inputs: the inputs tensor.
      training: boolean, whether the model is constructed for training.

    Returns:
      A output tensor.
    """
    x = inputs
    if self._block_args.expand_ratio != 1:
      x = self._relu_fn()(self._bn0(self._expand_conv(x), training=training))
    x = self._relu_fn()(self._bn1(self._depthwise_conv(x), training=training))
    self.endpoints = {'expansion_output': x}
    # No activation after projection (linear bottleneck).
    x = self._bn2(self._project_conv(x), training=training)
    if (all(s == 1 for s in self._block_args.strides) and
        inputs.get_shape().as_list()[-1] == x.get_shape().as_list()[-1]):
      # Apply only if skip connection presents.
      if self._survival_prob:
        x = self._spartial_dropout_2d(x)
      x = tf.keras.layers.Add(dtype=self._dtype)([x, inputs])
    return x
class FunctionalModel(FunctionalModelBuilder):
  """A class implements tf.keras.Model for MNAS-like model.

  Reference: https://arxiv.org/abs/1807.11626
  """

  def __init__(self,
               model_name,
               blocks_args=None,
               global_params=None,
               features_only=None,
               pooled_features_only=False,
               **kwargs):
    """Initializes an `Model` instance.

    Args:
      model_name: Name of the model.
      blocks_args: A list of BlockArgs to construct block modules.
      global_params: GlobalParams, a set of global parameters.
      features_only: build the base feature network only.
      pooled_features_only: build the base network for features extraction
        (after 1x1 conv layer and global pooling, but before dropout and fc
        head).
      **kwargs: Keyword arguments.

    Raises:
      ValueError: when blocks_args is not specified as a list.
    """
    super().__init__(**kwargs)
    if not isinstance(blocks_args, list):
      raise ValueError('blocks_args should be a list.')
    self._model_name = model_name
    self._global_params = global_params
    self._blocks_args = blocks_args
    self._dtype = 'float32'
    if self._global_params.use_bfloat16:
      self._dtype = 'mixed_bfloat16'
    self._features_only = features_only
    self._pooled_features_only = pooled_features_only
    # ReLU6 activation (quantization friendly, Lite variant).
    self._relu_fn = functools.partial(tf.keras.layers.ReLU, 6.0)
    self._batch_norm = global_params.batch_norm
    self._fix_head_stem = global_params.fix_head_stem
    self._conv_kernel_initializer = tf.compat.v2.keras.initializers.VarianceScaling(
        scale=2.0, mode='fan_out', distribution='untruncated_normal')
    self._dense_kernel_initializer = tf.keras.initializers.VarianceScaling(
        scale=1.0 / 3.0, mode='fan_out', distribution='uniform')
    self.endpoints = None

  def build(self, input_shape):
    """Builds a model."""
    del input_shape  # Unused.
    self._blocks = []
    batch_norm_momentum = self._global_params.batch_norm_momentum
    batch_norm_epsilon = self._global_params.batch_norm_epsilon
    if self._global_params.data_format == 'channels_first':
      channel_axis = 1
      self._spatial_dims = [2, 3]
    else:
      channel_axis = -1
      self._spatial_dims = [1, 2]
    # Stem part.
    self._conv_stem = tf.keras.layers.Conv2D(
        filters=efficientnet_model.round_filters(32, self._global_params,
                                                 self._fix_head_stem),
        kernel_size=[3, 3],
        strides=[2, 2],
        kernel_initializer=self._conv_kernel_initializer,
        padding='same',
        data_format=self._global_params.data_format,
        use_bias=False,
        dtype=self._dtype,
        name=f'{self._model_name}/stem/conv2d')
    self._bn0 = self._batch_norm(
        axis=channel_axis,
        momentum=batch_norm_momentum,
        epsilon=batch_norm_epsilon,
        name=f'{self._model_name}/stem/tpu_batch_normalization')
    # Builds blocks.
    for i, block_args in enumerate(self._blocks_args):
      assert block_args.num_repeat > 0
      assert block_args.space2depth in [0, 1, 2]
      # Update block input and output filters based on depth multiplier.
      input_filters = efficientnet_model.round_filters(block_args.input_filters,
                                                       self._global_params)
      output_filters = efficientnet_model.round_filters(
          block_args.output_filters, self._global_params)
      # With fix_head_stem, the first and last stages keep their repeat count
      # regardless of the depth multiplier.
      if self._fix_head_stem and (i == 0 or i == len(self._blocks_args) - 1):
        repeats = block_args.num_repeat
      else:
        repeats = efficientnet_model.round_repeats(block_args.num_repeat,
                                                   self._global_params)
      block_args = block_args._replace(
          input_filters=input_filters,
          output_filters=output_filters,
          num_repeat=repeats)
      # The first block needs to take care of stride and filter size increase.
      self._blocks.append(
          FunctionalMBConvBlock(
              block_args=block_args,
              global_params=self._global_params,
              dtype=self._dtype,
              name=f'{self._model_name}/blocks_{len(self._blocks)}'))
      if block_args.num_repeat > 1:  # rest of blocks with the same block_arg
        # pylint: disable=protected-access
        block_args = block_args._replace(
            input_filters=block_args.output_filters, strides=[1, 1])
        # pylint: enable=protected-access
        for _ in range(block_args.num_repeat - 1):
          self._blocks.append(
              FunctionalMBConvBlock(
                  block_args,
                  self._global_params,
                  dtype=self._dtype,
                  name=f'{self._model_name}/blocks_{len(self._blocks)}'))
    # Head part.
    self._conv_head = tf.keras.layers.Conv2D(
        filters=efficientnet_model.round_filters(1280, self._global_params,
                                                 self._fix_head_stem),
        kernel_size=[1, 1],
        strides=[1, 1],
        kernel_initializer=self._conv_kernel_initializer,
        padding='same',
        data_format=self._global_params.data_format,
        use_bias=False,
        dtype=self._dtype,
        name=f'{self._model_name}/head/conv2d')
    self._bn1 = self._batch_norm(
        axis=channel_axis,
        momentum=batch_norm_momentum,
        epsilon=batch_norm_epsilon,
        dtype=self._dtype,
        name=f'{self._model_name}/head/tpu_batch_normalization')
    # Classification head; omitted when num_classes is falsy.
    if self._global_params.num_classes:
      self._fc = tf.keras.layers.Dense(
          self._global_params.num_classes,
          kernel_initializer=self._dense_kernel_initializer,
          dtype=self._dtype,
          name=f'{self._model_name}/head/dense')
    else:
      self._fc = None
    if self._global_params.dropout_rate > 0:
      self._dropout = tf.keras.layers.Dropout(
          self._global_params.dropout_rate, dtype=self._dtype)
    else:
      self._dropout = None

  def call(self, inputs, training):
    """Implementation of call().

    Args:
      inputs: input tensors.
      training: boolean, whether the model is constructed for training.

    Returns:
      output tensors.
    """
    outputs = None
    self.endpoints = {}
    reduction_idx = 0
    # Calls Stem layers
    outputs = self._relu_fn()(
        self._bn0(self._conv_stem(inputs), training=training))
    self.endpoints['stem'] = outputs
    # Calls blocks.
    for idx, block in enumerate(self._blocks):
      is_reduction = False  # reduction flag for blocks after the stem layer
      # A block is a reduction point when it is the last block or the next
      # block downsamples (stride > 1).
      if ((idx == len(self._blocks) - 1) or
          self._blocks[idx + 1].block_args().strides[0] > 1):
        is_reduction = True
        reduction_idx += 1
      # NOTE(review): survival_prob is recomputed per block here but never
      # passed to block(); the blocks use global_params.survival_prob set at
      # construction -- confirm whether per-depth stochastic depth was intended.
      survival_prob = self._global_params.survival_prob
      if survival_prob:
        drop_rate = 1.0 - survival_prob
        survival_prob = 1.0 - drop_rate * float(idx) / len(self._blocks)
      outputs = block(outputs, training)
      self.endpoints['block_%s' % idx] = outputs
      if is_reduction:
        self.endpoints['reduction_%s' % reduction_idx] = outputs
      if block.endpoints:
        for k, v in block.endpoints.items():
          self.endpoints['block_%s/%s' % (idx, k)] = v
          if is_reduction:
            self.endpoints['reduction_%s/%s' % (reduction_idx, k)] = v
    self.endpoints['features'] = outputs
    if not self._features_only:
      # Calls final layers and returns logits.
      outputs = self._relu_fn()(
          self._bn1(self._conv_head(outputs), training=training))
      self.endpoints['head_1x1'] = outputs
      # Global average pooling implemented as an AveragePooling2D covering
      # the full spatial extent.
      shape = outputs.get_shape().as_list()
      outputs = tf.keras.layers.AveragePooling2D(
          pool_size=(shape[self._spatial_dims[0]],
                     shape[self._spatial_dims[1]]),
          strides=[1, 1],
          padding='valid',
          dtype=self._dtype)(
              outputs)
      self.endpoints['pooled_features'] = outputs
      if not self._pooled_features_only:
        if self._dropout:
          outputs = self._dropout(outputs)
        self.endpoints['global_pool'] = outputs
        if self._fc:
          outputs = tf.keras.layers.Flatten(dtype=self._dtype)(outputs)
          outputs = self._fc(outputs)
        self.endpoints['head'] = outputs
    return outputs
|
#!/usr/bin/python
# Authors:
# 2016: Jayamine Alupotha https://github.com/jaymine
# 2020: Wolfgang Fahl https://github.com/WolfgangFahl
import socket
import json
import os
import struct
import threading
import time
import uuid
from enum import IntEnum
from queue import Queue, Empty
from subprocess import *
from threading import Timer
from threading import Thread
class State(IntEnum):
    """Eventbus connection state.

    Mirrors https://github.com/vert-x3/vertx-bus-bower/blob/master/vertx-eventbus.js
    """

    CONNECTING = 0
    OPEN = 1
    CLOSING = 2
    CLOSED = 3
class RepeatTimer(Timer):
    """A Timer that fires repeatedly every `interval` seconds until cancelled."""

    def run(self):
        # `finished.wait()` returns False on timeout and True once cancel()
        # sets the event, so the loop ends exactly when the timer is cancelled.
        cancelled = self.finished
        while True:
            if cancelled.wait(self.interval):
                break
            self.function(*self.args, **self.kwargs)
class TcpEventBusBridgeStarter():
    """Launches the java based TcpEventBusBridge jar together with the test EchoVerticle."""

    def __init__(self, port, jar=None, waitFor="EchoVerticle started", debug=False):
        """
        construct me

        Args:
            port(int): the port to listen to
            jar(str): the path to the TcpEventBusBridge jar file
            waitFor(str): the output string on stderr of the java process to waitFor
            debug(bool): True if debugging output should be shown else False - default: False
        """
        self.port = port
        self.waitFor = waitFor
        self.debug = debug
        if jar is not None:
            self.jar = jar
        else:
            # default to a jar sitting next to this module
            scriptpath = os.path.dirname(os.path.abspath(__file__))
            if self.debug:
                print("scriptpath is %s" % scriptpath)
            self.jar = scriptpath + "/TcpEventBusBridge.jar"
        self.started = False

    def checkPort(self):
        """
        check that a socket connection is possible on my port

        Returns:
            bool: True if the port accepts a connection else False
        """
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            probe.connect(('localhost', self.port))
            return True
        except ConnectionRefusedError:
            return False
        finally:
            probe.close()

    def start(self):
        """ start the jar file"""
        self._javaStart()

    def wait(self, timeOut=30.0, timeStep=0.1):
        """ wait for the java server to be started

        Args:
            timeOut(float): the timeOut in secs after which the wait fails with an Exception
            timeStep(float): the timeStep in secs in which the state should be regularly checked
        :raise:
            :Exception: wait timed out
        """
        remaining = timeOut
        while remaining > 0 and not self.started:
            time.sleep(timeStep)
            remaining -= timeStep
        if remaining <= 0:
            raise Exception("wait for start timedOut after %.3f secs" % (timeOut))
        if self.debug:
            print("wait for start successful after %.3f secs" % (timeOut - remaining))

    def stop(self):
        """ stop the jar file"""
        self.process.kill()
        self.started = False

    def _handleJavaOutput(self):
        """ watch the stderr of the java process for the startup marker"""
        err = self.process.stderr
        for rawline in iter(err.readline, b''):
            line = rawline.decode('utf8')
            if self.debug:
                print("java: %s" % line)
            if self.waitFor in line:
                self.started = True
        err.close()

    def _javaStart(self):
        """
        spawn `java -jar` and watch its stderr in a daemon thread
        """
        if self.debug:
            print("starting java -jar %s" % self.jar)
        self.process = Popen(['java', '-jar', self.jar, "--port", str(self.port)], stderr=PIPE)
        watcher = Thread(target=self._handleJavaOutput)
        watcher.daemon = True  # thread dies with the program
        watcher.start()
class Eventbus(object):
    """
    Vert.x TCP eventbus client for python

    :ivar headers: any headers to be sent as per the vertx-tcp-eventbus-bridge specification
    :ivar state: the state of the eventbus
    :vartype state: State
    :ivar host: 'localhost' the host the eventbus is connected to
    :vartype host: str
    :ivar port: 7000 : the port to be used for the socket connection
    :vartype port: int
    :ivar pingInterval:5000:the ping interval in millisecs
    :vartype pingInterval: int
    :ivar pongCount:0:the number of pongs received
    :vartype pongCount: int
    :ivar timeOut: DEFAULT_TIMEOUT:time in secs to be used as the socket timeout
    :vartype timeOut: float
    :ivar debug: False: True if debugging should be enabled
    :vartype debug: bool
    :ivar onError:onError:the function to handle error messages with no address
    :vartype onError: function
    :ivar handlers:{}: the dict of handlers for incoming messages
    :vartype handlers: dict
    :ivar replyHandler:{}: the dict of handlers for reply messages
    :vartype replyHandlers: dict
    """
    DEFAULT_TIMEOUT = 60.0

    def __init__(self, host='localhost', port=7000, options=None, onError=None, timeOut=None, connect=True, debug=False):
        """
        constructor

        Args:
            host(str): the host to connect to - default: 'localhost'
            port(int): the port to use - default: 7000
            options(dict): e.g. { vertxbus_ping_interval=5000 }
            onError(function): the handler to use for error messages with no address - default: None will be replaced by default onError
            timeOut(float): time in secs to be used as the socket timeout - default: 60 secs - the minimum timeOut is 10 msecs and will be enforced
            connect(bool): True if the eventbus should automatically be opened - default: True
            debug(bool): True if debugging should be enabled - default: False
        :raise:
            :IOError: - the socket could not be opened
            :Exception: - some other issue e.g. with starting the listening thread
        """
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.handlers = {}
        self.replyHandler = {}
        self.headers = {}
        self.host = host
        self.port = port
        self.debug = debug
        # fix: always initialize the ping interval - previously it was left
        # unset when options was given without a "vertxbus_ping_interval" key,
        # which crashed in the RepeatTimer construction below
        self.pingInterVal = 5000
        if options is not None and "vertxbus_ping_interval" in options:
            self.pingInterVal = options["vertxbus_ping_interval"]
        self.onError = self.onErrorHandler if onError is None else onError
        self.pongCount = 0
        # ping regularly to keep the bridge connection alive (interval is in millisecs)
        self.pingTimer = RepeatTimer(self.pingInterVal / 1000, self.ping)
        if timeOut is None:
            timeOut = Eventbus.DEFAULT_TIMEOUT
        # enforce the minimum timeOut of 10 msecs
        self.timeOut = timeOut if timeOut >= 0.01 else 0.01
        self.state = State.CONNECTING
        if connect:
            # connect
            self.open()

    def open(self):
        """
        open the eventbus by connecting the eventbus socket and starting a listening thread
        by default the connection is opened on construction of an Eventbus instance
        :raise:
            :IOError: - the socket could not be opened
            :Exception: - some other issue e.g. with starting the listening thread
        """
        try:
            self._connect()
            t1 = threading.Thread(target=self._receivingThread)
            t1.start()
        except Exception as e:
            # make sure a half-opened socket is not leaked
            self.close()
            raise e

    def _connect(self):
        """ connect my socket """
        self.sock.connect((self.host, self.port))
        self.sock.settimeout(self.timeOut)

    def wait(self, state=State.OPEN, timeOut=5.0, timeStep=0.01):
        """
        wait for the eventbus to reach the given state

        Args:
            state(State): the state to wait for - default: State.OPEN
            timeOut(float): the timeOut in secs after which the wait fails with an Exception
            timeStep(float): the timeStep in secs in which the state should be regularly checked
        :raise:
            :Exception: wait timed out
        """
        timeLeft = timeOut
        while self.state is not state and timeLeft > 0:
            time.sleep(timeStep)
            timeLeft = timeLeft - timeStep
        if timeLeft <= 0:
            raise Exception("wait for %s timedOut after %.3f secs" % (state.name, timeOut))
        if self.debug:
            print("wait for %s successful after %.3f secs" % (state.name, timeOut - timeLeft))

    def addHeader(self, header, value):
        """
        add a header with the given header key and value

        Args:
            header(str): the key of the header value to add
            value(object): the value of the header value to add
        """
        self.headers[header] = value

    def isOpen(self):
        """
        Checks if the eventbus state is OPEN.

        Returns:
            bool: True if State is OPEN else False
        """
        return self.state is State.OPEN

    def onErrorHandler(self, message):
        """
        default onError Handler - only gives debug output
        """
        if self.debug:
            print("error message '%s' not handled" % message)

    def pongHandler(self):
        """
        default pong Handler - counts the number of pongs received
        """
        self.pongCount = self.pongCount + 1
        if self.debug:
            print("pong %d received" % self.pongCount)

    def _sendFrame(self, message_s):
        """
        send the given message as a length-prefixed frame

        Args:
            message_s (str): the message to be sent.
        """
        message = message_s.encode('utf-8')
        msgLen = len(message)
        # frame = <Length: uInt32><payload>
        frame = struct.pack('!I', msgLen) + message
        if self.debug:
            print("sending %d bytes '%s'" % (msgLen, message_s))
        self.sock.sendall(frame)

    def _recvExactly(self, size):
        """
        receive exactly size bytes from my socket

        fix: sock.recv(n) may legally return fewer than n bytes; the previous
        code assumed a single recv call delivered the whole frame

        Args:
            size(int): the number of bytes to receive
        Returns:
            bytes: exactly size bytes
        :raise:
            :Exception: the connection was closed mid-frame
        """
        chunks = []
        missing = size
        while missing > 0:
            chunk = self.sock.recv(missing)
            if not chunk:
                raise Exception("connection closed while receiving %d bytes" % (size))
            chunks.append(chunk)
            missing -= len(chunk)
        return b''.join(chunks)

    def _receive(self):
        """
        receive a message as specified in https://vertx.io/docs/vertx-tcp-eventbus-bridge/java/
        <Length: uInt32><{
            type: String,
            address: String,
            (replyAddress: String)?,
            headers: JsonObject,
            body: JsonObject
        }: JsonObject>
        """
        if self.debug:
            print("trying to receive a message in state %s" % self.state.name)
        # this is a blocking call which should run in a separate thread
        # receive the first uInt32 4 bytes
        if self.state < State.CLOSING:  # closing socket
            len_str = self._recvExactly(4)
        else:
            raise Exception("eventbus is closed while trying to receive first 4 bytes of message/Length")
        # "!I" (unsigned) matches the frame length written by _sendFrame
        len1 = struct.unpack("!I", len_str)[0]
        if self.debug:
            print("trying to receive %d bytes in state %s" % (len1, self.state.name))
        if self.state < State.CLOSING:  # closing socket
            payload = self._recvExactly(len1)
        else:
            raise Exception("eventbus is closed while trying to receive payload of %d bytes" % (len1))
        json_message = payload.decode('utf-8')
        message = json.loads(json_message)
        debugInfo = "%d message bytes with payload %s" % (len1, message)
        # check
        if self.debug:
            print(debugInfo)
        if not 'type' in message:
            raise Exception("invalid message - type missing in: '%s'" % debugInfo)
        msgType = message['type']
        if msgType == 'message':
            self._handleMessage(message, debugInfo)
        elif msgType == 'err':
            self.onError(message)
        elif msgType == 'pong':
            self.pongHandler()
        else:
            raise Exception("invalid message type %s in '%s'" % (msgType, debugInfo))

    def _handleMessage(self, message, debugInfo):
        """
        dispatch a 'message' frame to the registered handlers or the one-shot
        reply handler for its address

        Args:
            message(dict): the decoded message
            debugInfo(str): description of the message for error reporting
        :raise:
            :Exception: the message has no address or no handler is registered
        """
        if 'address' not in message:
            raise Exception("invalid message - address missing in '%s'" % debugInfo)
        address = message['address']
        if address in self.handlers:
            for handler in self.handlers[address]:
                handler(None, message)
        elif address in self.replyHandler:
            handler = self.replyHandler[address]
            handler(None, message)
            # reply handlers are one-shot
            del self.replyHandler[address]
        else:
            raise Exception("no handler for address %s" % debugInfo)

    def _receivingThread(self):
        """
        receive loop to be started in separate Thread
        """
        self.state = State.OPEN
        self.pingTimer.start()
        # debug message after open ..
        if self.debug:
            print("starting receiving thread")
        while self.state < State.CLOSING:  # CONNECTING=0, OPEN=1
            try:
                self._receive()
            except Exception as e:
                # best-effort loop: socket timeouts and malformed messages are
                # reported (in debug mode) but do not kill the thread
                if self.debug:
                    print(e)
        if self.debug:
            print("receiving thread finished in state %s" % self.state.name)
        self.sock.close()
        self.state = State.CLOSED

    def close(self):
        """
        close the eventbus connection

        a connection that never left the CONNECTING state is simply closed;
        otherwise the ping timer is cancelled, the state is set to CLOSING and
        we wait (up to my timeOut) for the receiving thread to close the socket
        """
        if self.state == State.CONNECTING:
            self.sock.close()
            return
        self.pingTimer.cancel()
        self.state = State.CLOSING
        # wait for the socket timeout
        self.wait(State.CLOSED, timeOut=self.timeOut)

    def _mergeHeaders(self, headers=None):
        """ merge the given headers with the default headers

        Args:
            headers(dict): the headers to merge - default: None
        Returns:
            dict: the merged headers dict
        """
        if headers is None:
            return self.headers
        # https://stackoverflow.com/a/26853961/1497139
        mergedHeaders = self.headers.copy()
        mergedHeaders.update(headers)
        return mergedHeaders

    def _send(self, msgType, address, replyAddress=None, body=None, headers=None):
        """
        send a message of the given message type to the given address with the given body

        Args:
            msgType(str): the type of the message publish, send, register, unregister or ping
            address(str): the target address to send the message to
            replyAddress(str): the address a reply should be sent to - default: None
            body(str): the body of the message e.g. a JSON object
            headers(dict): headers to be added - default: None
        :raise:
            :Exception: - eventbus is not open
        """
        if not self.isOpen():
            raise Exception("eventbus is not open when trying to %s to %s" % (msgType, address))
        headers = self._mergeHeaders(headers)
        if msgType == 'send' and replyAddress is not None:
            message = json.dumps(
                {'type': msgType, 'address': address, 'replyAddress': replyAddress, 'headers': headers, 'body': body})
        else:
            message = json.dumps(
                {'type': msgType, 'address': address, 'headers': headers, 'body': body})
        self._sendFrame(message)

    def ping(self):
        """
        send a ping
        :raise:
            :Exception: - eventbus is not open
        """
        msgType = 'ping'
        if not self.isOpen():
            raise Exception("eventbus is not open when trying to %s" % (msgType))
        message = json.dumps(
            {'type': msgType})
        self._sendFrame(message)

    def send(self, address, body=None, callback=None, headers=None):
        """
        send a message

        Args:
            address(str): the target address to send the message to
            body(str): the body of the message e.g. a JSON object - default: None
            callback(function): a one-shot reply handler - default: None
            headers(dict): headers to be added - default: None
        :raise:
            :Exception: - eventbus is not open
        """
        replyAddress = None
        if callback is not None:
            # register the callback under a unique reply address
            replyAddress = str(uuid.uuid4())
            self.replyHandler[replyAddress] = callback
        self._send('send', address, replyAddress=replyAddress, body=body, headers=headers)

    def publish(self, address, body=None, headers=None):
        """
        publish a message

        Args:
            address(str): the target address to send the message to
            body(str): the body of the message e.g. a JSON object
            headers(dict): headers to be added - default: None
        :raise:
            :Exception: - eventbus is not open
        """
        # fix: the headers parameter was previously accepted but silently dropped
        self._send('publish', address, body=body, headers=headers)

    def registerHandler(self, address, callback, headers=None):
        """
        register a handler

        Args:
            address(str): the address to register a handler for
            callback(function): a callback for the address
            headers(dict): headers to be added - default: None
        :raise:
            :Exception:
                - eventbus is not open
                - callback not callable
        """
        if not self.isOpen():
            raise Exception("eventbus is not open when trying to register Handler for %s" % address)
        if not callable(callback):
            raise Exception("callback for registerHandler must be callable")
        if address not in self.handlers:
            self.handlers[address] = []
            # only the first handler for an address needs a register message
            self._send('register', address, headers=headers)
        self.handlers[address].append(callback)

    def unregisterHandler(self, address, callback, headers=None):
        """
        unregister a callback for a given address
        if there is more than one callback for the address it will be removed from the handler list
        if there is only one callback left an unregister message will be sent over the bus and then
        the address is fully removed

        Args:
            address(str): the address to unregister the handler for
            callback(function): the callback to unregister
            headers(dict): headers to be added - default: None
        :raise:
            :Exception:
                - eventbus is not open
                - address not registered
                - callback not registered
        """
        if not self.isOpen():
            raise Exception("eventbus is not open when trying to unregister handler for %s" % (address))
        if address not in self.handlers:
            raise Exception("can't unregister address %s - address not registered" % (address))
        callbacks = self.handlers[address]
        if callback not in callbacks:
            raise Exception("can't unregister callback for %s - callback not registered" % (address))
        callbacks.remove(callback)
        if len(callbacks) == 0:
            self._send('unregister', address, body=None, headers=headers)
            del self.handlers[address]
|
from django.conf.urls import url
from slack_utils import views
urlpatterns = [
    # fix: url() treats these strings as regexes; without the '^' anchor the
    # old patterns matched any URL *ending* in 'events/' or 'commands/'.
    # Raw strings are used per Django convention for regex routes.
    url(r'^events/$', views.EventsView.as_view(), name='slack-events-api'),
    url(r'^commands/$', views.CommandView.as_view(), name='slack-commands'),
]
|
from gallery.db.dao import BaseDAO
from gallery.db.parcel.builders import IndexGalleryParcelBuilder, PageGalleryParcelBuilder
from gallery.models import BellImage, BellGallery
from django.core.exceptions import ObjectDoesNotExist
from gallery.exceptions import NonexistentGalleryError
class RestfulIndexDAO(BaseDAO):
    """DAO assembling an index parcel for every gallery."""

    def get_all_galleries(self):
        """Build and return an index parcel per BellGallery, using the
        gallery's first image as preview (or None when it has no images)."""
        parcels = []
        for gallery in BellGallery.objects.all():
            images = BellImage.objects.filter(parent_gallery=gallery.pk)
            preview = images[0] if images else None
            parcels.append(IndexGalleryParcelBuilder(gallery, preview).create())
        return parcels
class RestfulPageDAO(BaseDAO):
    """DAO building the page parcel for a single gallery."""

    def __init__(self, gallery_id, image_qty_per_sync=10):
        self.gallery_id = gallery_id
        # page size per sync request
        self.image_qty = image_qty_per_sync

    def get_gallery(self):
        """Return the page parcel for my gallery.

        Raises:
            NonexistentGalleryError: when no BellGallery with my id exists
        """
        try:
            gallery = BellGallery.objects.get(id=self.gallery_id)
        except ObjectDoesNotExist:
            raise NonexistentGalleryError(self.gallery_id)
        image_list = BellImage.objects.filter(parent_gallery=self.gallery_id)
        return PageGalleryParcelBuilder(gallery, image_list).create()
|
import re, sys
def usage():
    """Print how to invoke this script."""
    message = "Usage:\n%s [input file] [output file]" % (sys.argv[0])
    print(message)
def getcard(card_snippet):
    r"""Extract card texts from a LaTeX snippet.

    Finds every ``\carta{...}`` occurrence and normalizes its text:
    newlines become spaces, ``\puntini`` becomes the blank marker ``_``,
    ``{\small`` is dropped, and TeX escapes/quotes are undone.

    Args:
        card_snippet (str): LaTeX source containing \carta{...} commands.

    Returns:
        list[str]: the cleaned card texts, in order of appearance.
    """
    cards = []
    # fix: use raw strings - the old non-raw literals relied on invalid
    # escape sequences ('\c', '\p', '\%') that only worked by accident and
    # emit DeprecationWarnings (a future SyntaxError)
    matcher = re.compile(r"\\carta\{(.*?)\}", re.S | re.I)
    for match in re.findall(matcher, card_snippet):
        text = match.replace('\n', ' ').replace(r'\puntini', '_').replace(r'{\small', '').strip()
        text = text.replace(r'\%', '%')
        # convert TeX quoting to plain apostrophes
        text = text.replace('``', "'")
        text = text.replace("''", "'")
        cards.append(text)
    return cards
# command-line driver: extract white and black cards from the LaTeX deck
if len(sys.argv) != 3:
    usage()
    exit()

with open(sys.argv[1], 'r') as texfile:
    document = texfile.read()

# the first longtable holds the white cards, the second one the black cards
matcher_pages = re.compile(
    r'\\begin\{longtable\}\{.*?\}(.*?)\\end\{longtable\}.*?\\begin\{longtable\}\{.*?\}(.*?)\\end{longtable}', re.S | re.I)
m = re.search(matcher_pages, document)
whites = getcard(m[1])
blacks = getcard(m[2])

# sys.argv[2] is treated as an output directory
for suffix, cards in (("/white.md.txt", whites), ("/black.md.txt", blacks)):
    with open(sys.argv[2] + suffix, "w") as f:
        for card in cards:
            f.write(card + "\n")
|
import requests
import json
import sys
sys.path.append("..")
from utils.config import TOKEN
from utils.config import API_URL
class Api:
    """Thin wrapper around the stock/crypto REST API configured in utils.config.

    The request is described by a dict passed to the constructor; the
    `endpoint` key selects the API path, the other keys fill its placeholders.
    """

    def __init__(self, obj):
        """
        Args:
            obj (dict): request spec with optional keys
                "endpoint", "symbol", "sector", "period", "limit".
        """
        self.endpoint = obj.get("endpoint")
        self.symbol = obj.get("symbol")
        self.sector = obj.get("sector")
        self.period = obj.get("period")
        self.limit = obj.get("limit")

    def _endpoint(self):
        """Map my endpoint name to the API path (None when unknown)."""
        endpoints = {
            "symbols": "/ref-data/symbols",
            "crypto_symbols": "/ref-data/crypto/symbols",
            "crypto_prices": f"/crypto/{self.symbol}/price",
            "sectors": "/ref-data/sectors",
            "company": f"/stock/{self.symbol}/company",
            "historical": f"/stock/{self.symbol}/chart/{self.period}",
            "company_logo": f"/stock/{self.symbol}/logo",
            "news": f"/stock/{self.symbol}/news/last/"
        }
        return endpoints.get(self.endpoint)

    @staticmethod
    def _url(path):
        """Prefix the path with the configured API base URL."""
        return f"{API_URL}{path}"

    @staticmethod
    def _response_parser(response):
        """Return the decoded JSON body for a 200 response; otherwise log
        the problem and return None."""
        if response.status_code == 200:
            return response.json()
        if response.status_code == 404:
            print(f"Not Found - URL: {response.url}")
        elif response.status_code == 403:
            print(f"Api version error: {response.url}")
        else:
            # fix: was an f-string without any placeholder
            print("An error occurred")

    def mock(self):
        """Fetch my endpoint and store the result as a local mock JSON file."""
        r = self.get()
        # fix: raw string - the old literal relied on invalid escape
        # sequences ('\D', '\s', '\m') resolving to literal backslashes
        with open(rf"D:\Documents\GitHub\stocker\mock\{self.endpoint}_mock.json", "w") as write_file:
            json.dump(r, write_file)

    def get(self):
        """Perform the GET request for my endpoint and parse the response."""
        path = self._endpoint()
        r = requests.get(self._url(path=path), params=TOKEN)
        return self._response_parser(r)
|
from __future__ import annotations
import asyncio
from typing import Any
import asynctest.mock # type: ignore
import pytest # type: ignore
import pytest_mock._util # type: ignore
# route pytest-mock through asynctest.mock so `mocker` can create
# coroutine-aware mocks (see add_async_mocks below)
pytest_mock._util._mock_module = asynctest.mock
class EventLoopClockAdvancer:
    """
    Callable helper that shifts the event loop's notion of time forward.
    If the call is awaited, the calling task yields for an iteration so that
    handlers waiting on the advanced time get a chance to wake up.
    """

    __slots__ = ("offset", "loop", "sleep_duration", "_base_time")

    def __init__(self, loop, sleep_duration=1e-4):
        self.offset = 0.0
        self.sleep_duration = sleep_duration
        self._base_time = loop.time
        self.loop = loop
        # from now on the loop consults our offset-adjusted clock
        self.loop.time = self.time

    def time(self):
        """
        The event loop's real clock value shifted by the accumulated offset.
        """
        return self.offset + self._base_time()

    async def __call__(self, seconds):
        """
        Advance time by ``seconds``. Awaiting the result lets every task
        scheduled within the advanced window start running.
        """
        # let everything that is currently runnable proceed first
        await asyncio.sleep(self.sleep_duration)

        if seconds > 0:
            # jump the clock forward by the requested amount
            self.offset += seconds

            # tasks newly unblocked by the jump may have been scheduled for
            # the next pass through the event loop - yield once more
            await asyncio.sleep(self.sleep_duration)
@pytest.fixture
def advance_time(event_loop):
    """Fixture: an EventLoopClockAdvancer bound to the test's event loop."""
    return EventLoopClockAdvancer(event_loop)
@pytest.fixture
def mock_aiohttp(mocker: Any) -> None:
    """Fixture: replace aiohttp.ClientSession with an autospec'd mock."""
    mocker.patch('aiohttp.ClientSession', autospec=True)
@pytest.fixture
def mock_discord_bot(mocker: Any) -> None:
    """Fixture: replace discord.ext.commands.Bot with a mock."""
    mocker.patch('discord.ext.commands.Bot')
@pytest.fixture(autouse=True)
def add_async_mocks(mocker: Any) -> None:
    """Fixture (autouse): expose asynctest's CoroutineMock on every mocker."""
    mocker.CoroutineMock = mocker.mock_module.CoroutineMock
|
import myhdl
from myhdl import Signal, intbv, always, always_comb
def blinky(clock, led, button):
    """A simple LED blink example.

    Toggles ``led`` four times per second (quarter-period counter over the
    clock frequency); while ``button`` reads low the LED is forced on.

    This is intended to be used with the Xula2+StickIt motherboard
    and an LED+button PMOD board.

    Arguments (ports):
        clock: 12MHz external clock
        led: the LED port/pin
        button: the button port/pin

    Returns:
        the two MyHDL generators implementing the behavior
    """
    # counter wraps every quarter second of clock cycles
    maxcnt = int(clock.frequency) // 4
    cnt = Signal(intbv(0, min=0, max=maxcnt))
    toggle = Signal(bool(0))
    @always(clock.posedge)
    def beh_toggle():
        # free-running counter; flip `toggle` each time it wraps
        if cnt >= maxcnt-1:
            toggle.next = not toggle
            cnt.next = 0
        else:
            cnt.next = cnt + 1
    @always_comb
    def beh_assign():
        # button appears active-low: a 0 reading forces the LED on,
        # otherwise the LED follows the blink toggle - TODO confirm wiring
        if not button:
            led.next = True
        else:
            led.next = toggle
    return beh_toggle, beh_assign
# Package provides a simple widget-based framework for interactive visualization of algorithms.
# PyPi: https://pypi.org/project/interactive-visualization/
# Github: https://github.com/Malkovsky/interactive-visualization
# pip install interactive-visualization
|
def title1(content):
    """Wrap content in an h1 with the Bootstrap display-1 class."""
    template = "<h1 class='display-1'>{}</h1>"
    return template.format(content)
def title2(content):
    """Wrap content in an h2 with the Bootstrap display-2 class."""
    template = "<h2 class='display-2'>{}</h2>"
    return template.format(content)
def title3(content):
    """Wrap content in an h3 with the Bootstrap display-3 class."""
    template = "<h3 class='display-3'>{}</h3>"
    return template.format(content)
def title4(content):
    """Wrap content in an h4 with the Bootstrap display-4 class."""
    template = "<h4 class='display-4'>{}</h4>"
    return template.format(content)
def title5(content):
    """Wrap content in an h5 with the display-5 class."""
    template = "<h5 class='display-5'>{}</h5>"
    return template.format(content)
def title6(content):
    """Wrap content in an h6 with the display-6 class."""
    template = "<h6 class='display-6'>{}</h6>"
    return template.format(content)
def h1(content):
    """Wrap content in a plain h1 tag."""
    template = "<h1>{}</h1>"
    return template.format(content)
def h2(content):
    """Wrap content in a plain h2 tag."""
    template = "<h2>{}</h2>"
    return template.format(content)
def h3(content):
    """Wrap content in a plain h3 tag."""
    template = "<h3>{}</h3>"
    return template.format(content)
def h4(content):
    """Wrap content in a plain h4 tag."""
    template = "<h4>{}</h4>"
    return template.format(content)
def h5(content):
    """Wrap content in a plain h5 tag."""
    template = "<h5>{}</h5>"
    return template.format(content)
def h6(content):
    """Wrap content in a plain h6 tag."""
    template = "<h6>{}</h6>"
    return template.format(content)
def important(content):
    """Wrap content in a div with the Bootstrap lead class."""
    template = """<div class="lead">{}</div>"""
    return template.format(content)
def small(content):
    """Wrap content in a small tag."""
    template = "<small>{}</small>"
    return template.format(content)
def p(content):
    """Wrap content in a paragraph tag."""
    template = "<p>{}</p>"
    return template.format(content)
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
class RandomWalk2D:
    """2D random walk on the complex plane: each step is +-1 (real axis)
    or +-1j (imaginary axis), scaled by a step length."""

    def __init__(self):
        self.positions = []   # complex positions, filled by calc_positions
        self.steps = []       # random steps of the current walk
        self.time = 0         # number of steps of the current walk

    def render(self, time, step_length=1):
        """Draw `time` random steps and reset the position buffer."""
        self.time = time
        self.positions = np.zeros(time + 1, dtype=np.complex64)
        self.steps = np.random.choice([1, -1, 1j, -1j], time) * step_length

    def calc_positions(self):
        """Accumulate the steps into positions (positions[0] is the origin)."""
        for idx, delta in enumerate(self.steps):
            self.positions[idx + 1] = self.positions[idx] + delta
        return self.positions
def model_line_func(t, a, b):
    """Linear model a*t + b used when fitting the variance growth."""
    return b + t * a
def calc_var(time=100, step=10, l_s=(0.5, 1, 2), sample_numbers=100000, file_name=None):
    """
    Estimate and plot the variance <r^2(t)> of 2D random walks over time.

    For each step length in l_s either simulates sample_numbers walks of
    `time` steps (saving the ensemble under data/) or loads a previously
    saved ensemble, then fits a line to the positional variance sampled
    every `step` time units, saves one plot per step length under images/
    and prints the fitted slope with its error estimate.

    Args:
        time(int): number of steps per walk - default: 100
        step(int): sampling stride along the time axis - default: 10
        l_s(tuple): step lengths to simulate - default: (0.5, 1, 2)
        sample_numbers(int): number of walks per step length - default: 100000
        file_name(str): if given, load the ensemble from
            data/q5_<file_name>_ensemble.npy instead of simulating
    """
    if file_name is None:
        times = np.arange(0, time, step, dtype=int)
        model = RandomWalk2D()
        sample_positions_ensemble = np.zeros((len(l_s), sample_numbers, len(times)), dtype=np.complex64)
        for (l_i, l) in enumerate(l_s):
            print(str(l) + ':')
            for sample_number in range(sample_numbers):
                print('\r \t' + str(sample_number), end='')
                model.render(time, l)
                # keep only the positions at the strided sample times
                sample_positions_ensemble[l_i][sample_number] = model.calc_positions()[times]
            print(end='\n')
        data = {
            'times': times,
            'time': time,
            'step': step,
            'l_s': l_s,
            'positions': sample_positions_ensemble,
            'numbers': sample_numbers
        }
        np.save("data/q5_" + str(l_s) + "_" + str(time) + "_" + str(step) + "_" + str(sample_numbers) + "_ensemble", data)
    else:
        data = np.load('data/q5_' + file_name + '_ensemble.npy', allow_pickle=True).tolist()
        times = data['times']
        time = data['time']
        step = data['step']
        l_s = data['l_s']
        sample_positions_ensemble = data['positions']
        sample_numbers = data['numbers']
    # variance across the sample axis: one variance curve per step length
    positions_var = np.var(sample_positions_ensemble, axis=1)
    for (l_i, l) in enumerate(l_s):
        position_var = positions_var[l_i]
        var_fit_para, var_fit_error = curve_fit(model_line_func, times, position_var)
        # diagonal of the covariance matrix ~ variance of each fit parameter
        var_fit_error = np.diag(var_fit_error)
        var_fit = var_fit_para[1] + var_fit_para[0] * times
        plt.plot(times, var_fit, linestyle='--', color='g')
        plt.plot(times, position_var, linestyle='', marker='.', markersize=5)
        plt.xlabel(r'$t$')
        plt.ylabel(r'$<r^2(t)>$')
        plt.legend(['Fitted line', 'l = ' + str(l)])
        plt.savefig(
            "images/q5_var_" + str(l) + "_" + str(time) + "_" + str(step) + "_" + str(sample_numbers) + '.png')
        plt.show()
        print('p = ' + str(l) + ':')
        print('\t var(r) slope: ' + str(var_fit_para[0]) + ' ± ' + str(var_fit_error[0]))
calc_var(100, 10, (0.5, 1, 2), 100000)
|
import dynet as dy
import dynet_modules as dm
import numpy as np
import random
from utils import *
from time import time
from collections import defaultdict
from modules.seq_encoder import SeqEncoder
from modules.bag_encoder import BagEncoder
from modules.tree_encoder import TreeEncoder
class LinDecoder(Decoder):
    """Beam-search linearization decoder.

    Encodes a dependency tree and orders each head's domain using up to
    three pointer-network linearizers (left-to-right, right-to-left,
    head-to-dep), combining their beams by voting.
    """

    def __init__(self, args, model):
        super().__init__(args, model)
        self.train_input_key = 'input_tokens'
        self.train_output_key = 'gold_linearized_tokens'
        self.pred_input_key = 'input_tokens'
        self.pred_output_key = 'linearized_tokens'
        # optional tree representation encoders, selected via args.tree_vecs
        if 'seq' in self.args.tree_vecs:
            self.seq_encoder = SeqEncoder(self.args, self.model, 'lin_seq')
        if 'bag' in self.args.tree_vecs:
            self.bag_encoder = BagEncoder(self.args, self.model, 'lin_bag')
        if 'tree' in self.args.tree_vecs:
            self.tree_encoder = TreeEncoder(self.args, self.model, 'lin_tree')
        # decoding directions, selected via args.lin_decoders
        self.l2r_linearizer = L2RLinearizer(self.args, self.model) if 'l2r' in self.args.lin_decoders else None
        self.r2l_linearizer = R2LLinearizer(self.args, self.model) if 'r2l' in self.args.lin_decoders else None
        self.h2d_linearizer = H2DLinearizer(self.args, self.model) if 'h2d' in self.args.lin_decoders else None
        self.log(f'Initialized <{self.__class__.__name__}>, params = {self.model.parameter_count()}')

    def encode(self, sent):
        """Run the configured encoders over the sentence and sum their
        vectors into each token's 'lin_vec'."""
        if 'seq' in self.args.tree_vecs:
            self.seq_encoder.encode(sent, 'linearized_tokens' if self.args.pred_seq else 'gold_linearized_tokens')
        if 'bag' in self.args.tree_vecs:
            self.bag_encoder.encode(sent)
        if 'tree' in self.args.tree_vecs:
            self.tree_encoder.encode(sent, self.args.pred_tree)
        sum_vecs(sent, 'lin_vec', ['feat', 'lin_seq', 'lin_bag', 'lin_tree'])

    def predict(self, sent, pipeline=False):
        """Predict token orders by traversing the tree top-down, sorting each
        head's domain with the configured linearizers and voting."""
        sent_agenda = [SentSequence(sent)]
        self.encode(sent)
        for token in traverse_topdown(sent.root):
            all_agendas = []
            ranks = {}
            if 'l2r' in self.args.lin_decoders:
                init_seq = self.l2r_linearizer.init_seq(token)
                agenda, _ = self.l2r_linearizer.decode(init_seq)
                all_agendas.append(agenda)
            if 'r2l' in self.args.lin_decoders:
                init_seq = self.r2l_linearizer.init_seq(token)
                agenda, _ = self.r2l_linearizer.decode(init_seq)
                all_agendas.append(agenda)
            if 'h2d' in self.args.lin_decoders:
                init_seq = self.h2d_linearizer.init_seq(token)
                agenda, _ = self.h2d_linearizer.decode(init_seq)
                all_agendas.append(agenda)
            best_seqs = self.vote_best_seq(sent, all_agendas, self.args.beam_size)
            # tokens of the top voted order become this head's domain order
            token['linearized_domain'] = [t for t in best_seqs[0].linearized_tokens()]
            # extend every sentence-level hypothesis with each voted order
            new_agenda = []
            for sent_seq in sent_agenda:
                for seq in best_seqs:
                    new_seq = sent_seq.append(seq)
                    new_agenda.append(new_seq)
            new_agenda.sort(key=lambda x: -x.score)
            sent_agenda = new_agenda[:self.args.beam_size]
        sent['nbest_linearized_tokens'] = [seq.get_sorted_tokens() for seq in sent_agenda]
        sent['linearized_tokens'] = sent['nbest_linearized_tokens'][0]

    def train_one_step(self, sent):
        """One training step: beam-decode every domain, collecting hinge
        losses whenever the gold sequence is not ranked first.

        Returns:
            dict: timing, loss value/expression and domain accuracy counts
        """
        domain_total = domain_correct = loss_value = 0
        t0 = time()
        errs = []
        self.encode(sent)
        sent_agenda = [SentSequence(sent)]
        for token in traverse_topdown(sent.root):
            all_agendas = []
            # training left-to-right
            if 'l2r' in self.args.lin_decoders:
                gold_seq = self.l2r_linearizer.init_seq(token)
                while not self.l2r_linearizer.finished(gold_seq):
                    agenda, gold_seq = self.l2r_linearizer.decode(gold_seq, True)
                    all_agendas.append(agenda)
                    if gold_seq is not agenda[0]:
                        scores = [gold_seq.score_expr] + [seq.score_expr for seq in agenda if seq is not gold_seq]
                        errs.append(dy.hinge(dy.concatenate(scores), 0))
            # right-to-left
            if 'r2l' in self.args.lin_decoders:
                gold_seq = self.r2l_linearizer.init_seq(token)
                while not self.r2l_linearizer.finished(gold_seq):
                    agenda, gold_seq = self.r2l_linearizer.decode(gold_seq, True)
                    all_agendas.append(agenda)
                    if gold_seq is not agenda[0]:
                        scores = [gold_seq.score_expr] + [seq.score_expr for seq in agenda if seq is not gold_seq]
                        errs.append(dy.hinge(dy.concatenate(scores), 0))
            # head-to-dep
            if 'h2d' in self.args.lin_decoders:
                gold_seq = self.h2d_linearizer.init_seq(token)
                agenda = [gold_seq]
                if self.h2d_linearizer.finished(gold_seq):
                    all_agendas.append(agenda)
                else:
                    while not self.h2d_linearizer.finished(gold_seq):
                        agenda, gold_seq = self.h2d_linearizer.decode(gold_seq, True)
                        all_agendas.append(agenda)
                        # update only against all incorrect sequences (exclude lower scoring gold seq)
                        if gold_seq is not agenda[0]:
                            scores = [gold_seq.score_expr] + [seq.score_expr for seq in agenda if not seq.correct]
                            errs.append(dy.hinge(dy.concatenate(scores), 0))
            new_agenda = []
            best_seqs = self.vote_best_seq(sent, all_agendas, self.args.beam_size)
            for sent_seq in sent_agenda:
                for seq in best_seqs:
                    new_seq = sent_seq.append(seq)
                    new_agenda.append(new_seq)
            new_agenda.sort(key=lambda x: -x.score)
            sent_agenda = new_agenda[:self.args.beam_size]
            if token['deps']:
                domain_total += 1
                # NOTE(review): `agenda` here is the last linearizer's final
                # agenda from the loops above, not the voted beam - confirm
                # that this is the intended accuracy measure
                domain_correct += agenda[0].correct
        sent['nbest_linearized_tokens'] = [seq.get_sorted_tokens() for seq in sent_agenda]
        # random sequence from the beam to give the downstream training set more realistic input
        sent['linearized_tokens'] = random.choice(sent['nbest_linearized_tokens'])
        loss = dy.esum(errs) if errs else 0
        loss_value = loss.value() if loss else 0
        return {'time': time()-t0,
                'loss': loss_value,
                'loss_expr': loss,
                'total': domain_total,
                'correct': domain_correct
                }

    def evaluate(self, sents):
        """Report BLEU of the predicted orders against gold, plus random-pick
        and oracle-pick BLEU over the n-best beam when available."""
        gold_seqs = [sent[self.train_output_key] for sent in sents]
        pred_seqs = [sent[self.pred_output_key] for sent in sents]
        pred_bleu = eval_all(gold_seqs, pred_seqs)
        if 'nbest_linearized_tokens' in sents[0]:
            rand_seqs = [random.choice(sent['nbest_linearized_tokens']) for sent in sents]
            orac_seqs = [max([(sent_bleu(gs, ps), ps) for ps in sent['nbest_linearized_tokens']], key=lambda x: x[0])[1] \
                            for gs, sent in zip(gold_seqs, sents)]
            rand_bleu = eval_all(gold_seqs, rand_seqs)
            orac_bleu = eval_all(gold_seqs, orac_seqs)
            self.log(f'<PRED>{pred_bleu*100:.2f}')
            self.log(f'<RAND>{rand_bleu*100:.2f}')
            self.log(f'<ORAC>{orac_bleu*100:.2f}')
        return pred_bleu

    def vote_best_seq(self, sent, all_agendas, top=1):
        """Vote across the agendas of all linearizers and return the `top`
        sequences.

        Each agenda contributes (score - its minimum score) for every
        candidate token order; identical orders from different linearizers
        accumulate votes, and the highest-scoring representative of each
        order is kept.
        """
        all_seqs = defaultdict(float)
        ids2seq = {}
        for agenda in all_agendas:
            min_score = min(seq.score for seq in agenda)
            for seq in agenda:
                ids = seq.ids()
                all_seqs[ids] += (seq.score - min_score)
                if ids not in ids2seq or seq.score > ids2seq[ids].score:
                    ids2seq[ids] = seq
        # fix: the sort key previously closed over the leftover loop variable
        # `ids` (a constant after the loop) instead of the lambda argument,
        # so the candidates were never actually ranked by their votes
        sorted_ids = sorted(ids2seq, key=lambda x: -all_seqs[x])
        sorted_seqs = [ids2seq[ids] for ids in sorted_ids]
        return sorted_seqs[:top]
class L2RLinearizer:
    """Left-to-right pointer-network linearizer.

    Repeatedly points at the next token of a head's domain, keeping the
    `beam_size` best partial sequences; in training mode it also tracks the
    gold sequence for early-update hinge training.
    """
    def __init__(self, args, model):
        print('<L2RLinearizer>')
        self.args = args
        # pointer network variant selected by args.pointer_type
        Pointer = {'simple': dm.SimplePointer, 'glimpse':dm.GlimpsePointer, 'self':dm.SelfPointer}[self.args.pointer_type]
        self.pointer = Pointer(model, self.args.token_dim)
        self.seq_lstm = dy.VanillaLSTMBuilder(1, self.args.token_dim, self.args.token_dim, model)
        # learned start-of-sequence input for the LSTM
        self.init_vec = model.add_parameters(self.args.token_dim)
    def init_seq(self, token):
        """Start an empty sequence over the token's domain, seeded with the learned init vector."""
        return SequenceL2R(self.seq_lstm.initial_state().add_input(self.init_vec), token, [], token['domain'])
    def finished(self, seq):
        """A sequence is complete once no domain tokens remain to be placed."""
        return len(seq.rest) == 0
    def decode(self, gold_seq, train_mode=False):
        """Beam-search extension of the given (partial) sequence.

        In train_mode the gold continuation is tracked and decoding stops
        early as soon as it drops out of the beam (early update).

        Returns:
            tuple: (agenda, gold_seq) - the final beam and the tracked gold sequence
        """
        agenda = [gold_seq]
        steps = len(gold_seq.rest)
        for i in range(steps):
            new_agenda = []
            for seq in agenda:
                # score every remaining candidate token against the LSTM state
                cand_mat = dy.concatenate_cols([t.vecs['lin_vec'] for t in seq.rest])
                scores = self.pointer.point(seq.state.output(), cand_mat)
                # scores = dy.log_softmax(scores)
                for t, s in zip(seq.rest, scores):
                    if self.args.no_lin_constraint or seq.check_order(t):
                        new_seq = seq.append(t, s)
                        new_agenda.append(new_seq)
                        if train_mode and new_seq.is_gold():
                            gold_seq = new_seq
            new_agenda.sort(key=lambda x: -x.score)
            agenda = new_agenda[:self.args.beam_size]
            # early update: stop as soon as the gold sequence leaves the beam
            if train_mode and gold_seq not in agenda:
                break
        return agenda, gold_seq
class R2LLinearizer:
    """Right-to-left pointer-network linearizer.

    Mirror image of L2RLinearizer: builds each head's domain order from the
    right end, keeping the `beam_size` best partial sequences.
    """
    def __init__(self, args, model):
        print('<R2LLinearizer>')
        self.args = args
        # pointer network variant selected by args.pointer_type
        Pointer = {'simple': dm.SimplePointer, 'glimpse':dm.GlimpsePointer, 'self':dm.SelfPointer}[self.args.pointer_type]
        self.pointer = Pointer(model, self.args.token_dim)
        self.seq_lstm = dy.VanillaLSTMBuilder(1, self.args.token_dim, self.args.token_dim, model)
        # learned start-of-sequence input for the LSTM
        self.init_vec = model.add_parameters(self.args.token_dim)
    def init_seq(self, token):
        """Start an empty sequence over the token's domain, seeded with the learned init vector."""
        return SequenceR2L(self.seq_lstm.initial_state().add_input(self.init_vec), token, [], token['domain'])
    def finished(self, seq):
        """A sequence is complete once no domain tokens remain to be placed."""
        return len(seq.rest) == 0
    def decode(self, gold_seq, train_mode=False):
        """Beam-search extension of the given (partial) sequence.

        In train_mode the gold continuation is tracked and decoding stops
        early as soon as it drops out of the beam (early update).

        Returns:
            tuple: (agenda, gold_seq) - the final beam and the tracked gold sequence
        """
        agenda = [gold_seq]
        steps = len(gold_seq.rest)
        for i in range(steps):
            new_agenda = []
            for seq in agenda:
                # score every remaining candidate token against the LSTM state
                cand_mat = dy.concatenate_cols([t.vecs['lin_vec'] for t in seq.rest])
                scores = self.pointer.point(seq.state.output(), cand_mat)
                # scores = dy.log_softmax(scores)
                for t, s in zip(seq.rest, scores):
                    if self.args.no_lin_constraint or seq.check_order(t):
                        new_seq = seq.append(t, s)
                        # print(new_seq, 'g' if new_seq.is_gold() else 'w')
                        new_agenda.append(new_seq)
                        if train_mode and new_seq.is_gold():
                            gold_seq = new_seq
            new_agenda.sort(key=lambda x: -x.score)
            agenda = new_agenda[:self.args.beam_size]
            # early update: stop as soon as the gold sequence leaves the beam
            if train_mode and gold_seq not in agenda:
                break
        return agenda, gold_seq
class H2DLinearizer:
    """Head-to-dependents beam-search linearizer.

    Grows each sequence outwards from the head on both sides (see
    SequenceH2D): one pointer/LSTM pair scores left-side candidates, another
    the right side.  Hypotheses that realize the same token order through
    different derivations are merged, keeping the highest-scoring one.
    """
    def __init__(self, args, model):
        print('<H2DLinearizer>')
        self.args = args
        # Candidate scorer; the pointer variant is selected via CLI arguments.
        Pointer = {'simple': dm.SimplePointer, 'glimpse':dm.GlimpsePointer, 'self':dm.SelfPointer}[self.args.pointer_type]
        # Separate pointers and LSTMs for the left- and right-growing ends.
        self.l_pointer = Pointer(model, self.args.token_dim, self.args.token_dim)
        self.r_pointer = Pointer(model, self.args.token_dim, self.args.token_dim)
        self.h2l_lstm = dy.VanillaLSTMBuilder(1, self.args.token_dim, self.args.token_dim, model)
        self.h2r_lstm = dy.VanillaLSTMBuilder(1, self.args.token_dim, self.args.token_dim, model)
    def finished(self, seq):
        # A sequence is complete once every dependent has been placed.
        return len(seq.rest) == 0
    def init_seq(self, token):
        """Start a sequence for `token`: both direction LSTMs consume its
        vector; the candidate set is its non-empty dependents."""
        lstate = self.h2l_lstm.initial_state().add_input(token.vecs['lin_vec'])
        rstate = self.h2r_lstm.initial_state().add_input(token.vecs['lin_vec'])
        return SequenceH2D(lstate, rstate, token, [t for t in token['deps'] if t.not_empty()])
        # return SequenceH2D(lstate, rstate, token, token['deps'])
    def decode(self, gold_seq, train_mode=False):
        """Beam-search decode with duplicate merging.

        Returns (beam, gold_seq).  NOTE(review): gold_seq is reset to None at
        the top of every step and only re-assigned when train_mode is True, so
        outside training the second return value is always None — confirm
        callers rely only on the beam in that case.
        """
        agenda = [gold_seq]
        steps = len(gold_seq.rest)
        for i in range(steps):
            new_agenda = []
            # The gold hypothesis is re-derived each step: among all gold
            # extensions the highest-scoring one is kept (the head-outward
            # construction allows spurious ambiguity).
            gold_seq = None
            # ids2seq merges hypotheses with identical token orders.
            ids2seq = {}
            for seq in agenda:
                cand_mat = dy.concatenate_cols([t.vecs['lin_vec'] for t in seq.rest])
                l_scores = self.l_pointer.point(seq.lstate.output(), cand_mat)
                r_scores = self.r_pointer.point(seq.rstate.output(), cand_mat)
                # Left extensions: a constrained token may only be placed when
                # it is the last remaining one in l_order (inside-out growth).
                for t, s in zip(seq.rest, l_scores):
                    if self.args.no_lin_constraint or t not in seq.l_order or t is seq.l_order[-1]:
                        new_seq = seq.append_left(t, s)
                        ids = new_seq.ids()
                        if ids not in ids2seq or new_seq.score > ids2seq[ids].score:
                            ids2seq[ids] = new_seq
                        if train_mode and new_seq.is_gold() and (not gold_seq or new_seq.score > gold_seq.score):
                            gold_seq = new_seq
                # Right extensions: symmetric, gated on the first of r_order.
                for t, s in zip(seq.rest, r_scores):
                    if self.args.no_lin_constraint or t not in seq.r_order or t is seq.r_order[0]:
                        new_seq = seq.append_right(t, s)
                        ids = new_seq.ids()
                        if ids not in ids2seq or new_seq.score > ids2seq[ids].score:
                            ids2seq[ids] = new_seq
                        if train_mode and new_seq.is_gold() and (not gold_seq or new_seq.score > gold_seq.score):
                            gold_seq = new_seq
            new_agenda = list(ids2seq.values())
            new_agenda.sort(key=lambda x: -x.score)
            agenda = new_agenda[:self.args.beam_size]
            # Early update: stop once the gold hypothesis leaves the beam.
            if train_mode and gold_seq not in agenda:
                break
        return agenda, gold_seq
class SequenceL2R:
    """A partial left-to-right ordering of one head's domain.

    Holds the LSTM state over the emitted prefix, the tokens already placed
    (`tokens`), the tokens still to place (`rest`), and the bookkeeping used
    to track the gold sequence during training.
    """
    def __init__(self, state, head, tokens, rest, lost_rest=None, prev=None):
        # Bug fix: `lost_rest` previously defaulted to a shared mutable list
        # ([]); use the None sentinel so instances can never alias it.
        self.state = state
        self.head = head
        self.tokens = tokens  # tokens already linearized, left to right
        self.rest = rest      # tokens still waiting to be placed
        self.gold_lost_rest = [] if lost_rest is None else lost_rest
        self.prev = prev      # the sequence this one extends; None at start
        if prev is None:
            # Fresh sequence: zero scores, gold bookkeeping from the head.
            self.score = 0
            self.score_expr = 0
            self.correct = True
            self.gold_lost_rest = self.head['lost']
            self.required_order = self.head['order'][:]
        else:
            # Extension: inherit scores and remaining ordering constraints.
            self.score = prev.score
            self.score_expr = prev.score_expr
            self.required_order = prev.required_order[:]
    def __repr__(self):
        return ' '.join(str(t['original_id']) for t in self.tokens) + '(' +' '.join(str(t['original_id']) for t in self.rest) + ')'
    def ids(self):
        """Tuple of tids of the tokens placed so far."""
        return tuple(t['tid'] for t in self.tokens)
    def lemmas(self):
        """Tuple of lemmas of the tokens placed so far."""
        return tuple(t['lemma'] for t in self.tokens)
    def oids(self):
        """Original (surface) ids of the tokens placed so far."""
        return [t['original_id'] for t in self.tokens]
    def linearized_tokens(self):
        return self.tokens
    def check_order(self, tk):
        # tk may be appended if it is unconstrained, or if it is the next
        # token required by the left-to-right ordering constraints.
        return tk not in self.required_order or tk is self.required_order[0]
    def append(self, tk, s):
        """Return a new sequence with `tk` appended and score `s` added."""
        state = self.state.add_input(tk.vecs['lin_vec'])
        lost_rest = [t for t in self.gold_lost_rest if t['original_id'] != tk['original_id']] # non-empty only in training
        seq = SequenceL2R(state, self.head, self.tokens+[tk], [t for t in self.rest if t is not tk], lost_rest, self)
        seq.score_expr += s
        seq.score += s.value()
        if tk in seq.required_order:
            seq.required_order.remove(tk)
        return seq
    def is_gold(self, lost=False):
        """True iff the prefix so far is consistent with the original order:
        the last placed token precedes every unplaced (and optionally every
        lost) token.  Must only be called on appended sequences (prev set)."""
        rest_ids = [t['original_id'] for t in self.rest + (self.gold_lost_rest if lost else [])]
        self.correct = self.prev.correct and all(self.tokens[-1]['original_id'] < i for i in rest_ids)
        return self.correct
class SequenceR2L:
    """A partial right-to-left ordering of one head's domain.

    Tokens are prepended, so `tokens` is always the suffix of the final
    order; `rest` holds the tokens not yet placed.
    """

    def __init__(self, state, head, tokens, rest, prev=None):
        self.state = state
        self.head = head
        self.tokens = tokens
        self.rest = rest
        self.prev = prev
        if prev is not None:
            # Extension: inherit scores and remaining ordering constraints.
            self.score = prev.score
            self.score_expr = prev.score_expr
            self.required_order = list(prev.required_order)
        else:
            # Fresh sequence: zero scores, constraints from the head.
            self.score = 0
            self.score_expr = 0
            self.correct = True
            self.required_order = list(self.head['order'])

    def __repr__(self):
        placed = ' '.join(str(t['original_id']) for t in self.tokens)
        pending = ' '.join(str(t['original_id']) for t in self.rest)
        return placed + '(' + pending + ')'

    def linearized_tokens(self):
        return self.tokens

    def ids(self):
        """Tuple of tids of the tokens placed so far."""
        return tuple(t['tid'] for t in self.tokens)

    def lemmas(self):
        """Tuple of lemmas of the tokens placed so far."""
        return tuple(t['lemma'] for t in self.tokens)

    def oids(self):
        """Original (surface) ids of the tokens placed so far."""
        return [t['original_id'] for t in self.tokens]

    def check_order(self, tk):
        # tk may be placed if it is unconstrained, or if it is the last
        # remaining constrained token (R2L places the rightmost first).
        return tk not in self.required_order or tk is self.required_order[-1]

    def append(self, tk, s):
        """Return a new sequence with `tk` prepended and score `s` added."""
        new_state = self.state.add_input(tk.vecs['lin_vec'])
        remaining = [t for t in self.rest if t is not tk]
        seq = SequenceR2L(new_state, self.head, [tk] + self.tokens, remaining, self)
        seq.score_expr += s
        seq.score += s.value()
        if tk in seq.required_order:
            seq.required_order.remove(tk)
        return seq

    def is_gold(self):
        """True iff the suffix so far is consistent with the original order:
        the first placed token follows every unplaced one."""
        leftmost = self.tokens[0]['original_id']
        self.correct = self.prev.correct and all(
            leftmost > t['original_id'] for t in self.rest)
        return self.correct
class SequenceH2D:
    """
    Double-ended Sequence, starts with the head token,
    appends dependents on both sides from near to far,
    allows spurious ambiguity of the gold sequence,
    """
    def __init__(self, lstate, rstate, head, rest, lost_rest=None, ldeps=None, rdeps=None, prev=None):
        # Bug fix: lost_rest/ldeps/rdeps previously used shared mutable
        # defaults ([]); the None-sentinel idiom prevents accidental
        # cross-instance aliasing of those lists.
        self.lstate = lstate
        self.rstate = rstate
        self.head = head
        self.ldeps = [] if ldeps is None else ldeps # grow inside-out
        self.rdeps = [] if rdeps is None else rdeps # grow inside-out
        self.rest = rest
        self.gold_lost_rest = [] if lost_rest is None else lost_rest
        self.prev = prev
        if prev is None:
            # Fresh sequence: zero scores, gold bookkeeping from the head.
            self.score = 0
            self.score_expr = 0
            self.correct = True
            self.l_order = self.head['l_order'][:]
            self.r_order = self.head['r_order'][:]
            self.gold_lost_rest = self.head['lost']
            # print('lost', [t['original_id'] for t in self.head['lost']])
        else:
            # Extension: inherit scores, constraints and correctness.
            self.score = prev.score
            self.score_expr = prev.score_expr
            self.l_order = prev.l_order[:]
            self.r_order = prev.r_order[:]
            self.correct = prev.correct
    def ids(self):
        """Tuple of tids in surface order (left deps, head, right deps)."""
        return tuple(t['tid'] for t in self.ldeps + [self.head] + self.rdeps)
        # return tuple(t['tid'] for t in self.linearized_tokens())
    def oids(self):
        """Original ids in surface order."""
        return [t['original_id'] for t in self.ldeps + [self.head] + self.rdeps]
    def linearized_tokens(self):
        # all content tokens (excluding <$$$>)
        return [t for t in (self.ldeps + [self.head] + self.rdeps) if t['lemma'] != '<$$$>']
    def __repr__(self):
        return ' '.join(str(t) for t in self.ldeps) + \
                '<' + str(self.head) + '>' + \
                ' '.join(str(t) for t in self.rdeps) + \
                ' [' + ' '.join(str(t) for t in self.rest) + ']' +\
                ' {' + ' '.join(str(t) for t in self.gold_lost_rest) + '}'
    def lmost(self):
        """Leftmost placed token (the head if no left dependents yet)."""
        return self.ldeps[0] if self.ldeps else self.head
    def rmost(self):
        """Rightmost placed token (the head if no right dependents yet)."""
        return self.rdeps[-1] if self.rdeps else self.head
    def append_left(self, tk, s):
        """Return a new sequence with `tk` placed on the left end."""
        lstate = self.lstate.add_input(tk.vecs['lin_vec'])
        rstate = self.rstate
        ldeps = [tk] + self.ldeps
        rdeps = self.rdeps
        lost_rest = [t for t in self.gold_lost_rest if t['original_id'] != tk['original_id']] # non-empty only in training
        seq = SequenceH2D(lstate, rstate, self.head, [t for t in self.rest if t is not tk], lost_rest, ldeps, rdeps, self)
        seq.score_expr += s
        seq.score += s.value()
        if tk in seq.l_order:
            seq.l_order.remove(tk)
        return seq
    def append_right(self, tk, s):
        """Return a new sequence with `tk` placed on the right end."""
        lstate = self.lstate
        rstate = self.rstate.add_input(tk.vecs['lin_vec'])
        ldeps = self.ldeps
        rdeps = self.rdeps + [tk]
        lost_rest = [t for t in self.gold_lost_rest if t['original_id'] != tk['original_id']] # non-empty only in training
        seq = SequenceH2D(lstate, rstate, self.head, [t for t in self.rest if t is not tk], lost_rest, ldeps, rdeps, self)
        seq.score_expr += s
        seq.score += s.value()
        if tk in seq.r_order:
            seq.r_order.remove(tk)
        return seq
    def is_gold(self, lost=False):
        """True iff the placed tokens are in original order, contiguous, and
        no unplaced token falls inside their span.  The <$$$> boundary tokens
        additionally forbid any unplaced token beyond them.  Must only be
        called on appended sequences (prev set)."""
        lmost, rmost = self.lmost(), self.rmost()
        rest_ids = [t['original_id'] for t in self.rest + (self.gold_lost_rest if lost else [])]
        ids = [t['original_id'] for t in self.linearized_tokens()]
        if lmost['lemma'] == '<$$$>' and rest_ids and min(rest_ids) < lmost['original_id']:
            self.correct = False
        elif rmost['lemma'] == '<$$$>' and rest_ids and max(rest_ids) > rmost['original_id']:
            self.correct = False
        else:
            self.correct = self.prev.correct and len(ids) == len(set(ids)) and ids == sorted(ids) and \
                    not any(min(ids) < tid < max(ids) for tid in rest_ids)
        return self.correct
class SentSequence:
    """Sentence-level linearization state.

    Maps each head tid to the ordered token list of that head's domain
    (tid 0 is the root's domain) and accumulates the per-domain scores.
    """
    def __init__(self, sent, domain_seqs=None):
        self.sorted_tokens = []   # cache for flatten(); filled lazily
        self.sent = sent
        self.score_expr = 0
        self.score = 0
        self.inv_num = None       # cache for get_inv_num(); filled lazily
        # Bug fix: the old signature used a mutable default ({}).  Because the
        # default dict was mutated two lines below, every SentSequence built
        # without an explicit mapping shared one dict across instances — and
        # across sentences.  The None-sentinel idiom gives each instance its
        # own fresh mapping.
        self.domain_seqs = {} if domain_seqs is None else domain_seqs
        if not self.domain_seqs:
            self.domain_seqs[0] = self.sent.root['deps']
    def append(self, domain_seq):
        """Return a new SentSequence with `domain_seq`'s ordering recorded
        under its head tid and its scores accumulated."""
        new_seq = SentSequence(self.sent, copy(self.domain_seqs))
        new_seq.domain_seqs[domain_seq.head['tid']] = domain_seq.linearized_tokens()
        new_seq.score_expr = self.score_expr + domain_seq.score_expr
        new_seq.score = self.score + domain_seq.score
        return new_seq
    def is_gold(self):
        # NOTE(review): domain_seqs maps tid -> token list, so iterating it
        # yields int keys (and its values are lists); neither carries a
        # `.correct` attribute, and this raises AttributeError when called.
        # Kept as-is pending clarification of what should be checked (likely
        # the Sequence* objects rather than their token lists).
        return all(seq.correct for seq in self.domain_seqs)
    def get_sorted_tokens(self):
        """Flatten the per-domain orderings into one token list (cached)."""
        if not self.sorted_tokens:
            self.sorted_tokens = self.flatten(self.sent.root)
        return self.sorted_tokens
    def get_inv_num(self):
        """Square root of the inversion count of the flattened order (cached)."""
        if self.inv_num is None:
            self.inv_num = inverse_num(self.get_sorted_tokens()) ** 0.5
        return self.inv_num
    def flatten(self, head):
        """Recursively expand `head`'s domain; non-head entries recurse into
        their own domains, the head itself is emitted (root tid 0 is skipped)."""
        return sum([(self.flatten(tk) if (tk is not head) else ([tk] if tk['tid'] else []) ) \
                for tk in self.domain_seqs[head['tid']]], [])
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-09 17:11
from __future__ import unicode_literals
from django.db import migrations
# Legacy display labels of the Subscribe.sub_type choice field; the data
# migration below creates one SubscriptionType row per label.
subscription_choices = ['A', 'B', 'A+B', 'AdL', 'Student', 'Professor',
                        'Other']
def subtype_choice_to_foreignkey(apps, schema_editor):
    """Copy each Subscribe's legacy sub_type choice onto the new
    SubscriptionType foreign key (`category`)."""
    Subscribe = apps.get_model("accounts", "Subscribe")
    SubscriptionType = apps.get_model("accounts", "SubscriptionType")
    # Materialise one SubscriptionType row per legacy choice label and keep
    # them in a name-keyed lookup table.
    subscription_types = {
        name: SubscriptionType.objects.get_or_create(name=name)[0]
        for name in subscription_choices
    }
    # Point every existing subscription at its matching SubscriptionType;
    # rows without a sub_type are left untouched.
    for subscription in Subscribe.objects.all():
        if subscription.sub_type:
            # get_sub_type_display() yields the human-readable label, which
            # is what the rows above were keyed by.
            display = subscription.get_sub_type_display()
            subscription.category = subscription_types[display]
            subscription.save()
class Migration(migrations.Migration):
    # Data migration: copies the legacy sub_type choice of every Subscribe
    # row onto the new SubscriptionType foreign key.

    dependencies = [
        ('accounts', '0003_new_subscription_type_model_foreignkey'),
    ]

    operations = [
        # Reverse is a no-op: the old choice column still holds its values,
        # so unapplying simply leaves the FK data behind.
        migrations.RunPython(subtype_choice_to_foreignkey,
                             migrations.RunPython.noop)
    ]
|
#!/usr/bin/python3
import requests
import sys
from collections import defaultdict
from datetime import datetime
from tqdm import tqdm
def _problem_key(submission):
    """Return a stable identifier for the submission's problem.

    Bug fix: the original inner helper (named `id`, shadowing the builtin)
    computed this expression but never returned it, so every "solved" entry
    was None and the distinct-problem count collapsed to at most 1.
    """
    problem = submission['problem']
    # Problems outside a contest carry no contestId; fall back to name only
    # so such problems are still distinguished from each other.
    if 'contestId' in problem:
        return problem['name'] + str(problem['contestId'])
    return problem['name']


def main():
    """Print handle, rating, location and distinct solved-problem count for
    every active rated Codeforces user, as CSV on stdout.

    Makes one user.status API call per user, so this runs for a long time
    and is subject to Codeforces rate limiting.
    """
    url = 'https://codeforces.com/api/user.ratedList?activeOnly=true'
    r = requests.get(url)
    users = r.json()['result']
    print('handle,rating,country,city,solved')
    for user in tqdm(users):
        handle = user['handle']
        rating = user['rating']
        # country/city are optional fields in the API response.
        country = user.get('country', '')
        city = user.get('city', '')
        url = f'https://codeforces.com/api/user.status?handle={handle}'
        r = requests.get(url)
        submissions = r.json()['result']
        # Count distinct accepted problems; .get guards against entries that
        # are still in the judging queue and carry no verdict yet.
        solved = {_problem_key(s) for s in submissions
                  if s.get('verdict') == 'OK'}
        print(f"{handle}, {rating}, {country}, {city}, {len(solved)}")


if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'settings_ui.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    def setupUi(self, Form):
        """Build the settings dialog's widget tree on `Form`.

        Generated by pyuic5 from settings_ui.ui — regenerate instead of
        editing by hand (see the file header warning).  Layout overview:
        a tab widget with a "main" tab (sweep/list frequency settings,
        measurement options, hardware settings) and a "positioner" tab
        (jog arrows, position read-outs, offset buttons), plus an
        OK/Cancel button box at the bottom.
        """
        # --- top-level form and outer layout -------------------------------
        Form.setObjectName("Form")
        Form.resize(437, 600)
        Form.setMinimumSize(QtCore.QSize(425, 525))
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(Form)
        self.verticalLayout_2.setSpacing(0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # --- main tab widget ----------------------------------------------
        self.settingsTabs = QtWidgets.QTabWidget(Form)
        self.settingsTabs.setEnabled(True)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.settingsTabs.sizePolicy().hasHeightForWidth())
        self.settingsTabs.setSizePolicy(sizePolicy)
        self.settingsTabs.setMinimumSize(QtCore.QSize(410, 0))
        self.settingsTabs.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.settingsTabs.setAutoFillBackground(True)
        self.settingsTabs.setObjectName("settingsTabs")
        # --- "main" tab: sweep-type sub-tabs ------------------------------
        self.main_tab_4 = QtWidgets.QWidget()
        self.main_tab_4.setObjectName("main_tab_4")
        self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.main_tab_4)
        self.verticalLayout_6.setContentsMargins(-1, -1, -1, 9)
        self.verticalLayout_6.setSpacing(6)
        self.verticalLayout_6.setObjectName("verticalLayout_6")
        self.sweepTypeTabs_4 = QtWidgets.QTabWidget(self.main_tab_4)
        self.sweepTypeTabs_4.setEnabled(True)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.sweepTypeTabs_4.sizePolicy().hasHeightForWidth())
        self.sweepTypeTabs_4.setSizePolicy(sizePolicy)
        self.sweepTypeTabs_4.setMinimumSize(QtCore.QSize(383, 140))
        self.sweepTypeTabs_4.setMaximumSize(QtCore.QSize(16777215, 300))
        self.sweepTypeTabs_4.setObjectName("sweepTypeTabs_4")
        # --- linear sweep tab: start/stop frequency and point count -------
        self.linear_tab_4 = QtWidgets.QWidget()
        self.linear_tab_4.setObjectName("linear_tab_4")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.linear_tab_4)
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.gridLayout_linear_4 = QtWidgets.QGridLayout()
        self.gridLayout_linear_4.setObjectName("gridLayout_linear_4")
        self.start_label_4 = QtWidgets.QLabel(self.linear_tab_4)
        self.start_label_4.setObjectName("start_label_4")
        self.gridLayout_linear_4.addWidget(self.start_label_4, 0, 0, 1, 1)
        self.comboBox_4 = QtWidgets.QComboBox(self.linear_tab_4)
        self.comboBox_4.setObjectName("comboBox_4")
        self.comboBox_4.addItem("")
        self.comboBox_4.addItem("")
        self.comboBox_4.addItem("")
        self.comboBox_4.addItem("")
        self.gridLayout_linear_4.addWidget(self.comboBox_4, 2, 1, 1, 1)
        self.MHz_label_7 = QtWidgets.QLabel(self.linear_tab_4)
        self.MHz_label_7.setObjectName("MHz_label_7")
        self.gridLayout_linear_4.addWidget(self.MHz_label_7, 1, 2, 1, 1)
        self.stop_label_4 = QtWidgets.QLabel(self.linear_tab_4)
        self.stop_label_4.setObjectName("stop_label_4")
        self.gridLayout_linear_4.addWidget(self.stop_label_4, 1, 0, 1, 1)
        self.points_label_4 = QtWidgets.QLabel(self.linear_tab_4)
        self.points_label_4.setObjectName("points_label_4")
        self.gridLayout_linear_4.addWidget(self.points_label_4, 2, 0, 1, 1)
        self.lineEdit_start_4 = QtWidgets.QLineEdit(self.linear_tab_4)
        self.lineEdit_start_4.setObjectName("lineEdit_start_4")
        self.gridLayout_linear_4.addWidget(self.lineEdit_start_4, 0, 1, 1, 1)
        self.MHz_label_8 = QtWidgets.QLabel(self.linear_tab_4)
        self.MHz_label_8.setObjectName("MHz_label_8")
        self.gridLayout_linear_4.addWidget(self.MHz_label_8, 0, 2, 1, 1)
        self.lineEdit_stop_4 = QtWidgets.QLineEdit(self.linear_tab_4)
        self.lineEdit_stop_4.setObjectName("lineEdit_stop_4")
        self.gridLayout_linear_4.addWidget(self.lineEdit_stop_4, 1, 1, 1, 1)
        self.verticalLayout_4.addLayout(self.gridLayout_linear_4)
        self.sweepTypeTabs_4.addTab(self.linear_tab_4, "")
        # --- frequency-list tab: free-form frequency entry + import -------
        self.list_tab_4 = QtWidgets.QWidget()
        self.list_tab_4.setObjectName("list_tab_4")
        self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.list_tab_4)
        self.verticalLayout_11.setObjectName("verticalLayout_11")
        self.gridLayout_list_5 = QtWidgets.QGridLayout()
        self.gridLayout_list_5.setObjectName("gridLayout_list_5")
        self.list_label_5 = QtWidgets.QLabel(self.list_tab_4)
        self.list_label_5.setObjectName("list_label_5")
        self.gridLayout_list_5.addWidget(self.list_label_5, 0, 0, 1, 1)
        self.list_MHz_label_5 = QtWidgets.QLabel(self.list_tab_4)
        self.list_MHz_label_5.setObjectName("list_MHz_label_5")
        self.gridLayout_list_5.addWidget(self.list_MHz_label_5, 0, 2, 1, 1)
        self.lineEdit_list_5 = QtWidgets.QLineEdit(self.list_tab_4)
        self.lineEdit_list_5.setText("")
        self.lineEdit_list_5.setObjectName("lineEdit_list_5")
        self.gridLayout_list_5.addWidget(self.lineEdit_list_5, 0, 1, 1, 1)
        self.toolButton = QtWidgets.QToolButton(self.list_tab_4)
        self.toolButton.setObjectName("toolButton")
        self.gridLayout_list_5.addWidget(self.toolButton, 1, 2, 1, 1)
        self.label_2 = QtWidgets.QLabel(self.list_tab_4)
        self.label_2.setObjectName("label_2")
        self.gridLayout_list_5.addWidget(self.label_2, 1, 1, 1, 1)
        self.verticalLayout_11.addLayout(self.gridLayout_list_5)
        self.sweepTypeTabs_4.addTab(self.list_tab_4, "")
        self.verticalLayout_6.addWidget(self.sweepTypeTabs_4)
        # --- measurement options frame (impedance/calibration/averaging,
        #     positioner movement mode, angular resolution) ----------------
        self.global_settings_frame_4 = QtWidgets.QFrame(self.main_tab_4)
        self.global_settings_frame_4.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.global_settings_frame_4.setFrameShadow(QtWidgets.QFrame.Raised)
        self.global_settings_frame_4.setObjectName("global_settings_frame_4")
        self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.global_settings_frame_4)
        self.verticalLayout_7.setObjectName("verticalLayout_7")
        self.gridLayout_global_settings_7 = QtWidgets.QGridLayout()
        self.gridLayout_global_settings_7.setObjectName("gridLayout_global_settings_7")
        self.posMov_label_7 = QtWidgets.QLabel(self.global_settings_frame_4)
        self.posMov_label_7.setObjectName("posMov_label_7")
        self.gridLayout_global_settings_7.addWidget(self.posMov_label_7, 3, 0, 1, 1)
        self.Averaging_label_7 = QtWidgets.QLabel(self.global_settings_frame_4)
        self.Averaging_label_7.setObjectName("Averaging_label_7")
        self.gridLayout_global_settings_7.addWidget(self.Averaging_label_7, 2, 0, 1, 1)
        self.Averaging_comboBox_7 = QtWidgets.QComboBox(self.global_settings_frame_4)
        self.Averaging_comboBox_7.setObjectName("Averaging_comboBox_7")
        self.Averaging_comboBox_7.addItem("")
        self.Averaging_comboBox_7.addItem("")
        self.gridLayout_global_settings_7.addWidget(self.Averaging_comboBox_7, 2, 1, 1, 1)
        # Radio buttons are grouped so yes/no pairs are mutually exclusive:
        # buttonGroup = impedance, buttonGroup_2 = calibration,
        # buttonGroup_3 = positioner movement (continuous/discrete).
        self.Calibration_radioButton_n_7 = QtWidgets.QRadioButton(self.global_settings_frame_4)
        self.Calibration_radioButton_n_7.setChecked(True)
        self.Calibration_radioButton_n_7.setObjectName("Calibration_radioButton_n_7")
        self.buttonGroup_2 = QtWidgets.QButtonGroup(Form)
        self.buttonGroup_2.setObjectName("buttonGroup_2")
        self.buttonGroup_2.addButton(self.Calibration_radioButton_n_7)
        self.gridLayout_global_settings_7.addWidget(self.Calibration_radioButton_n_7, 1, 2, 1, 1)
        self.cont_radioButton_7 = QtWidgets.QRadioButton(self.global_settings_frame_4)
        self.cont_radioButton_7.setObjectName("cont_radioButton_7")
        self.buttonGroup_3 = QtWidgets.QButtonGroup(Form)
        self.buttonGroup_3.setObjectName("buttonGroup_3")
        self.buttonGroup_3.addButton(self.cont_radioButton_7)
        self.gridLayout_global_settings_7.addWidget(self.cont_radioButton_7, 3, 1, 1, 1)
        self.Impedance_label_7 = QtWidgets.QLabel(self.global_settings_frame_4)
        self.Impedance_label_7.setObjectName("Impedance_label_7")
        self.gridLayout_global_settings_7.addWidget(self.Impedance_label_7, 0, 0, 1, 1)
        self.res_doubleSpinBox_7 = QtWidgets.QDoubleSpinBox(self.global_settings_frame_4)
        self.res_doubleSpinBox_7.setMinimum(0.5)
        self.res_doubleSpinBox_7.setMaximum(45.0)
        self.res_doubleSpinBox_7.setSingleStep(0.01)
        self.res_doubleSpinBox_7.setProperty("value", 5.0)
        self.res_doubleSpinBox_7.setObjectName("res_doubleSpinBox_7")
        self.gridLayout_global_settings_7.addWidget(self.res_doubleSpinBox_7, 4, 1, 1, 1)
        self.res_label_degrees_7 = QtWidgets.QLabel(self.global_settings_frame_4)
        self.res_label_degrees_7.setObjectName("res_label_degrees_7")
        self.gridLayout_global_settings_7.addWidget(self.res_label_degrees_7, 4, 2, 1, 1)
        self.Impedance_radioButton_n_7 = QtWidgets.QRadioButton(self.global_settings_frame_4)
        self.Impedance_radioButton_n_7.setChecked(True)
        self.Impedance_radioButton_n_7.setObjectName("Impedance_radioButton_n_7")
        self.buttonGroup = QtWidgets.QButtonGroup(Form)
        self.buttonGroup.setObjectName("buttonGroup")
        self.buttonGroup.addButton(self.Impedance_radioButton_n_7)
        self.gridLayout_global_settings_7.addWidget(self.Impedance_radioButton_n_7, 0, 2, 1, 1)
        self.Impedance_radioButton_y_7 = QtWidgets.QRadioButton(self.global_settings_frame_4)
        self.Impedance_radioButton_y_7.setObjectName("Impedance_radioButton_y_7")
        self.buttonGroup.addButton(self.Impedance_radioButton_y_7)
        self.gridLayout_global_settings_7.addWidget(self.Impedance_radioButton_y_7, 0, 1, 1, 1)
        self.discrete_radioButton_7 = QtWidgets.QRadioButton(self.global_settings_frame_4)
        self.discrete_radioButton_7.setChecked(True)
        self.discrete_radioButton_7.setObjectName("discrete_radioButton_7")
        self.buttonGroup_3.addButton(self.discrete_radioButton_7)
        self.gridLayout_global_settings_7.addWidget(self.discrete_radioButton_7, 3, 2, 1, 1)
        self.res_label_7 = QtWidgets.QLabel(self.global_settings_frame_4)
        self.res_label_7.setObjectName("res_label_7")
        self.gridLayout_global_settings_7.addWidget(self.res_label_7, 4, 0, 1, 1)
        self.Calibration_label_7 = QtWidgets.QLabel(self.global_settings_frame_4)
        self.Calibration_label_7.setObjectName("Calibration_label_7")
        self.gridLayout_global_settings_7.addWidget(self.Calibration_label_7, 1, 0, 1, 1)
        self.Calibration_radioButton_y_7 = QtWidgets.QRadioButton(self.global_settings_frame_4)
        self.Calibration_radioButton_y_7.setObjectName("Calibration_radioButton_y_7")
        self.buttonGroup_2.addButton(self.Calibration_radioButton_y_7)
        self.gridLayout_global_settings_7.addWidget(self.Calibration_radioButton_y_7, 1, 1, 1, 1)
        self.verticalLayout_7.addLayout(self.gridLayout_global_settings_7)
        self.verticalLayout_6.addWidget(self.global_settings_frame_4)
        # --- hardware settings: GPIB address, sweep elevation, data dir ---
        self.hardware_settings_gridLayout_6 = QtWidgets.QGridLayout()
        self.hardware_settings_gridLayout_6.setVerticalSpacing(6)
        self.hardware_settings_gridLayout_6.setObjectName("hardware_settings_gridLayout_6")
        self.GPIB_addr_comboBox_6 = QtWidgets.QComboBox(self.main_tab_4)
        self.GPIB_addr_comboBox_6.setFrame(True)
        self.GPIB_addr_comboBox_6.setObjectName("GPIB_addr_comboBox_6")
        # 31 placeholder entries; texts are filled in by retranslateUi().
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.GPIB_addr_comboBox_6.addItem("")
        self.hardware_settings_gridLayout_6.addWidget(self.GPIB_addr_comboBox_6, 0, 1, 1, 1)
        self.sweep_elevation_spinBox = QtWidgets.QDoubleSpinBox(self.main_tab_4)
        self.sweep_elevation_spinBox.setMinimum(-90.0)
        self.sweep_elevation_spinBox.setMaximum(90.0)
        self.sweep_elevation_spinBox.setSingleStep(0.1)
        self.sweep_elevation_spinBox.setProperty("value", 0.0)
        self.sweep_elevation_spinBox.setObjectName("sweep_elevation_spinBox")
        self.hardware_settings_gridLayout_6.addWidget(self.sweep_elevation_spinBox, 1, 1, 1, 1)
        self.GPIB_addr_label_6 = QtWidgets.QLabel(self.main_tab_4)
        self.GPIB_addr_label_6.setObjectName("GPIB_addr_label_6")
        self.hardware_settings_gridLayout_6.addWidget(self.GPIB_addr_label_6, 0, 0, 1, 1)
        self.sweep_elevation_label_6 = QtWidgets.QLabel(self.main_tab_4)
        self.sweep_elevation_label_6.setObjectName("sweep_elevation_label_6")
        self.hardware_settings_gridLayout_6.addWidget(self.sweep_elevation_label_6, 1, 0, 1, 1)
        self.label = QtWidgets.QLabel(self.main_tab_4)
        self.label.setObjectName("label")
        self.hardware_settings_gridLayout_6.addWidget(self.label, 2, 0, 1, 1)
        self.dir_label = QtWidgets.QLabel(self.main_tab_4)
        self.dir_label.setObjectName("dir_label")
        self.hardware_settings_gridLayout_6.addWidget(self.dir_label, 2, 1, 1, 1)
        self.dir_Button = QtWidgets.QPushButton(self.main_tab_4)
        self.dir_Button.setObjectName("dir_Button")
        self.hardware_settings_gridLayout_6.addWidget(self.dir_Button, 2, 2, 1, 1)
        self.open_data_Button = QtWidgets.QPushButton(self.main_tab_4)
        self.open_data_Button.setObjectName("open_data_Button")
        self.hardware_settings_gridLayout_6.addWidget(self.open_data_Button, 3, 1, 1, 1)
        self.verticalLayout_6.addLayout(self.hardware_settings_gridLayout_6)
        self.settingsTabs.addTab(self.main_tab_4, "")
        # --- "positioner" tab: jog arrows ---------------------------------
        self.positioner_tab_4 = QtWidgets.QWidget()
        self.positioner_tab_4.setObjectName("positioner_tab_4")
        self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.positioner_tab_4)
        self.verticalLayout_10.setSpacing(6)
        self.verticalLayout_10.setObjectName("verticalLayout_10")
        self.arrows_frame_4 = QtWidgets.QFrame(self.positioner_tab_4)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.arrows_frame_4.sizePolicy().hasHeightForWidth())
        self.arrows_frame_4.setSizePolicy(sizePolicy)
        self.arrows_frame_4.setMinimumSize(QtCore.QSize(383, 159))
        self.arrows_frame_4.setFocusPolicy(QtCore.Qt.NoFocus)
        self.arrows_frame_4.setAutoFillBackground(False)
        self.arrows_frame_4.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.arrows_frame_4.setFrameShadow(QtWidgets.QFrame.Raised)
        self.arrows_frame_4.setObjectName("arrows_frame_4")
        self.gridLayout = QtWidgets.QGridLayout(self.arrows_frame_4)
        self.gridLayout.setObjectName("gridLayout")
        # Four auto-repeating arrow buttons laid out in a cross.
        self.up_toolButton_4 = QtWidgets.QToolButton(self.arrows_frame_4)
        self.up_toolButton_4.setAutoRepeat(True)
        self.up_toolButton_4.setAutoExclusive(True)
        self.up_toolButton_4.setAutoRepeatDelay(120)
        self.up_toolButton_4.setAutoRepeatInterval(120)
        self.up_toolButton_4.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.up_toolButton_4.setAutoRaise(False)
        self.up_toolButton_4.setArrowType(QtCore.Qt.UpArrow)
        self.up_toolButton_4.setObjectName("up_toolButton_4")
        self.gridLayout.addWidget(self.up_toolButton_4, 0, 1, 1, 1)
        self.left_toolButton_4 = QtWidgets.QToolButton(self.arrows_frame_4)
        self.left_toolButton_4.setAutoRepeat(True)
        self.left_toolButton_4.setAutoExclusive(True)
        self.left_toolButton_4.setAutoRepeatDelay(120)
        self.left_toolButton_4.setAutoRepeatInterval(120)
        self.left_toolButton_4.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.left_toolButton_4.setAutoRaise(False)
        self.left_toolButton_4.setArrowType(QtCore.Qt.LeftArrow)
        self.left_toolButton_4.setObjectName("left_toolButton_4")
        self.gridLayout.addWidget(self.left_toolButton_4, 1, 0, 1, 1)
        self.down_toolButton_4 = QtWidgets.QToolButton(self.arrows_frame_4)
        self.down_toolButton_4.setAutoRepeat(True)
        self.down_toolButton_4.setAutoExclusive(True)
        self.down_toolButton_4.setAutoRepeatDelay(120)
        self.down_toolButton_4.setAutoRepeatInterval(120)
        self.down_toolButton_4.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.down_toolButton_4.setAutoRaise(False)
        self.down_toolButton_4.setArrowType(QtCore.Qt.DownArrow)
        self.down_toolButton_4.setObjectName("down_toolButton_4")
        self.gridLayout.addWidget(self.down_toolButton_4, 2, 1, 1, 1)
        self.right_toolButton_4 = QtWidgets.QToolButton(self.arrows_frame_4)
        self.right_toolButton_4.setAutoRepeat(True)
        self.right_toolButton_4.setAutoExclusive(True)
        self.right_toolButton_4.setAutoRepeatDelay(120)
        self.right_toolButton_4.setAutoRepeatInterval(120)
        self.right_toolButton_4.setPopupMode(QtWidgets.QToolButton.InstantPopup)
        self.right_toolButton_4.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)
        self.right_toolButton_4.setAutoRaise(False)
        self.right_toolButton_4.setArrowType(QtCore.Qt.RightArrow)
        self.right_toolButton_4.setObjectName("right_toolButton_4")
        self.gridLayout.addWidget(self.right_toolButton_4, 1, 2, 1, 1)
        self.verticalLayout_10.addWidget(self.arrows_frame_4)
        # --- positioner tab: pan/tilt LCD read-outs -----------------------
        self.pos_display_layout_4 = QtWidgets.QGridLayout()
        self.pos_display_layout_4.setObjectName("pos_display_layout_4")
        self.pan_lcdNumber_4 = QtWidgets.QLCDNumber(self.positioner_tab_4)
        self.pan_lcdNumber_4.setMinimumSize(QtCore.QSize(188, 48))
        self.pan_lcdNumber_4.setSmallDecimalPoint(False)
        self.pan_lcdNumber_4.setDigitCount(7)
        self.pan_lcdNumber_4.setProperty("value", 0.0)
        self.pan_lcdNumber_4.setObjectName("pan_lcdNumber_4")
        self.pos_display_layout_4.addWidget(self.pan_lcdNumber_4, 4, 0, 1, 1)
        self.tilt_lcdNumber_4 = QtWidgets.QLCDNumber(self.positioner_tab_4)
        self.tilt_lcdNumber_4.setMinimumSize(QtCore.QSize(187, 48))
        self.tilt_lcdNumber_4.setSmallDecimalPoint(False)
        self.tilt_lcdNumber_4.setDigitCount(7)
        self.tilt_lcdNumber_4.setSegmentStyle(QtWidgets.QLCDNumber.Filled)
        self.tilt_lcdNumber_4.setObjectName("tilt_lcdNumber_4")
        self.pos_display_layout_4.addWidget(self.tilt_lcdNumber_4, 4, 1, 1, 1)
        self.pan_label_4 = QtWidgets.QLabel(self.positioner_tab_4)
        self.pan_label_4.setFrameShape(QtWidgets.QFrame.Box)
        self.pan_label_4.setFrameShadow(QtWidgets.QFrame.Raised)
        self.pan_label_4.setAlignment(QtCore.Qt.AlignCenter)
        self.pan_label_4.setObjectName("pan_label_4")
        self.pos_display_layout_4.addWidget(self.pan_label_4, 3, 0, 1, 1)
        self.tilt_label_4 = QtWidgets.QLabel(self.positioner_tab_4)
        self.tilt_label_4.setFrameShape(QtWidgets.QFrame.Box)
        self.tilt_label_4.setFrameShadow(QtWidgets.QFrame.Raised)
        self.tilt_label_4.setAlignment(QtCore.Qt.AlignCenter)
        self.tilt_label_4.setObjectName("tilt_label_4")
        self.pos_display_layout_4.addWidget(self.tilt_label_4, 3, 1, 1, 1)
        self.verticalLayout_10.addLayout(self.pos_display_layout_4)
        # --- positioner tab: offset/alignment buttons ---------------------
        self.offsetBttns_layout_4 = QtWidgets.QVBoxLayout()
        self.offsetBttns_layout_4.setSpacing(6)
        self.offsetBttns_layout_4.setObjectName("offsetBttns_layout_4")
        self.zeroOffsets_Button_4 = QtWidgets.QPushButton(self.positioner_tab_4)
        self.zeroOffsets_Button_4.setObjectName("zeroOffsets_Button_4")
        self.offsetBttns_layout_4.addWidget(self.zeroOffsets_Button_4)
        self.aligntoCenterButton_4 = QtWidgets.QPushButton(self.positioner_tab_4)
        self.aligntoCenterButton_4.setObjectName("aligntoCenterButton_4")
        self.offsetBttns_layout_4.addWidget(self.aligntoCenterButton_4)
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.offsetBttns_layout_4.addLayout(self.horizontalLayout_2)
        self.verticalLayout_10.addLayout(self.offsetBttns_layout_4)
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_10.addItem(spacerItem)
        self.settingsTabs.addTab(self.positioner_tab_4, "")
        self.verticalLayout_2.addWidget(self.settingsTabs)
        # --- bottom OK/Cancel button box ----------------------------------
        self.settings_bttns_frame_4 = QtWidgets.QFrame(Form)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.settings_bttns_frame_4.sizePolicy().hasHeightForWidth())
        self.settings_bttns_frame_4.setSizePolicy(sizePolicy)
        self.settings_bttns_frame_4.setMinimumSize(QtCore.QSize(0, 100))
        self.settings_bttns_frame_4.setMaximumSize(QtCore.QSize(16777215, 100))
        self.settings_bttns_frame_4.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.settings_bttns_frame_4.setFrameShadow(QtWidgets.QFrame.Raised)
        self.settings_bttns_frame_4.setObjectName("settings_bttns_frame_4")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.settings_bttns_frame_4)
        self.verticalLayout.setContentsMargins(-1, 0, -1, -1)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.buttonBox = QtWidgets.QDialogButtonBox(self.settings_bttns_frame_4)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.horizontalLayout.addWidget(self.buttonBox)
        self.verticalLayout.addLayout(self.horizontalLayout)
        self.verticalLayout_2.addWidget(self.settings_bttns_frame_4)
        # --- final wiring: texts, default tab/indices, signal hookup ------
        self.retranslateUi(Form)
        self.settingsTabs.setCurrentIndex(0)
        self.sweepTypeTabs_4.setCurrentIndex(0)
        self.GPIB_addr_comboBox_6.setCurrentIndex(16)
        self.buttonBox.rejected.connect(Form.close)
        QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Settings"))
self.start_label_4.setText(_translate("Form", "Start : "))
self.comboBox_4.setItemText(0, _translate("Form", "201"))
self.comboBox_4.setItemText(1, _translate("Form", "401"))
self.comboBox_4.setItemText(2, _translate("Form", "801"))
self.comboBox_4.setItemText(3, _translate("Form", "1601"))
self.MHz_label_7.setText(_translate("Form", "MHz"))
self.stop_label_4.setText(_translate("Form", "Stop :"))
self.points_label_4.setText(_translate("Form", "Points : "))
self.MHz_label_8.setText(_translate("Form", "MHz"))
self.sweepTypeTabs_4.setTabText(self.sweepTypeTabs_4.indexOf(self.linear_tab_4), _translate("Form", "Sweep"))
self.list_label_5.setText(_translate("Form", "Frequencies : "))
self.list_MHz_label_5.setText(_translate("Form", "MHz"))
self.lineEdit_list_5.setPlaceholderText(_translate("Form", "1, 2, 3, 4, . . ."))
self.toolButton.setText(_translate("Form", "Import"))
self.label_2.setText(_translate("Form", "Input Format: 1, 2, 3, 4, . . ."))
self.sweepTypeTabs_4.setTabText(self.sweepTypeTabs_4.indexOf(self.list_tab_4), _translate("Form", "List"))
self.posMov_label_7.setText(_translate("Form", "Positioner Movment : "))
self.Averaging_label_7.setText(_translate("Form", "Averaging : "))
self.Averaging_comboBox_7.setItemText(0, _translate("Form", "8"))
self.Averaging_comboBox_7.setItemText(1, _translate("Form", "16"))
self.Calibration_radioButton_n_7.setText(_translate("Form", "No"))
self.cont_radioButton_7.setText(_translate("Form", "continous"))
self.Impedance_label_7.setText(_translate("Form", "Impedance : "))
self.res_label_degrees_7.setText(_translate("Form", "degrees"))
self.Impedance_radioButton_n_7.setText(_translate("Form", "No"))
self.Impedance_radioButton_y_7.setText(_translate("Form", "Yes"))
self.discrete_radioButton_7.setText(_translate("Form", "discrete"))
self.res_label_7.setText(_translate("Form", "Resolution : "))
self.Calibration_label_7.setText(_translate("Form", "Calibration : "))
self.Calibration_radioButton_y_7.setText(_translate("Form", "Yes "))
self.GPIB_addr_comboBox_6.setItemText(0, _translate("Form", "0"))
self.GPIB_addr_comboBox_6.setItemText(1, _translate("Form", "1"))
self.GPIB_addr_comboBox_6.setItemText(2, _translate("Form", "2"))
self.GPIB_addr_comboBox_6.setItemText(3, _translate("Form", "3"))
self.GPIB_addr_comboBox_6.setItemText(4, _translate("Form", "4"))
self.GPIB_addr_comboBox_6.setItemText(5, _translate("Form", "5"))
self.GPIB_addr_comboBox_6.setItemText(6, _translate("Form", "6"))
self.GPIB_addr_comboBox_6.setItemText(7, _translate("Form", "7"))
self.GPIB_addr_comboBox_6.setItemText(8, _translate("Form", "8"))
self.GPIB_addr_comboBox_6.setItemText(9, _translate("Form", "9"))
self.GPIB_addr_comboBox_6.setItemText(10, _translate("Form", "10"))
self.GPIB_addr_comboBox_6.setItemText(11, _translate("Form", "11"))
self.GPIB_addr_comboBox_6.setItemText(12, _translate("Form", "12"))
self.GPIB_addr_comboBox_6.setItemText(13, _translate("Form", "13"))
self.GPIB_addr_comboBox_6.setItemText(14, _translate("Form", "14"))
self.GPIB_addr_comboBox_6.setItemText(15, _translate("Form", "15"))
self.GPIB_addr_comboBox_6.setItemText(16, _translate("Form", "16"))
self.GPIB_addr_comboBox_6.setItemText(17, _translate("Form", "17"))
self.GPIB_addr_comboBox_6.setItemText(18, _translate("Form", "18"))
self.GPIB_addr_comboBox_6.setItemText(19, _translate("Form", "19"))
self.GPIB_addr_comboBox_6.setItemText(20, _translate("Form", "20"))
self.GPIB_addr_comboBox_6.setItemText(21, _translate("Form", "21"))
self.GPIB_addr_comboBox_6.setItemText(22, _translate("Form", "22"))
self.GPIB_addr_comboBox_6.setItemText(23, _translate("Form", "23"))
self.GPIB_addr_comboBox_6.setItemText(24, _translate("Form", "24"))
self.GPIB_addr_comboBox_6.setItemText(25, _translate("Form", "25"))
self.GPIB_addr_comboBox_6.setItemText(26, _translate("Form", "26"))
self.GPIB_addr_comboBox_6.setItemText(27, _translate("Form", "27"))
self.GPIB_addr_comboBox_6.setItemText(28, _translate("Form", "28"))
self.GPIB_addr_comboBox_6.setItemText(29, _translate("Form", "29"))
self.GPIB_addr_comboBox_6.setItemText(30, _translate("Form", "30"))
self.GPIB_addr_label_6.setText(_translate("Form", "GPIB Address : "))
self.sweep_elevation_label_6.setText(_translate("Form", "Elevation : "))
self.label.setText(_translate("Form", "Project Directory:"))
self.dir_Button.setText(_translate("Form", "Select"))
self.open_data_Button.setText(_translate("Form", "Open Previous Measurement"))
self.settingsTabs.setTabText(self.settingsTabs.indexOf(self.main_tab_4), _translate("Form", "Main Settings"))
self.up_toolButton_4.setText(_translate("Form", "+ EL"))
self.up_toolButton_4.setShortcut(_translate("Form", "W"))
self.left_toolButton_4.setText(_translate("Form", "CCW"))
self.left_toolButton_4.setShortcut(_translate("Form", "A"))
self.down_toolButton_4.setText(_translate("Form", "- EL"))
self.down_toolButton_4.setShortcut(_translate("Form", "S"))
self.right_toolButton_4.setText(_translate("Form", "CW"))
self.right_toolButton_4.setShortcut(_translate("Form", "D"))
self.pan_label_4.setText(_translate("Form", "Azimuth"))
self.tilt_label_4.setText(_translate("Form", "Elevation"))
self.zeroOffsets_Button_4.setText(_translate("Form", "Zero Offsets"))
self.aligntoCenterButton_4.setText(_translate("Form", "Align to Center"))
self.settingsTabs.setTabText(self.settingsTabs.indexOf(self.positioner_tab_4), _translate("Form", "Positioner Settings"))
|
#!/usr/bin/env python
#
# glossaries.py
#
# Copyright © 2020 Dominic Davis-Foster <dominic@davis-foster.co.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# stdlib
import os
import pathlib
from typing import Dict, Union
# 3rd party
import yaml
# this package
from py2latex.markdown_parser import parse_markdown
__all__ = ["escape_prefix", "glossary_from_file", "load_glossary", "make_glossary"]
def load_glossary(glossary_file: Union[str, pathlib.Path, os.PathLike]) -> Dict[str, Dict[str, str]]:
	"""Read a YAML glossary file and render its markdown fields.

	The file is expected to contain an ``acronyms`` mapping (with ``name`` and
	``text`` per entry) and a ``glossary`` mapping (with ``name``, ``text`` and
	``description`` per entry); each field is passed through ``parse_markdown``
	and stripped in place.
	"""
	glossary_path = pathlib.Path(glossary_file)
	# todo: Validate
	with glossary_path.open() as fp:
		glossary = yaml.load(fp, Loader=yaml.FullLoader)
	# Render every markdown field of each section in place.
	for section, fields in (
			("acronyms", ("name", "text")),
			("glossary", ("name", "text", "description")),
			):
		for data in glossary[section].values():
			for field in fields:
				data[field] = parse_markdown(data[field]).strip()
	return glossary
def escape_prefix(prefix: str) -> str:
	"""Escape literal spaces so LaTeX preserves them inside a prefix option."""
	return prefix.replace(' ', "\\ ")
def make_glossary(glossary_dict):
	"""Render a glossary dict into LaTeX ``\\newacronym`` / ``\\newglossaryentry`` commands.

	``glossary_dict`` must contain an ``acronyms`` mapping and a ``glossary``
	mapping; optional ``prefix`` / ``prefixfirst`` keys on an acronym become
	bracketed options (spaces escaped via :func:`escape_prefix`).
	"""
	pieces = []
	for acronym, data in glossary_dict["acronyms"].items():
		command = r"\newacronym"
		options = []
		if "prefixfirst" in data:
			options.append(f"prefixfirst={{{escape_prefix(data['prefixfirst'])}}}")
		if "prefix" in data:
			options.append(f"prefix={{{escape_prefix(data['prefix'])}}}")
		if options:
			command += f"[{', '.join(options)}]"
		command += f"{{{acronym}}}{{{data['name']}}}{{{data['text']}}}\n"
		pieces.append(command)
	out = ''.join(pieces)
	for item, data in glossary_dict["glossary"].items():
		out += fr"""
\newglossaryentry{{{item}}}
{{
	name={{{data['name']}}},
	text={{{data['text']}}},
	description={{{data['description']}}},
}}"""
	return out
def glossary_from_file(filename):
	"""Load a YAML glossary from *filename* and render it straight to LaTeX."""
	glossary = load_glossary(filename)
	return make_glossary(glossary)
|
#!/usr/bin/env python
import urllib2
import urllib
import sys
import main
# Build a fake Slack slash-command payload; the command text comes from the
# CLI arguments so main.lambda_handler can be exercised locally.
request = dict(
    token='xxxxx',
    team_id='T0001',
    channel_id='C2147483705',
    channel_name='test',
    timestamp='1355517523.000005',
    user_id='U2147483697',
    user_name='tester',
    text=' '.join(sys.argv[1:]),
)
# Lambda receives the payload form-encoded in the event body.
event = dict(body=urllib.urlencode(request))
# NOTE: Python 2 only (print statement, urllib.urlencode, urllib2 import above).
print main.lambda_handler(event, None)
|
from collections.abc import Callable
import torch
import hydra.utils as hyu
import omegaconf.omegaconf
import logging
import typing
log = logging.getLogger(__name__)
__all__ = ["Collection"]
class Collection(Callable): #TODO: inherit from UserList as well, need to update items to iteration
    """A named bundle of hydra-instantiated callables applied to a tensor dict.

    Each entry of the ``items`` config is instantiated via ``hydra.utils``;
    when ``arguments`` is given, entries are zipped with it (extra entries on
    either side are dropped, with a warning). The instantiated list is stored
    under the attribute named by ``name``.
    """
    def __init__(self,
        items: omegaconf.DictConfig,
        arguments: typing.Sequence[typing.Any]=None,
        name: str="items",
    ):
        self.name = name
        if arguments:
            instantiated = [
                hyu.instantiate(conf, arg)
                for conf, arg in zip(items.values(), arguments)
            ]
        else:
            instantiated = [hyu.instantiate(conf) for conf in items.values()]
        setattr(self, name, instantiated)
        if arguments and len(instantiated) != len(arguments):
            log.warning(f"Inconsistent item ({len(instantiated)}) and argument ({len(arguments)}) count, the matching subset is only used.")

    def items(self) -> typing.Iterable[typing.Any]:
        """Return the instantiated items stored under ``self.name``."""
        return getattr(self, self.name)

    def __call__(self, tensors: typing.Dict[str, torch.Tensor]) -> None:
        """Invoke every item on the shared tensor dict (mutation in place)."""
        for each in self.items():
            each(tensors)
# For a given list of integers and integer K, find the number of non-empty subsets S such that min(S) + max(S) <= K.
# Example 1:
# nums = [2, 4, 5, 7]
# k = 8
# Output: 5
# Explanation: [2], [4], [2, 4], [2, 4, 5], [2, 5]
# Example 2:
# nums = [1, 4, 3, 2]
# k = 8
# Output: 15
# Explanation: 16 (2^4) - 1 (empty set) = 15
# Example 3:
# nums = [2, 4, 2, 5, 7]
# k = 10
# Output: 27
# Explanation: 31 (2^5 - 1) - 4 ([7], [5, 7], [4, 5, 7], [4, 7]) = 27
def countSubsets(nums, k):
    """Count non-empty subsets S of nums with min(S) + max(S) <= k.

    Two-pointer over the sorted list: when nums[lo] + nums[hi] <= k, every
    subset of nums[lo+1..hi] combined with nums[lo] qualifies (2**(hi-lo)
    subsets, including {nums[lo]} itself), so advance lo; otherwise nums[hi]
    can never pair with any remaining minimum, so retreat hi. O(n log n)
    versus the original O(n^2) pair scan (which also re-tested `<= k` after
    the `> k` break already guaranteed it).

    Side effects: sorts `nums` in place and prints the count, exactly as the
    original did.
    """
    nums.sort()
    count = 0
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        if nums[lo] + nums[hi] <= k:
            # nums[lo] is the subset minimum; any of the 2**(hi-lo) subsets of
            # nums[lo+1..hi] may be added to it.
            count += 2 ** (hi - lo)
            lo += 1
        else:
            hi -= 1
    print(count)
    return count
# Smoke-check the worked examples from the problem statement above
# (each call prints its result): expected 5, 15, 27.
countSubsets([2, 4, 5, 7], 8)
countSubsets([1,2,3,4], 8)
countSubsets([2, 4, 2, 5, 7], 10)
# When nums[i] + nums[j] <= k, the qualifying subsets are every subset of the
# numbers strictly between index i and j, with nums[i] and nums[j] appended.
# e.g. [2, 4, 5, 7], k=8 : i=0, j=2 -> subsets = [2,5], [2,4,5]
# e.g. [1,2,3,4], k=8 : i=0, j=3 -> subsets = [1,4], [1,2,4], [1,2,3,4], [1,3,4]
# When i == j the subset is the single number [nums[i]].
|
from upy2 import u, U, undarray
from upy2.typesetting.scientific import ScientificTypesetter
# Install a scientific typesetter as the session default.
# NOTE(review): the two arguments presumably set stddev count and precision
# digits -- confirm against the upy2 ScientificTypesetter docs.
st = ScientificTypesetter(2, 2)
st.default()
# Register an uncertainty provider as the session default.
uprov = U(2)
uprov.default()
# Python 2 print statements; "+-" parses as binary + followed by unary -,
# which upy2 exploits to attach uncertainties.
print undarray(10, 2)
print [10] +- u([2])
print 10 +- u(2)
|
# Copyright 2019-2020 the ProGraML authors.
#
# Contact Chris Cummins <chrisc.101@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module defines the logic for loading vocabularies from file."""
import csv
import pathlib
from labm8.py import app
from labm8.py import humanize
def LoadVocabulary(
  dataset_root: pathlib.Path,
  model_name: str,
  max_items: int = 0,
  target_cumfreq: float = 1.0,
):
  """Load a node-text vocabulary from `<dataset_root>/vocab/<model_name>.csv`.

  The CSV is tab-delimited with a header row; each data row is
  (cumulative_frequency, _, _, text). Rows are read in order and assigned
  increasing indices until either `target_cumfreq` cumulative frequency is
  reached or `max_items` entries have been selected (0 = unlimited).

  Args:
    dataset_root: Root directory of the dataset.
    model_name: Name of the vocabulary file (without the .csv suffix).
    max_items: Optional cap on vocabulary size; 0 disables the cap.
    target_cumfreq: Stop once this cumulative node-text frequency is covered.

  Returns:
    A dict mapping vocabulary text to its integer index.
  """
  vocab_csv = dataset_root / "vocab" / f"{model_name}.csv"
  vocab = {}
  cumfreq = 0
  with open(vocab_csv) as f:
    # Stream rows straight from the file object; the original read the whole
    # file into memory via f.readlines() for no benefit.
    vocab_file = csv.reader(f, delimiter="\t")
    for i, row in enumerate(vocab_file, start=-1):
      if i == -1:  # Skip the header.
        continue
      (cumfreq, _, _, text) = row
      cumfreq = float(cumfreq)
      vocab[text] = i
      if cumfreq >= target_cumfreq:
        app.Log(2, "Reached target cumulative frequency: %.3f", target_cumfreq)
        break
      if max_items and i >= max_items - 1:
        app.Log(2, "Reached max vocab size: %d", max_items)
        break
  app.Log(
    1,
    "Selected %s-element vocabulary achieving %.2f%% node text coverage",
    humanize.Commas(len(vocab)),
    cumfreq * 100,
  )
  return vocab
|
import os
import sys
from bs4 import BeautifulSoup
import pytest
from mock import Mock
from app import create_app
# Don't import app.settings to avoid importing google.appengine.ext
# Stub the GAE-dependent modules before create_app (imported above) pulls them in.
sys.modules['app.settings'] = Mock()
sys.modules['app.gaesession'] = Mock()
# Basic-auth credentials shared by the auth fixtures below.
AUTH_USERNAME = 'user'
AUTH_PASSWORD = 'pass'
@pytest.fixture(scope='session')
def app():
    """Session-scoped Flask app configured for testing.

    `pytest.yield_fixture` was deprecated in pytest 3.0 and removed in 6.2;
    a plain `pytest.fixture` has supported yield fixtures since pytest 3.0,
    so the behavior is unchanged.
    """
    _app = create_app(**{
        'TESTING': True,
        'PREFERRED_URL_SCHEME': 'http',
        'ADMIN_CLIENT_ID': 'admin',
        'ADMIN_CLIENT_SECRET': 'secret',
        'API_BASE_URL': 'http://na_api_base',
        'SECRET_KEY': 'secret_key',
        'AUTH_USERNAME': AUTH_USERNAME,
        'AUTH_PASSWORD': AUTH_PASSWORD,
        'OAUTHLIB_INSECURE_TRANSPORT': True,
        'WTF_CSRF_ENABLED': False,
    })
    # Push an app context so app-bound helpers work for the whole session.
    ctx = _app.app_context()
    ctx.push()
    yield _app
    ctx.pop()
@pytest.fixture
def os_environ():
    """
    Replace os.environ with a fresh str-only dict, restoring it after the test.

    For use whenever you expect code to edit environment variables. Note this
    swaps the whole mapping object, so os.putenv side effects are bypassed for
    the duration of the test -- intended for dict-style access only.
    """
    old_env = os.environ.copy()

    class EnvironDict(dict):
        def __setitem__(self, key, value):
            # Environment values must be strings; fail fast on anything else.
            # (isinstance instead of the original `type(value) == str`.)
            assert isinstance(value, str)
            super(EnvironDict, self).__setitem__(key, value)

    os.environ = EnvironDict()
    yield
    os.environ = old_env
@pytest.fixture
def user_not_authenticated(mocker):
    # Replace the app-level session so no user appears logged in.
    # NOTE(review): `return_value={}` configures the replacement Mock's *call*
    # result; code treating the session as a mapping sees the Mock itself --
    # confirm that is the intended shape.
    mocker.patch('app.session', return_value={})
def request(url, method, data=None, headers=None):
    """Invoke a test-client method and attach a parsed soup to the response.

    `method` is a bound test-client callable (e.g. client.get); the returned
    response gains a `.soup` attribute with its HTML parsed by BeautifulSoup.
    """
    response = method(url, data=data, headers=headers)
    response.soup = BeautifulSoup(response.get_data(as_text=True), 'html.parser')
    return response
@pytest.fixture(scope='function')
def client(app, user_not_authenticated):
    """A Flask test client running inside a test request context, logged out."""
    with app.test_request_context(), app.test_client() as client:
        yield client
@pytest.fixture
def logged_in(mocker, user_not_authenticated):
    """Patch the views' request object with valid basic-auth credentials."""
    class StubRequest(object):
        class Authorization(object):
            username = AUTH_USERNAME
            password = AUTH_PASSWORD
        authorization = Authorization

    mocker.patch("app.main.views.request", StubRequest)
@pytest.fixture
def invalid_log_in(mocker):
    """Patch the views' request object with wrong basic-auth credentials."""
    class StubRequest(object):
        class Authorization(object):
            username = "invalid"
            password = "wrong"
        authorization = Authorization

    mocker.patch("app.main.views.request", StubRequest)
@pytest.fixture
def sample_future_events(mocker):
    """Four approved future events; also patches ApiClient.get to return them."""
    events = [
        {
            "title": "Test title 1",
            "event_type": "Talk",
            "image_filename": "event.png",
            "event_dates": [{
                "event_datetime": "2018-12-30 19:00"
            }],
            "event_state": "approved"
        },
        {
            "title": "Test title 2",
            "event_type": "Talk",
            "image_filename": "event.png",
            "event_dates": [{
                "event_datetime": "2018-12-31 19:00"
            }],
            "event_state": "approved"
        },
        {
            "title": "Test title 3",
            "event_type": "Introductory Course",
            "image_filename": "event.png",
            "event_dates": [{
                "event_datetime": "2019-01-01 19:00"
            }],
            "event_monthyear": "January 2019",
            "event_state": "approved"
        },
        {
            # Deliberately has no image, to exercise the missing-image path.
            "title": "Test title 4",
            "event_type": "Workshop",
            "image_filename": "",
            "event_dates": [{
                "event_datetime": "2019-01-02 19:00"
            }],
            "event_state": "approved"
        },
    ]
    mocker.patch(
        "app.clients.api_client.ApiClient.get",
        return_value=events
    )
    return events
@pytest.fixture
def sample_past_events_for_cards():
    """Four approved past events as plain data (no API mocking)."""
    events = [
        {
            "title": "Test title 5",
            "event_type": "Talk",
            "image_filename": "event.png",
            "event_dates": [{
                "event_datetime": "2018-12-30 19:00"
            }],
            "event_state": "approved"
        },
        {
            "title": "Test title 6",
            "event_type": "Talk",
            "image_filename": "event.png",
            "event_dates": [{
                "event_datetime": "2018-12-31 19:00"
            }],
            "event_state": "approved"
        },
        {
            "title": "Test title 7",
            "event_type": "Introductory Course",
            "image_filename": "event.png",
            "event_dates": [{
                "event_datetime": "2019-01-01 19:00"
            }],
            "event_monthyear": "January 2019",
            "event_state": "approved"
        },
        {
            # Deliberately has no image, to exercise the missing-image path.
            "title": "Test title 8",
            "event_type": "Workshop",
            "image_filename": "",
            "event_dates": [{
                "event_datetime": "2019-01-02 19:00"
            }],
            "event_state": "approved"
        },
    ]
    return events
@pytest.fixture
def sample_future_event_for_cards():
    """A single approved future event as plain data (no API mocking)."""
    event = {
        "title": "Test title 1",
        "event_type": "Talk",
        "image_filename": "event.png",
        "event_dates": [{
            "event_datetime": "2018-12-30 19:00"
        }],
        "event_state": "approved"
    }
    return [event]
@pytest.fixture
def sample_articles_summary(mocker):
    """Four article summaries; also patches ApiClient.get_articles_summary."""
    articles = [
        {
            'title': 'Article title 1',
            'short_content':
            'some short content 1, some short content 1, some short content 1, some short content 1'
        },
        {
            'title': 'Article title 2',
            'short_content':
            'some short content 2, some short content 2, some short content 2, some short content 2'
        },
        {
            'title': 'Article title 3',
            'short_content':
            'some short content 3, some short content 3, some short content 3, some short content 3'
        },
        {
            'title': 'Article title 4',
            'short_content':
            'some short content 4, some short content 4, some short content 4, some short content 4'
        }
    ]
    mocker.patch(
        "app.clients.api_client.ApiClient.get_articles_summary",
        return_value=articles
    )
    return articles
def mock_sessions(mocker, session_dict=None):
    """Patch every module-level session reference with *session_dict*.

    Args:
        mocker: pytest-mock fixture (anything exposing ``.patch``).
        session_dict: Session contents; defaults to an empty dict. The
            original signature used a mutable ``{}`` default, which is shared
            across calls -- a fresh dict is now created per call.
    """
    if session_dict is None:
        session_dict = {}
    mocker.patch('app.session', session_dict)
    mocker.patch('app.main.views.session', session_dict)
    mocker.patch('app.main.views.admin.admin.session', session_dict)
    mocker.patch('app.main.views.admin.emails.session', session_dict)
    mocker.patch('app.main.views.admin.events.session', session_dict)
    mocker.patch('app.main.views.os.environ', session_dict)
@pytest.fixture
def mock_admin_logged_in(mocker):
    """Simulate a logged-in admin user across all patched session entry points."""
    session_dict = {
        'user': {
            'id': 'test_id',
            'access_area': 'admin'
        },
        'user_profile': {
            'name': 'test name',
            'email': 'test@example.com'
        },
    }
    mock_sessions(mocker, session_dict)
|
# -*- coding: utf-8 -*-
"""
transistor.tests.conftest
~~~~~~~~~~~~
This module defines pytest fixtures and other constants available to all tests.
:copyright: Copyright (C) 2018 by BOM Quote Limited
:license: The MIT License, see LICENSE for more details.
~~~~~~~~~~~~
"""
import pytest
from kombu import Connection
from kombu.pools import producers
from pathlib import Path
from os.path import dirname as d
from os.path import abspath
from requests.adapters import HTTPAdapter
from transistor import SplashBrowser
from transistor import BaseGroup
from transistor.persistence.newt_db.collections import SpiderList
from examples.books_to_scrape.workgroup import BooksWorker
from examples.books_to_scrape.persistence.newt_db import ndb
from transistor.schedulers.brokers.queues import ExchangeQueue
from transistor import StatefulBook, WorkGroup
from transistor.persistence.exporters.exporters import CsvItemExporter
from transistor.persistence.newt_db.collections import SpiderLists
from examples.books_to_scrape.scraper import BooksToScrapeScraper
from examples.books_to_scrape.manager import BooksWorkGroupManager
from examples.books_to_scrape.persistence.serialization import (
BookItems, BookItemsLoader)
root_dir = d(d(abspath(__file__)))
def get_html(filename):
    """
    Get the appropriate html testfile and return it. Filename should include
    the folder the file is in.

    :param filename: ex. -> "digidog/digidog_china_multiple.html"
    """
    data_folder = Path(root_dir)
    file_to_open = data_folder / filename
    # Context manager closes the handle; the original leaked the open file.
    with open(file_to_open, encoding='utf-8') as f:
        return f.read()
def get_file_path(filename):
    """
    Find the book_titles excel file path under tests/books_toscrape/.
    """
    filepath = Path(root_dir) / 'tests' / 'books_toscrape' / filename
    return '{}'.format(filepath)
@pytest.fixture(scope='function')
def test_dict():
    """
    Canned scraper kwargs; callers must set dict["_test_page_text"] = get_html().
    :return dict
    """
    stub_config = {
        "_test_true": True,
        "_test_page_text": '',
        "_test_status_code": 200,
        "autostart": True,
    }
    return stub_config
@pytest.fixture(scope='function')
def _BooksWorker():
    """
    Create a BooksWorker subclass which saves jobs to ndb.root._spiders.
    Returned as a class (not an instance) for WorkGroup wiring.
    """
    class _BooksWorker(BooksWorker):
        """
        A _BooksWorker instance which overrides the process_exports method to
        make it useful for testing.
        """
        def pre_process_exports(self, spider, task):
            # Compare by value: the original used `is not 'NONE'`, an identity
            # comparison against a str literal (SyntaxWarning on Python 3.8+
            # and implementation-dependent).
            if self.job_id != 'NONE':
                try:
                    # create the list with the job name if it doesnt already exist
                    ndb.root._spiders.add(self.job_id, SpiderList())
                    print(
                        f'Worker {self.name}-{self.number} created a new scrape_list '
                        f'for {self.job_id}')
                except KeyError:
                    # will be raised if there is already a list with the same job_name
                    pass
                # export the scraper data to the items object
                items = self.load_items(spider)
                # save the items object to newt.db
                ndb.root._spiders[self.job_id].add(items)
                ndb.commit()
                print(f'Worker {self.name}-{self.number} saved {items.__repr__()} to '
                      f'scrape_list "{self.job_id}" for task {task}.')
            else:
                # if job_id is NONE then we'll skip saving the objects
                print(
                    f'Worker {self.name}-{self.number} said job_name is {self.job_id} '
                    f'so will not save it.')

        def post_process_exports(self, spider, task):
            """
            A hook point for customization after process_exports.
            In this example, we append the returned scraper object to a
            class attribute called `events`.
            """
            self.events.append(spider)
            print(f'{self.name} has {spider.stock} inventory status.')
            print(f'pricing: {spider.price}')
            print(f'Worker {self.name}-{self.number} finished task {task}')

    return _BooksWorker
@pytest.fixture(scope='function')
def splash_browser():
    """
    A SplashBrowser instance for the unit tests, with an lxml soup parser
    and HTTP retries enabled.
    :return:
    """
    browser = SplashBrowser(
        soup_config={'features': 'lxml'},
        requests_adapters={'http://': HTTPAdapter(max_retries=5)})
    return browser
def get_job_results(job_id):
    """
    A ndb helper method that manipulates the _scraper object.
    Returns the stored results for the scrape list registered under job_id.
    """
    return ndb.root._spiders.lists[job_id].results
def delete_job(job_id):
    """
    A ndb helper that removes a job's scrape list, ignoring unknown job ids.
    """
    try:
        spider_lists = ndb.root._spiders.lists
        del spider_lists[job_id]
        ndb.commit()
    except KeyError:
        pass
@pytest.fixture(scope='function')
def bts_static_scraper(test_dict):
    """
    A BooksToScrapeScraper static test fixture: serves a canned index page
    so no network access is required.
    """
    book_title = 'Soumission'
    page = get_html("tests/books_toscrape/books_toscrape_index.html")
    test_dict['_test_page_text'] = page
    test_dict['url'] = 'http://books.toscrape.com'
    scraper = BooksToScrapeScraper(book_title=book_title, **test_dict)
    scraper.start_http_session()
    return scraper
@pytest.fixture(scope='function')
def bts_live_scraper():
    """
    A BooksToScrapeScraper live fixture for TestLiveBooksToScrape.
    Performs a real network request to books.toscrape.com.
    """
    scraper = BooksToScrapeScraper(book_title='Black Dust')
    scraper.start_http_session(url='http://books.toscrape.com')
    return scraper
@pytest.fixture(scope='function')
def bts_book_manager(_BooksWorker):
    """
    A BooksToScrape Manager test fixture for live network call.
    Here, we are spinning up two workers, while we have three
    tasks. It is important to test this as such, in spinning up
    a less number of workers vs total tasks. There are plenty
    of ways to break this test when refactoring. One likely
    source would be the BaseWorker class method `load_items`.
    It took me half-a-day to track down a bug in that method
    which resulted in this test only working if the # workers
    was equal to the number of tasks. That was the previous
    default way to run this test, so the bug went un-found.
    """
    # first, setup newt.db for testing
    ndb.root._spiders = SpiderLists()
    ndb.commit()
    # ensure to open this file in binary mode
    # NOTE(review): hard-coded Windows path -- this fixture only runs where
    # c:/temp exists; consider tmp_path for portability.
    book_data_file = open('c:/temp/book_data.csv', 'a+b')
    exporters = [
        CsvItemExporter(
            fields_to_export=['book_title', 'stock', 'price'],
            file=book_data_file,
            encoding='utf_8_sig'
        )
    ]
    file = get_file_path('book_titles.xlsx')
    trackers = ['books.toscrape.com']
    tasks = StatefulBook(file, trackers, keywords='titles', autorun=True)
    groups = [
        WorkGroup(
            name='books.toscrape.com',
            url='http://books.toscrape.com/',
            spider=BooksToScrapeScraper,
            worker=_BooksWorker,
            items=BookItems,
            loader=BookItemsLoader,
            exporters=exporters,
            workers=2,  # this creates 2 scrapers and assigns each a book as a task
            kwargs={'timeout': (3.0, 20.0)})
    ]
    manager = BooksWorkGroupManager('books_scrape', tasks, workgroups=groups, pool=5)
    yield manager
    # teardown
    delete_job('books_scrape')
    del ndb.root._spiders
    ndb.commit()
@pytest.fixture(scope='function')
def broker_tasks(broker_conn):
    """An ExchangeQueue whose task queues are pre-declared on the broker."""
    trackers = ['books.toscrape.com']
    tasks = ExchangeQueue(trackers)
    # explicitly declare the queues
    for queue in tasks.task_queues:
        queue(broker_conn).declare()
    return tasks
@pytest.fixture(scope='function')
def broker_conn():
    """
    A Kombu connection object. Connect with RabbitMQ or Redis.
    Requires a broker listening on localhost:5672 (guest/guest).
    """
    connection = Connection("pyamqp://guest:guest@localhost:5672//")
    # connection = Connection("redis://127.0.0.1:6379")
    return connection
@pytest.fixture(scope='function')
def bts_broker_manager(_BooksWorker, broker_tasks, broker_conn):
    """
    A BooksToScrape Manager test fixture for live network call.
    Here, we use a broker (RabbitMQ) to test.
    """
    # setup newt.db for testing
    ndb.root._spiders = SpiderLists()
    ndb.commit()
    # ensure to open this file in binary mode
    # NOTE(review): hard-coded Windows path -- see bts_book_manager.
    book_data_file = open('c:/temp/broker_data.csv', 'a+b')
    exporters = [
        CsvItemExporter(
            fields_to_export=['book_title', 'stock', 'price'],
            file=book_data_file,
            encoding='utf_8_sig'
        )
    ]
    groups = [
        WorkGroup(
            name='books.toscrape.com',
            url='http://books.toscrape.com/',
            spider=BooksToScrapeScraper,
            worker=_BooksWorker,
            items=BookItems,
            loader=BookItemsLoader,
            exporters=exporters,
            workers=2,  # this creates 2 scrapers and assigns each a book as a task
            kwargs={'timeout': (3.0, 20.0)})
    ]
    manager = BooksWorkGroupManager('books_broker_scrape', broker_tasks,
                                    workgroups=groups, pool=5, connection=broker_conn)
    yield manager
    # teardown newt.db
    delete_job('books_broker_scrape')
    del ndb.root._spiders
    ndb.commit()
import tkinter as tk
from tkinter import Scrollbar, Listbox, Frame
from datetime import datetime
class Console:
    """A scrollable, color-coded log console docked at the bottom of a Tk window."""
    master = None         # parent Tk widget
    mainContainer = None  # outer Frame packed into master
    console = None        # the Listbox holding log lines

    def __init__(self, master):
        self.master = master
        self.mainContainer = Frame(self.master)
        self.mainContainer.pack(side=tk.BOTTOM, fill=tk.BOTH)
        self.showConsole()

    def showConsole(self):
        """Create the console area and populate it with the list + scrollbar."""
        consoleContainer = Frame(self.mainContainer, bg="black", height=100)
        consoleContainer.pack(side=tk.BOTTOM, fill=tk.X)
        self.prepConsole(consoleContainer)

    def prepConsole(self, container):
        """Build the Listbox/Scrollbar pair inside *container*."""
        scrollbar = Scrollbar(container)
        self.console = Listbox(container, bg="white", yscrollcommand=scrollbar.set)
        self.console.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
        self.console.pack_propagate(0)
        scrollbar.pack(side=tk.LEFT, fill=tk.Y)
        scrollbar.config(command=self.console.yview)

    def consoleInsert(self, event, color=""):
        """Append *event*, optionally colored, scroll to the end, trim old rows."""
        self.console.insert(tk.END, event)
        if color != "":
            self.console.itemconfig(self.console.size() - 1, {"fg": color})
        self.console.yview(tk.END)
        # Keep the widget bounded: once past 200 rows, drop the oldest 101.
        if self.console.size() > 200:
            self.console.delete(0, 100)

    def clear(self):
        """Remove every entry.

        Bug fix: the original called delete(0), which removes only the first
        row; Listbox.delete(first, last) with END clears the whole list.
        """
        self.console.delete(0, tk.END)

    def insertProcess(self, event):
        """Log an in-progress message (blue), timestamped."""
        self.consoleInsert(str(datetime.now()) + " " + event, "blue")

    def insertSuccess(self, event):
        """Log a success message (green), timestamped."""
        self.consoleInsert(str(datetime.now()) + " " + event, "green")

    def insertWarn(self, event):
        """Log a warning message (orange), timestamped."""
        self.consoleInsert(str(datetime.now()) + " " + event, "orange")

    def insertFailed(self, event):
        """Log a failure message (red), timestamped."""
        self.consoleInsert(str(datetime.now()) + " " + event, "red")
|
from google.cloud import storage
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
import os
import time
import inspect
import praw
from tqdm import tqdm
import ConfigParser
# Maybe add async upload? https://cloud.google.com/appengine/docs/standard/python/datastore/async
# Load config file. Note that praw loads the same file on its own to initialize itself.
config = ConfigParser.ConfigParser()
config.read('praw.ini')
# Prepare to use speech
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = config.get('google', 'application_credentials')
client = speech.SpeechClient()
speech_config = types.RecognitionConfig(
    encoding=enums.RecognitionConfig.AudioEncoding.FLAC,
    # sample_rate_hertz=16000,
    language_code='en-US',
    enable_automatic_punctuation=True
    # enable_speaker_diarization=True,
    # diarization_speaker_count=3
)
# Prepare to use Reddit
reddit = praw.Reddit('reddit')
my_sub = config.get('reddit', 'subreddit')
wiki = reddit.subreddit(my_sub).wiki
# Fetch the audio clips from the storage bucket, keyed by bare file name.
storage_client = storage.Client()
bucket = storage_client.get_bucket(config.get('google', 'bucket'))
audio = {}
for blob in bucket.list_blobs():
    squished_URI = 'gs://{}/{}'.format(config.get('google', 'bucket'), blob.name)
    audio[str(blob.name.rsplit('.')[0])] = types.RecognitionAudio(uri=squished_URI)
jobs = {}
output = {}
pbars = {}
# Make a new Speech job for every clip in the audio dict
for name, job in audio.items():
    jobs[name] = client.long_running_recognize(speech_config, job)
    # print(name + " transcription request submitted.")
    pbars[name] = tqdm(total=100, desc=name)
# Wait until the jobs are done, update progress bars, and finally put their results in the output dict.
while len(jobs) > 0:
    time.sleep(20)
    # NOTE: Python 2 dict.items() returns a list, so popping inside the loop
    # is safe here; under Python 3 this would need list(jobs.items()).
    for name, job in jobs.items():
        if not job.done():
            # NOTE(review): tqdm.update() expects an increment, but
            # progress_percent looks like an absolute value -- the bar may
            # overshoot; confirm against the operation metadata docs.
            pbars[name].update(job.metadata.progress_percent)
        else:
            output[name] = job
            jobs.pop(name)
            pbars[name].close()
# Process the results in the output dict
for name, result in output.items():
    # parse some handy names for later
    backup_file = 'backup/' + name + '.txt'
    wiki_address = 'episodes/' + name.rsplit('-')[1]
    # open a local backup file
    if not os.path.isdir('backup'):
        os.mkdir('backup')
    file = open(backup_file, 'w')
    # concat all of the segments, close the backup
    for paragraph in range(0, len(result._result.results)):
        file.write(result._result.results[paragraph].alternatives[0].transcript)
    file.close()
    # push the result up to a similarly named page on the wiki
    transcript = open(backup_file, 'r')  # why concat twice?
    if os.path.isfile('template.txt'):
        template = open('template.txt', 'r')
        transcript_with_template = template.read() + transcript.read()
        template.close()
    else:
        # Bug fix: the original assigned the file object itself, so the wiki
        # edit received a file handle instead of text when no template exists.
        transcript_with_template = transcript.read()
    wiki[wiki_address].edit(transcript_with_template, reason='Init')
    transcript.close()
# response = operation.result(timeout=90)
# # Each result is for a consecutive portion of the audio. Iterate through
# # them to get the transcripts for the entire audio file.
# for result in response.results:
#     # The first alternative is the most likely one for this portion.
#     print(u'Transcript: {}'.format(result.alternatives[0].transcript))
#     print('Confidence: {}'.format(result.alternatives[0].confidence))
|
import KratosMultiphysics as Kratos
import KratosMultiphysics.KratosUnittest as KratosUnittest
import KratosMultiphysics.DEMApplication as DEM
# This test consists in applying OMP_DEMSearch to assign, to each node in
# a 'Base' model part, the corresponding neighbors from a 'Target' model part.
# The results calculated by Kratos are compared to the expected results. We
# assume the domain to be a periodic parallepiped, so that the closest neighbors
# are to be found accross the boundaries in some cases.
# In particular, it is checked that the search is sharp, in the sense that if we
# vary the search radius by a small amount (epsilon), neighbors placed strategically
# at a distance equal to the base search radius are either found or not.
class TestSearchNodes(KratosUnittest.TestCase):
    def setUp(self):
        """Build a periodic cubic search domain, the DEM search tool, and the
        'Base'/'Target' model parts used by the node-search tests."""
        # input parameters:
        dx = self.unit_length = 0.1
        self.epsilon = 0.0001 # small distance by which we modify the sensitivity to search radius
        # Axis-aligned bounding box: [x_min, y_min, z_min, x_max, y_max, z_max]
        self.domain_box = [0, 0, 0, 10*dx, 10*dx, 10*dx]
        # creating a search tool with a periodic bounding box
        dimension = int(len(self.domain_box) / 2)
        # Periods (side lengths) of the periodic box along each axis.
        self.domain_box_periods = [self.domain_box[i + dimension] - self.domain_box[i] for i in range(dimension)]
        self.search_strategy = DEM.OMP_DEMSearch(*self.domain_box)
        # creating data containers and initializing global variables
        self.next_node_id = 1
        self.all_points_db = dict()            # node id -> coordinates
        self.expected_distances_db = dict()    # (id pair) -> expected distance
        self.base_ids = []                     # ids of 'Base' nodes, in creation order
        self.target_ids = []                   # ids of 'Target' nodes, in creation order
        # creating model parts
        self.model = Kratos.Model()
        self.base_model_part = self.model.CreateModelPart('Base')
        self.target_model_part = self.model.CreateModelPart('Target')
        # creating nodes
        self.CreateNodes()
def test_SearchNodesInTargetModelPart(self):
dx = self.unit_length
epsilon = self.epsilon
neighbors, distances = [], []
# setting search radius just about large enough to catch some of the potential neighbors
search_radius = 2*dx + epsilon
radii = [search_radius for node in self.base_model_part.Nodes]
self.search_strategy.SearchNodesInRadiusExclusive(self.base_model_part.Nodes,
self.target_model_part.Nodes,
radii,
DEM.VectorResultNodesContainer(),
DEM.VectorDistances(),
neighbors,
distances)
self.AssertCorrectnessOfNeighbors(neighbors, search_radius)
self.AssertCorrectnessOfDistances(distances, neighbors)
neighbors, distances = [], []
search_radius = 2*dx - epsilon
# setting search radius a tad too short to catch some of the potential neighbors
radii = [search_radius for node in self.base_model_part.Nodes]
self.search_strategy.SearchNodesInRadiusExclusive(self.base_model_part.Nodes,
self.target_model_part.Nodes,
radii,
DEM.VectorResultNodesContainer(),
DEM.VectorDistances(),
neighbors,
distances)
self.AssertCorrectnessOfNeighbors(neighbors, search_radius)
self.AssertCorrectnessOfDistances(distances, neighbors)
def AssertCorrectnessOfNeighbors(self, neighbors, search_radius):
obtained_neighbors = [frozenset(l) for l in neighbors]
expected_neighbors = self.GetExpectedListsOfNeighbors(search_radius)
self.assertEqual(obtained_neighbors, expected_neighbors)
def AssertCorrectnessOfDistances(self, distances, neighbor_lists):
for i, i_neighbors in enumerate(neighbor_lists):
for j, neigh_id in enumerate(i_neighbors):
expected_distance = self.GetDistanceById(self.base_ids[i], neigh_id)
obtained_distance = distances[i][j]
self.assertAlmostEqual(expected_distance, obtained_distance)
def CreateNodes(self):
dx = self.unit_length
# Base nodes, for which neighbors must be searched for
# ------------------------------------------
B1 = [dx, dx, dx]
B2 = [0.9*dx, 0.9*dx, 0.9*dx]
base_points = [B1, B2]
# Target nodes, which are search candidates
# ------------------------------------------
# the first one is simply displaced two length units in the x-direction with respect to
# the first base node
T1 = [3*dx, dx, dx]
# the second candidate is displaced along the diagonal two length units towards the origin,
# so that it comes in through the opposite end of the diagonal
T2 = [(11 - 2.0/3**0.5) * dx for x in T1]
# the third one is displaced 3 units in the y direction from the first base node
T3 = [4*dx, dx, dx]
# the fourth candidate is also displaced 3 units in the y direction, but in the opposite
# direction (through the boundary)
T4 = [1 - 2*dx, dx, dx]
target_points = [T1, T2, T3, T4]
for point in base_points:
self.CreateNode('Base', point)
for point in target_points:
self.CreateNode('Target', point)
@staticmethod
def Norm(V):
return sum(v**2 for v in V)**0.5
def CreateNode(self, model_part_name, coordinates):
model_part = self.model.GetModelPart(model_part_name)
id = self.next_node_id
model_part.CreateNewNode(id, *coordinates)
self.all_points_db[id] = coordinates
if model_part_name == 'Base':
self.base_ids.append(id)
else:
self.target_ids.append(id)
self.next_node_id += 1
def GetExpectedListsOfNeighbors(self, search_radius):
expected_neighbors = [None for __ in self.base_ids]
for i, base_id in enumerate(self.base_ids):
neighs = set()
for target_id in self.target_ids:
if self.GetDistanceById(base_id, target_id) < search_radius:
neighs.add(target_id)
expected_neighbors[i] = frozenset(neighs)
return expected_neighbors
def GetDistanceById(self, id1, id2):
X = self.all_points_db[id1]
Y = self.all_points_db[id2]
distance = type(self).Norm(self.GetPeriodicDisplacement(X, Y))
return distance
def GetPeriodicDisplacement(self, X, Y):
sign = lambda x: -1 if x < 0 else 1
difference = [x - y for (x, y) in zip(X, Y)]
for i, d in enumerate(difference):
thorugh_boundary_difference = d - sign(d) * self.domain_box_periods[i]
if abs(d) > abs(thorugh_boundary_difference):
difference[i] = thorugh_boundary_difference
return difference
if __name__ == '__main__':
    # run the test suite when this file is executed as a script
    KratosUnittest.main()
|
#!/usr/bin/python3
import flask
import requests as HTTP_REQUESTS
import socket
from pdb import set_trace
import flask_fat
# Module-level journal/blueprint container for this route file.
# NOTE(review): binding it to the name 'self' at module scope is unusual —
# presumably flask_fat's plumbing expects it; confirm before removing.
Journal = self = flask_fat.Journal(__file__)

""" ----------------------- ROUTES --------------------- """
@Journal.BP.route('/help', methods=['GET'])
def register_external_api():
    """Return a JSON list of every registered route and its HTTP methods.

    Each entry maps a rule string to its method list; the implicit
    'static' endpoint is skipped. Responds with HTTP 200.
    """
    # NOTE: the previous 'global Journal' was needless — the global is only
    # read here, never assigned.
    app = Journal.mainapp.app
    routes = [{rule.rule: list(rule.methods)}
              for rule in app.url_map.iter_rules()
              if rule.endpoint != 'static']
    return flask.make_response(flask.jsonify(routes), 200)
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
import os, torch
def readLabel(data, imsz):
    """Convert YOLO-format label lines into clamped pixel-coordinate boxes.

    Args:
        data: iterable of label strings "class x_center y_center width height",
              with the four geometry values normalized to [0, 1].
        imsz: image shape tuple (height, width, channels), e.g. img.shape.

    Returns:
        list of [left, right, top, bottom] integer pixel coordinates, each
        clamped to the valid image range.
    """
    labels = []
    dh, dw, _ = imsz
    for line in data:
        _cls, x, y, w, h = map(float, line.split(" "))
        # denormalize the box corners (int() truncation kept from original)
        l = int((x - w / 2) * dw)
        r = int((x + w / 2) * dw)
        t = int((y - h / 2) * dh)
        b = int((y + h / 2) * dh)
        # clamp to the image bounds
        labels.append([max(l, 0), min(r, dw - 1), max(t, 0), min(b, dh - 1)])
    return labels
def test(param, imsz):
    """Rasterize boxes into a filled binary mask the size of the image.

    Args:
        param: iterable of [x1, y1, x2, y2] pixel-coordinate boxes.
        imsz: image shape tuple (height, width, channels), e.g. img.shape.

    Returns:
        uint8 mask of shape (height, width) with 255 inside every box,
        suitable as the mask argument of cv2.inpaint.
    """
    dh, dw, _ = imsz
    # BUG FIX: the mask must be (rows=height, cols=width). The old
    # np.zeros((dw, dh)) was transposed, and cv2.inpaint requires the mask
    # to have the same size as the image, so non-square images failed.
    mask = np.zeros((dh, dw), dtype="uint8")
    for box in param:
        x1, y1, x2, y2 = box
        cv2.rectangle(mask, (x1, y1), (x2, y2), 255, -1)
    return mask
def logoOverlay(image, logo, alpha=1.0, x=0, y=0, scale=1.0):
    """Blend `logo` onto `image` at (x, y) and return a 3-channel result.

    alpha weights the background region in cv2.addWeighted; scale resizes
    the logo. Negative x/y are taken as offsets from the right/bottom edge.
    NOTE(review): addWeighted needs both operands to have the same channel
    count — this assumes `logo` is 4-channel (BGRA) like the padded image;
    confirm against callers loading with cv2.IMREAD_UNCHANGED.
    """
    (h, w) = image.shape[:2]
    # pad the background with a fully opaque alpha channel
    image = np.dstack([image, np.ones((h, w), dtype="uint8") * 255])
    overlay = cv2.resize(logo, None, fx=scale, fy=scale)
    (wH, wW) = overlay.shape[:2]
    output = image.copy()
    # blend the two images together using transparent overlays
    try:
        # negative offsets measure from the opposite image edge
        if x < 0: x = w + x
        if y < 0: y = h + y
        # clip the overlay region so it stays inside the image
        if x + wW > w: wW = w - x
        if y + wH > h: wH = h - y
        print(x, y, wW, wH)
        overlay = cv2.addWeighted(output[y:y + wH, x:x + wW], alpha,
                                  overlay[:wH, :wW], 1.0, 0)
        output[y:y + wH, x:x + wW] = overlay
    except Exception as e:
        # out-of-bounds placement (or channel mismatch) ends up here
        print("Error: Logo position is overshooting image!")
        print(e)
    # drop the temporary alpha channel before returning
    output = output[:, :, :3]
    return output
if __name__ == "__main__":
    ## set folders: model weights, test images, and the local yolov5 repo
    rootfolder = os.getcwd()
    model_folder = os.path.join(rootfolder, "model_sprite")
    work_folder = os.path.join(rootfolder, "test")
    yolo_folder = os.path.join(rootfolder, "yolov5")
    ## load the custom YOLOv5 detector from the local repository
    model = torch.hub.load(f'{yolo_folder}',
                           'custom',
                           path=f'{model_folder}/best.pt',
                           source='local',
                           force_reload=True)  # local repo
    ## read the test image twice: one copy for detection, one to modify
    img = cv2.imread(f"{work_folder}/test.jpg")
    test_img = cv2.imread(f"{work_folder}/test.jpg")
    ## replacement logo, loaded with its alpha channel preserved
    logo = cv2.imread(f"{work_folder}/testlogo.png", cv2.IMREAD_UNCHANGED)
    # run detection; each row of results.xyxy[0] is [x1, y1, x2, y2, conf, cls]
    results = model(img)
    print(results.xyxy[0].tolist())
    resultLabel = []
    for d in results.xyxy[0].tolist():
        x1, y1, x2, y2, _, __, = d
        resultLabel.append([int(x1), int(y1), int(x2), int(y2)])
    # build a fill mask covering every detected box
    mask = test(resultLabel, img.shape)
    #results.show()
    cv2.imshow('test', test_img)
    cv2.imshow('mask', mask)
    # erase the detected logos by inpainting over the masked regions
    dst = cv2.inpaint(test_img, mask, 3, cv2.INPAINT_NS)
    # paste the replacement logo into each detected box
    for rl in resultLabel:
        x1, y1, x2, y2 = rl
        width = x2 - x1
        height = y2 - y1
        res = cv2.resize(logo,
                         dsize=(width, height),
                         interpolation=cv2.INTER_CUBIC)
        dst = logoOverlay(dst, res, x=x1, y=y1)
    cv2.imshow("out", dst)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
|
#!/usr/bin/python3
import datetime
import uuid
import models
class BaseModel:
    """The base class for all storage objects in this project."""

    def __init__(self, *args, **kwargs):
        """Initialize the object.

        If a positional dict is given (args[0]), its key/value pairs become
        attributes (re-creation from a serialized form); otherwise a fresh
        id and created_at timestamp are generated. kwargs are only echoed.
        """
        if args:
            # re-create the instance from a dictionary representation
            for key, value in args[0].items():
                setattr(self, key, value)
        else:
            self.created_at = datetime.datetime.now()
            self.id = str(uuid.uuid4())
        for k in kwargs:
            print("kwargs: {}: {}".format(k, kwargs[k]))

    def save(self):
        """Update the modification timestamp and persist via storage."""
        self.updated_at = datetime.datetime.now()
        models.storage.new(self)
        models.storage.save()

    def __str__(self):
        """Return '[ClassName] (id) {attribute dict}'."""
        return "[{}] ({}) {}".format(type(self)
                                     .__name__, self.id, self.__dict__)

    def to_json(self):
        """Return a JSON-serializable dict copy of the instance."""
        dupe = self.__dict__.copy()
        # FIX: guard created_at like updated_at — instances rebuilt from a
        # dict may lack it, and the old code raised KeyError in that case
        if "created_at" in dupe:
            dupe["created_at"] = str(dupe["created_at"])
        if "updated_at" in dupe:
            dupe["updated_at"] = str(dupe["updated_at"])
        dupe["__class__"] = type(self).__name__
        return dupe
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.